Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--  drivers/media/v4l2-core/Kconfig                 |    5
-rw-r--r--  drivers/media/v4l2-core/Makefile                |   10
-rw-r--r--  drivers/media/v4l2-core/tuner-core.c            |  103
-rw-r--r--  drivers/media/v4l2-core/v4l2-async.c            |  312
-rw-r--r--  drivers/media/v4l2-core/v4l2-clk.c              |  281
-rw-r--r--  drivers/media/v4l2-core/v4l2-common.c           |  487
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c   |  152
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c            |  313
-rw-r--r--  drivers/media/v4l2-core/v4l2-dev.c              |  130
-rw-r--r--  drivers/media/v4l2-core/v4l2-device.c           |   61
-rw-r--r--  drivers/media/v4l2-core/v4l2-dv-timings.c       |  629
-rw-r--r--  drivers/media/v4l2-core/v4l2-event.c            |   43
-rw-r--r--  drivers/media/v4l2-core/v4l2-int-device.c       |  164
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c            |  407
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c          |  280
-rw-r--r--  drivers/media/v4l2-core/v4l2-of.c               |  144
-rw-r--r--  drivers/media/v4l2-core/v4l2-subdev.c           |   81
-rw-r--r--  drivers/media/v4l2-core/videobuf-core.c         |    2
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-contig.c   |  133
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c        | 1700
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c  |  101
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-sg.c      |  220
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dvb.c         |  336
-rw-r--r--  drivers/media/v4l2-core/videobuf2-vmalloc.c     |    4
24 files changed, 4287 insertions(+), 1811 deletions(-)
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 65875c3aba1..9ca0f8d59a1 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -67,6 +67,7 @@ config VIDEOBUF2_MEMOPS
config VIDEOBUF2_DMA_CONTIG
tristate
+ depends on HAS_DMA
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
select DMA_SHARED_BUFFER
@@ -82,3 +83,7 @@ config VIDEOBUF2_DMA_SG
#depends on HAS_DMA
select VIDEOBUF2_CORE
select VIDEOBUF2_MEMOPS
+
+config VIDEOBUF2_DVB
+ tristate
+ select VIDEOBUF2_CORE
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index c2d61d4f03d..63d29f27538 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -5,13 +5,18 @@
tuner-objs := tuner-core.o
videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
- v4l2-event.o v4l2-ctrls.o v4l2-subdev.o
+ v4l2-event.o v4l2-ctrls.o v4l2-subdev.o v4l2-clk.o \
+ v4l2-async.o
ifeq ($(CONFIG_COMPAT),y)
videodev-objs += v4l2-compat-ioctl32.o
endif
+ifeq ($(CONFIG_OF),y)
+ videodev-objs += v4l2-of.o
+endif
-obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-int-device.o
+obj-$(CONFIG_VIDEO_V4L2) += videodev.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
+obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
@@ -28,6 +33,7 @@ obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
+obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o
ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index b5a819af2b8..06c18ba16fa 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -43,7 +43,7 @@
#define UNSET (-1U)
-#define PREFIX (t->i2c->driver->driver.name)
+#define PREFIX (t->i2c->dev.driver->name)
/*
* Driver modprobe parameters
@@ -132,7 +132,7 @@ struct tuner {
bool standby; /* Standby mode */
unsigned int type; /* chip type id */
- unsigned int config;
+ void *config;
const char *name;
};
@@ -218,26 +218,6 @@ static void fe_standby(struct dvb_frontend *fe)
fe_tuner_ops->sleep(fe);
}
-static int fe_has_signal(struct dvb_frontend *fe)
-{
- u16 strength = 0;
-
- if (fe->ops.tuner_ops.get_rf_strength)
- fe->ops.tuner_ops.get_rf_strength(fe, &strength);
-
- return strength;
-}
-
-static int fe_get_afc(struct dvb_frontend *fe)
-{
- s32 afc = 0;
-
- if (fe->ops.tuner_ops.get_afc)
- fe->ops.tuner_ops.get_afc(fe, &afc);
-
- return 0;
-}
-
static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
{
struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
@@ -253,11 +233,9 @@ static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
static void tuner_status(struct dvb_frontend *fe);
-static struct analog_demod_ops tuner_analog_ops = {
+static const struct analog_demod_ops tuner_analog_ops = {
.set_params = fe_set_params,
.standby = fe_standby,
- .has_signal = fe_has_signal,
- .get_afc = fe_get_afc,
.set_config = fe_set_config,
.tuner_status = tuner_status
};
@@ -269,12 +247,11 @@ static struct analog_demod_ops tuner_analog_ops = {
/**
* set_type - Sets the tuner type for a given device
*
- * @c: i2c_client descriptoy
+ * @c: i2c_client descriptor
* @type: type of the tuner (e. g. tuner number)
* @new_mode_mask: Indicates if tuner supports TV and/or Radio
- * @new_config: an optional parameter ranging from 0-255 used by
- a few tuners to adjust an internal parameter,
- like LNA mode
+ * @new_config: an optional parameter used by a few tuners to adjust
+ internal parameters, like LNA mode
* @tuner_callback: an optional function to be called when switching
* to analog mode
*
@@ -282,7 +259,7 @@ static struct analog_demod_ops tuner_analog_ops = {
* by tun_setup structure. It contains several per-tuner initialization "magic"
*/
static void set_type(struct i2c_client *c, unsigned int type,
- unsigned int new_mode_mask, unsigned int new_config,
+ unsigned int new_mode_mask, void *new_config,
int (*tuner_callback) (void *dev, int component, int cmd, int arg))
{
struct tuner *t = to_tuner(i2c_get_clientdata(c));
@@ -297,8 +274,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
}
t->type = type;
- /* prevent invalid config values */
- t->config = new_config < 256 ? new_config : 0;
+ t->config = new_config;
if (tuner_callback != NULL) {
tuner_dbg("defining GPIO callback\n");
t->fe.callback = tuner_callback;
@@ -316,11 +292,8 @@ static void set_type(struct i2c_client *c, unsigned int type,
break;
case TUNER_PHILIPS_TDA8290:
{
- struct tda829x_config cfg = {
- .lna_cfg = t->config,
- };
if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter,
- t->i2c->addr, &cfg))
+ t->i2c->addr, t->config))
goto attach_failed;
break;
}
@@ -409,7 +382,6 @@ static void set_type(struct i2c_client *c, unsigned int type,
case TUNER_NXP_TDA18271:
{
struct tda18271_config cfg = {
- .config = t->config,
.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
@@ -453,6 +425,11 @@ static void set_type(struct i2c_client *c, unsigned int type,
memcpy(analog_ops, &tuner_analog_ops,
sizeof(struct analog_demod_ops));
+ if (fe_tuner_ops->get_rf_strength)
+ analog_ops->has_signal = fe_tuner_ops->get_rf_strength;
+ if (fe_tuner_ops->get_afc)
+ analog_ops->get_afc = fe_tuner_ops->get_afc;
+
} else {
t->name = analog_ops->info.name;
}
@@ -475,7 +452,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
}
tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
- c->adapter->name, c->driver->driver.name, c->addr << 1, type,
+ c->adapter->name, c->dev.driver->name, c->addr << 1, type,
t->mode_mask);
return;
@@ -506,7 +483,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
struct tuner *t = to_tuner(sd);
struct i2c_client *c = v4l2_get_subdevdata(sd);
- tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=0x%02x\n",
+ tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
tun_setup->type,
tun_setup->addr,
tun_setup->mode_mask,
@@ -579,7 +556,7 @@ static void tuner_lookup(struct i2c_adapter *adap,
int mode_mask;
if (pos->i2c->adapter != adap ||
- strcmp(pos->i2c->driver->driver.name, "tuner"))
+ strcmp(pos->i2c->dev.driver->name, "tuner"))
continue;
mode_mask = pos->mode_mask;
@@ -1013,6 +990,11 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
t->standby = false;
analog_ops->set_params(&t->fe, &params);
+ /*
+ * The tuner driver might decide to change the audmode if it only
+ * supports stereo, so update t->audmode.
+ */
+ t->audmode = params.audmode;
}
/*
@@ -1068,9 +1050,12 @@ static void tuner_status(struct dvb_frontend *fe)
if (tuner_status & TUNER_STATUS_STEREO)
tuner_info("Stereo: yes\n");
}
- if (analog_ops->has_signal)
- tuner_info("Signal strength: %d\n",
- analog_ops->has_signal(fe));
+ if (analog_ops->has_signal) {
+ u16 signal;
+
+ if (!analog_ops->has_signal(fe, &signal))
+ tuner_info("Signal strength: %hu\n", signal);
+ }
}
/*
@@ -1129,7 +1114,7 @@ static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
return 0;
}
-static int tuner_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
+static int tuner_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
@@ -1188,9 +1173,13 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
if (check_mode(t, vt->type) == -EINVAL)
return 0;
if (vt->type == t->mode && analog_ops->get_afc)
- vt->afc = analog_ops->get_afc(&t->fe);
- if (analog_ops->has_signal)
- vt->signal = analog_ops->has_signal(&t->fe);
+ analog_ops->get_afc(&t->fe, &vt->afc);
+ if (vt->type == t->mode && analog_ops->has_signal) {
+ u16 signal = (u16)vt->signal;
+
+ if (!analog_ops->has_signal(&t->fe, &signal))
+ vt->signal = signal;
+ }
if (vt->type != V4L2_TUNER_RADIO) {
vt->capability |= V4L2_TUNER_CAP_NORM;
vt->rangelow = tv_range[0] * 16;
@@ -1228,15 +1217,25 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
* Note: vt->type should be initialized before calling it.
* This is done by either video_ioctl2 or by the bridge driver.
*/
-static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
+static int tuner_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
if (set_mode(t, vt->type))
return 0;
- if (t->mode == V4L2_TUNER_RADIO)
+ if (t->mode == V4L2_TUNER_RADIO) {
t->audmode = vt->audmode;
+ /*
+ * For radio audmode can only be mono or stereo. Map any
+ * other values to stereo. The actual tuner driver that is
+ * called in set_radio_freq can decide to limit the audmode to
+ * mono if only mono is supported.
+ */
+ if (t->audmode != V4L2_TUNER_MODE_MONO &&
+ t->audmode != V4L2_TUNER_MODE_STEREO)
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ }
set_freq(t, 0);
return 0;
@@ -1302,7 +1301,6 @@ static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
static const struct v4l2_subdev_core_ops tuner_core_ops = {
.log_status = tuner_log_status,
- .s_std = tuner_s_std,
.s_power = tuner_s_power,
};
@@ -1316,9 +1314,14 @@ static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
.s_config = tuner_s_config,
};
+static const struct v4l2_subdev_video_ops tuner_video_ops = {
+ .s_std = tuner_s_std,
+};
+
static const struct v4l2_subdev_ops tuner_ops = {
.core = &tuner_core_ops,
.tuner = &tuner_tuner_ops,
+ .video = &tuner_video_ops,
};
/*
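
Note on the tuner-core change above: the fe_has_signal()/fe_get_afc() wrappers are dropped because the analog_demod_ops callbacks now follow the same convention as dvb_tuner_ops (write the result through an output pointer, return 0 on success), which is why get_rf_strength/get_afc can be assigned directly. A minimal sketch of an analog demod implementation under the prototypes implied by the call sites above; the demod name and register value are illustrative:

    static int mydemod_has_signal(struct dvb_frontend *fe, u16 *strength)
    {
            /* read a hypothetical signal-strength register */
            *strength = 0x8000;
            return 0;
    }

    static int mydemod_get_afc(struct dvb_frontend *fe, s32 *afc)
    {
            *afc = 0;       /* no frequency offset detected */
            return 0;
    }

    static const struct analog_demod_ops mydemod_analog_ops = {
            .has_signal = mydemod_has_signal,
            .get_afc    = mydemod_get_afc,
    };
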
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
new file mode 100644
index 00000000000..85a6a34128a
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -0,0 +1,312 @@
+/*
+ * V4L2 asynchronous subdevice registration API
+ *
+ * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ struct i2c_client *client = i2c_verify_client(dev);
+ return client &&
+ asd->match.i2c.adapter_id == client->adapter->nr &&
+ asd->match.i2c.address == client->addr;
+#else
+ return false;
+#endif
+}
+
+static bool match_devname(struct device *dev, struct v4l2_async_subdev *asd)
+{
+ return !strcmp(asd->match.device_name.name, dev_name(dev));
+}
+
+static bool match_of(struct device *dev, struct v4l2_async_subdev *asd)
+{
+ return dev->of_node == asd->match.of.node;
+}
+
+static LIST_HEAD(subdev_list);
+static LIST_HEAD(notifier_list);
+static DEFINE_MUTEX(list_lock);
+
+static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd)
+{
+ struct v4l2_async_subdev *asd;
+ bool (*match)(struct device *, struct v4l2_async_subdev *);
+
+ list_for_each_entry(asd, &notifier->waiting, list) {
+ /* bus_type has been verified valid before */
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_CUSTOM:
+ match = asd->match.custom.match;
+ if (!match)
+ /* Match always */
+ return asd;
+ break;
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ match = match_devname;
+ break;
+ case V4L2_ASYNC_MATCH_I2C:
+ match = match_i2c;
+ break;
+ case V4L2_ASYNC_MATCH_OF:
+ match = match_of;
+ break;
+ default:
+ /* Cannot happen, unless someone breaks us */
+ WARN_ON(true);
+ return NULL;
+ }
+
+ /* match cannot be NULL here */
+ if (match(sd->dev, asd))
+ return asd;
+ }
+
+ return NULL;
+}
+
+static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ int ret;
+
+ /* Remove from the waiting list */
+ list_del(&asd->list);
+ sd->asd = asd;
+ sd->notifier = notifier;
+
+ if (notifier->bound) {
+ ret = notifier->bound(notifier, sd, asd);
+ if (ret < 0)
+ return ret;
+ }
+ /* Move from the global subdevice list to notifier's done */
+ list_move(&sd->async_list, &notifier->done);
+
+ ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
+ if (ret < 0) {
+ if (notifier->unbind)
+ notifier->unbind(notifier, sd, asd);
+ return ret;
+ }
+
+ if (list_empty(&notifier->waiting) && notifier->complete)
+ return notifier->complete(notifier);
+
+ return 0;
+}
+
+static void v4l2_async_cleanup(struct v4l2_subdev *sd)
+{
+ v4l2_device_unregister_subdev(sd);
+ /* Subdevice driver will reprobe and put the subdev back onto the list */
+ list_del_init(&sd->async_list);
+ sd->asd = NULL;
+ sd->dev = NULL;
+}
+
+int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_subdev *sd, *tmp;
+ struct v4l2_async_subdev *asd;
+ int i;
+
+ if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
+ return -EINVAL;
+
+ notifier->v4l2_dev = v4l2_dev;
+ INIT_LIST_HEAD(&notifier->waiting);
+ INIT_LIST_HEAD(&notifier->done);
+
+ for (i = 0; i < notifier->num_subdevs; i++) {
+ asd = notifier->subdevs[i];
+
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_CUSTOM:
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ case V4L2_ASYNC_MATCH_I2C:
+ case V4L2_ASYNC_MATCH_OF:
+ break;
+ default:
+ dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
+ "Invalid match type %u on %p\n",
+ asd->match_type, asd);
+ return -EINVAL;
+ }
+ list_add_tail(&asd->list, &notifier->waiting);
+ }
+
+ mutex_lock(&list_lock);
+
+ /* Keep also completed notifiers on the list */
+ list_add(&notifier->list, &notifier_list);
+
+ list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
+ int ret;
+
+ asd = v4l2_async_belongs(notifier, sd);
+ if (!asd)
+ continue;
+
+ ret = v4l2_async_test_notify(notifier, sd, asd);
+ if (ret < 0) {
+ mutex_unlock(&list_lock);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_async_notifier_register);
+
+void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_subdev *sd, *tmp;
+ unsigned int notif_n_subdev = notifier->num_subdevs;
+ unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
+ struct device **dev;
+ int i = 0;
+
+ if (!notifier->v4l2_dev)
+ return;
+
+ dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(notifier->v4l2_dev->dev,
+ "Failed to allocate device cache!\n");
+ }
+
+ mutex_lock(&list_lock);
+
+ list_del(&notifier->list);
+
+ list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
+ struct device *d;
+
+ d = get_device(sd->dev);
+
+ v4l2_async_cleanup(sd);
+
+ /* If we handled USB devices, we'd have to lock the parent too */
+ device_release_driver(d);
+
+ if (notifier->unbind)
+ notifier->unbind(notifier, sd, sd->asd);
+
+ /*
+ * Store device at the device cache, in order to call
+ * put_device() on the final step
+ */
+ if (dev)
+ dev[i++] = d;
+ else
+ put_device(d);
+ }
+
+ mutex_unlock(&list_lock);
+
+ /*
+ * Call device_attach() to reprobe devices
+ *
+ * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
+ * executed.
+ */
+ while (i--) {
+ struct device *d = dev[i];
+
+ if (d && device_attach(d) < 0) {
+ const char *name = "(none)";
+ int lock = device_trylock(d);
+
+ if (lock && d->driver)
+ name = d->driver->name;
+ dev_err(d, "Failed to re-probe to %s\n", name);
+ if (lock)
+ device_unlock(d);
+ }
+ put_device(d);
+ }
+ kfree(dev);
+
+ notifier->v4l2_dev = NULL;
+
+ /*
+ * Don't care about the waiting list, it is initialised and populated
+ * upon notifier registration.
+ */
+}
+EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+
+int v4l2_async_register_subdev(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier;
+
+ mutex_lock(&list_lock);
+
+ INIT_LIST_HEAD(&sd->async_list);
+
+ list_for_each_entry(notifier, &notifier_list, list) {
+ struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
+ if (asd) {
+ int ret = v4l2_async_test_notify(notifier, sd, asd);
+ mutex_unlock(&list_lock);
+ return ret;
+ }
+ }
+
+ /* None matched, wait for hot-plugging */
+ list_add(&sd->async_list, &subdev_list);
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_async_register_subdev);
+
+void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier = sd->notifier;
+
+ if (!sd->asd) {
+ if (!list_empty(&sd->async_list))
+ v4l2_async_cleanup(sd);
+ return;
+ }
+
+ mutex_lock(&list_lock);
+
+ list_add(&sd->asd->list, &notifier->waiting);
+
+ v4l2_async_cleanup(sd);
+
+ if (notifier->unbind)
+ notifier->unbind(notifier, sd, sd->asd);
+
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_unregister_subdev);
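
Usage note for the new v4l2-async API: the bridge driver describes the subdevices it is waiting for and registers a notifier, while each subdevice driver announces itself from probe(). A sketch using the match fields this file dereferences (asd->match.i2c.adapter_id/address); the bus number, address and structure names other than the API itself are illustrative:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <media/v4l2-async.h>
    #include <media/v4l2-device.h>
    #include <media/v4l2-subdev.h>

    /* Bridge side: wait for one I2C sensor on adapter 1, address 0x10. */
    static struct v4l2_async_subdev sensor_asd = {
            .match_type     = V4L2_ASYNC_MATCH_I2C,
            .match.i2c      = {
                    .adapter_id     = 1,
                    .address        = 0x10,
            },
    };

    static struct v4l2_async_subdev *bridge_asds[] = { &sensor_asd };

    static int bridge_bound(struct v4l2_async_notifier *notifier,
                            struct v4l2_subdev *sd,
                            struct v4l2_async_subdev *asd)
    {
            dev_info(notifier->v4l2_dev->dev, "bound subdev %s\n", sd->name);
            return 0;       /* a negative return keeps the subdev unregistered */
    }

    static struct v4l2_async_notifier bridge_notifier = {
            .num_subdevs    = ARRAY_SIZE(bridge_asds),
            .subdevs        = bridge_asds,
            .bound          = bridge_bound,
    };

    /* In the bridge probe(), after v4l2_device_register():
     *         ret = v4l2_async_notifier_register(&priv->v4l2_dev,
     *                                            &bridge_notifier);
     * In the sensor probe(), once its subdev is initialised:
     *         ret = v4l2_async_register_subdev(&sensor->subdev);
     */
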
diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c
new file mode 100644
index 00000000000..e18cc0469cf
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-clk.c
@@ -0,0 +1,281 @@
+/*
+ * V4L2 clock service
+ *
+ * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <media/v4l2-clk.h>
+#include <media/v4l2-subdev.h>
+
+static DEFINE_MUTEX(clk_lock);
+static LIST_HEAD(clk_list);
+
+static struct v4l2_clk *v4l2_clk_find(const char *dev_id, const char *id)
+{
+ struct v4l2_clk *clk;
+
+ list_for_each_entry(clk, &clk_list, list) {
+ if (strcmp(dev_id, clk->dev_id))
+ continue;
+
+ if (!id || !clk->id || !strcmp(clk->id, id))
+ return clk;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
+{
+ struct v4l2_clk *clk;
+
+ mutex_lock(&clk_lock);
+ clk = v4l2_clk_find(dev_name(dev), id);
+
+ if (!IS_ERR(clk))
+ atomic_inc(&clk->use_count);
+ mutex_unlock(&clk_lock);
+
+ return clk;
+}
+EXPORT_SYMBOL(v4l2_clk_get);
+
+void v4l2_clk_put(struct v4l2_clk *clk)
+{
+ struct v4l2_clk *tmp;
+
+ if (IS_ERR(clk))
+ return;
+
+ mutex_lock(&clk_lock);
+
+ list_for_each_entry(tmp, &clk_list, list)
+ if (tmp == clk)
+ atomic_dec(&clk->use_count);
+
+ mutex_unlock(&clk_lock);
+}
+EXPORT_SYMBOL(v4l2_clk_put);
+
+static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
+{
+ struct v4l2_clk *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&clk_lock);
+
+ list_for_each_entry(tmp, &clk_list, list)
+ if (tmp == clk) {
+ ret = !try_module_get(clk->ops->owner);
+ if (ret)
+ ret = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(&clk_lock);
+
+ return ret;
+}
+
+static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
+{
+ module_put(clk->ops->owner);
+}
+
+int v4l2_clk_enable(struct v4l2_clk *clk)
+{
+ int ret = v4l2_clk_lock_driver(clk);
+
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&clk->lock);
+
+ if (++clk->enable == 1 && clk->ops->enable) {
+ ret = clk->ops->enable(clk);
+ if (ret < 0)
+ clk->enable--;
+ }
+
+ mutex_unlock(&clk->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_clk_enable);
+
+/*
+ * You might Oops if you try to disabled a disabled clock, because then the
+ * driver isn't locked and could have been unloaded by now, so, don't do that
+ */
+void v4l2_clk_disable(struct v4l2_clk *clk)
+{
+ int enable;
+
+ mutex_lock(&clk->lock);
+
+ enable = --clk->enable;
+ if (WARN(enable < 0, "Unbalanced %s() on %s:%s!\n", __func__,
+ clk->dev_id, clk->id))
+ clk->enable++;
+ else if (!enable && clk->ops->disable)
+ clk->ops->disable(clk);
+
+ mutex_unlock(&clk->lock);
+
+ v4l2_clk_unlock_driver(clk);
+}
+EXPORT_SYMBOL(v4l2_clk_disable);
+
+unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
+{
+ int ret = v4l2_clk_lock_driver(clk);
+
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&clk->lock);
+ if (!clk->ops->get_rate)
+ ret = -ENOSYS;
+ else
+ ret = clk->ops->get_rate(clk);
+ mutex_unlock(&clk->lock);
+
+ v4l2_clk_unlock_driver(clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_clk_get_rate);
+
+int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
+{
+ int ret = v4l2_clk_lock_driver(clk);
+
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&clk->lock);
+ if (!clk->ops->set_rate)
+ ret = -ENOSYS;
+ else
+ ret = clk->ops->set_rate(clk, rate);
+ mutex_unlock(&clk->lock);
+
+ v4l2_clk_unlock_driver(clk);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_clk_set_rate);
+
+struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
+ const char *dev_id,
+ const char *id, void *priv)
+{
+ struct v4l2_clk *clk;
+ int ret;
+
+ if (!ops || !dev_id)
+ return ERR_PTR(-EINVAL);
+
+ clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
+ if (!clk)
+ return ERR_PTR(-ENOMEM);
+
+ clk->id = kstrdup(id, GFP_KERNEL);
+ clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
+ if ((id && !clk->id) || !clk->dev_id) {
+ ret = -ENOMEM;
+ goto ealloc;
+ }
+ clk->ops = ops;
+ clk->priv = priv;
+ atomic_set(&clk->use_count, 0);
+ mutex_init(&clk->lock);
+
+ mutex_lock(&clk_lock);
+ if (!IS_ERR(v4l2_clk_find(dev_id, id))) {
+ mutex_unlock(&clk_lock);
+ ret = -EEXIST;
+ goto eexist;
+ }
+ list_add_tail(&clk->list, &clk_list);
+ mutex_unlock(&clk_lock);
+
+ return clk;
+
+eexist:
+ealloc:
+ kfree(clk->id);
+ kfree(clk->dev_id);
+ kfree(clk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(v4l2_clk_register);
+
+void v4l2_clk_unregister(struct v4l2_clk *clk)
+{
+ if (WARN(atomic_read(&clk->use_count),
+ "%s(): Refusing to unregister ref-counted %s:%s clock!\n",
+ __func__, clk->dev_id, clk->id))
+ return;
+
+ mutex_lock(&clk_lock);
+ list_del(&clk->list);
+ mutex_unlock(&clk_lock);
+
+ kfree(clk->id);
+ kfree(clk->dev_id);
+ kfree(clk);
+}
+EXPORT_SYMBOL(v4l2_clk_unregister);
+
+struct v4l2_clk_fixed {
+ unsigned long rate;
+ struct v4l2_clk_ops ops;
+};
+
+static unsigned long fixed_get_rate(struct v4l2_clk *clk)
+{
+ struct v4l2_clk_fixed *priv = clk->priv;
+ return priv->rate;
+}
+
+struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
+ const char *id, unsigned long rate, struct module *owner)
+{
+ struct v4l2_clk *clk;
+ struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->rate = rate;
+ priv->ops.get_rate = fixed_get_rate;
+ priv->ops.owner = owner;
+
+ clk = v4l2_clk_register(&priv->ops, dev_id, id, priv);
+ if (IS_ERR(clk))
+ kfree(priv);
+
+ return clk;
+}
+EXPORT_SYMBOL(__v4l2_clk_register_fixed);
+
+void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
+{
+ kfree(clk->priv);
+ v4l2_clk_unregister(clk);
+}
+EXPORT_SYMBOL(v4l2_clk_unregister_fixed);
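
Usage note for the new v4l2-clk service: clocks are looked up purely by the consumer's dev_name() plus a connection id, so a typical pairing is a board or bridge file registering the clock under the sensor's device name and the sensor driver claiming it in probe(). A sketch under those assumptions; the "1-0010" consumer name and 24 MHz rate are illustrative, and __v4l2_clk_register_fixed() is called directly rather than through a header wrapper:

    #include <linux/err.h>
    #include <linux/i2c.h>
    #include <linux/module.h>
    #include <media/v4l2-clk.h>

    /* Board/bridge side: export a fixed 24 MHz master clock for the sensor
     * that will probe as i2c device 1-0010. */
    static struct v4l2_clk *register_sensor_mclk(void)
    {
            return __v4l2_clk_register_fixed("1-0010", "mclk", 24000000,
                                             THIS_MODULE);
    }

    /* Sensor side: claim and enable the clock from probe(). */
    static int sensor_clk_on(struct i2c_client *client, struct v4l2_clk **clkp)
    {
            struct v4l2_clk *clk = v4l2_clk_get(&client->dev, "mclk");
            int ret;

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = v4l2_clk_enable(clk);
            if (ret < 0) {
                    v4l2_clk_put(clk);
                    return ret;
            }

            *clkp = clk;
            return 0;
    }
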
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 380ddd89fa4..433d6d77942 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -61,7 +61,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-chip-ident.h>
#include <linux/videodev2.h>
@@ -227,63 +226,9 @@ u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
}
EXPORT_SYMBOL(v4l2_ctrl_next);
-int v4l2_chip_match_host(const struct v4l2_dbg_match *match)
-{
- switch (match->type) {
- case V4L2_CHIP_MATCH_HOST:
- return match->addr == 0;
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(v4l2_chip_match_host);
-
-#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
-int v4l2_chip_match_i2c_client(struct i2c_client *c, const struct v4l2_dbg_match *match)
-{
- int len;
-
- if (c == NULL || match == NULL)
- return 0;
-
- switch (match->type) {
- case V4L2_CHIP_MATCH_I2C_DRIVER:
- if (c->driver == NULL || c->driver->driver.name == NULL)
- return 0;
- len = strlen(c->driver->driver.name);
- /* legacy drivers have a ' suffix, don't try to match that */
- if (len && c->driver->driver.name[len - 1] == '\'')
- len--;
- return len && !strncmp(c->driver->driver.name, match->name, len);
- case V4L2_CHIP_MATCH_I2C_ADDR:
- return c->addr == match->addr;
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(v4l2_chip_match_i2c_client);
-
-int v4l2_chip_ident_i2c_client(struct i2c_client *c, struct v4l2_dbg_chip_ident *chip,
- u32 ident, u32 revision)
-{
- if (!v4l2_chip_match_i2c_client(c, &chip->match))
- return 0;
- if (chip->ident == V4L2_IDENT_NONE) {
- chip->ident = ident;
- chip->revision = revision;
- }
- else {
- chip->ident = V4L2_IDENT_AMBIGUOUS;
- chip->revision = 0;
- }
- return 0;
-}
-EXPORT_SYMBOL(v4l2_chip_ident_i2c_client);
-
-/* ----------------------------------------------------------------- */
-
/* I2C Helper functions */
+#if IS_ENABLED(CONFIG_I2C)
void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
const struct v4l2_subdev_ops *ops)
@@ -291,19 +236,18 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
v4l2_subdev_init(sd, ops);
sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
/* the owner is the same as the i2c_client's driver owner */
- sd->owner = client->driver->driver.owner;
+ sd->owner = client->dev.driver->owner;
+ sd->dev = &client->dev;
/* i2c_client and v4l2_subdev point to one another */
v4l2_set_subdevdata(sd, client);
i2c_set_clientdata(client, sd);
/* initialize name */
snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
- client->driver->driver.name, i2c_adapter_id(client->adapter),
+ client->dev.driver->name, i2c_adapter_id(client->adapter),
client->addr);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
-
-
/* Load an i2c sub-device. */
struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
struct i2c_adapter *adapter, struct i2c_board_info *info,
@@ -330,11 +274,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
loaded. This delay-load mechanism doesn't work if other drivers
want to use the i2c device, so explicitly loading the module
is the best alternative. */
- if (client == NULL || client->driver == NULL)
+ if (client == NULL || client->dev.driver == NULL)
goto error;
/* Lock the module so we can safely get the v4l2_subdev pointer */
- if (!try_module_get(client->driver->driver.owner))
+ if (!try_module_get(client->dev.driver->owner))
goto error;
sd = i2c_get_clientdata(client);
@@ -343,7 +287,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
if (v4l2_device_register_subdev(v4l2_dev, sd))
sd = NULL;
/* Decrease the module use count to match the first try_module_get. */
- module_put(client->driver->driver.owner);
+ module_put(client->dev.driver->owner);
error:
/* If we have a client but no subdev, then something went wrong and
@@ -384,7 +328,7 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
{
static const unsigned short radio_addrs[] = {
-#if defined(CONFIG_MEDIA_TUNER_TEA5761) || defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE)
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
0x10,
#endif
0x60,
@@ -427,6 +371,7 @@ void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
/* the owner is the same as the spi_device's driver owner */
sd->owner = spi->dev.driver->owner;
+ sd->dev = &spi->dev;
/* spi_device and v4l2_subdev point to one another */
v4l2_set_subdevdata(sd, spi);
spi_set_drvdata(spi, sd);
@@ -550,410 +495,6 @@ void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
-/**
- * v4l_fill_dv_preset_info - fill description of a digital video preset
- * @preset - preset value
- * @info - pointer to struct v4l2_dv_enum_preset
- *
- * drivers can use this helper function to fill description of dv preset
- * in info.
- */
-int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info)
-{
- static const struct v4l2_dv_preset_info {
- u16 width;
- u16 height;
- const char *name;
- } dv_presets[] = {
- { 0, 0, "Invalid" }, /* V4L2_DV_INVALID */
- { 720, 480, "480p@59.94" }, /* V4L2_DV_480P59_94 */
- { 720, 576, "576p@50" }, /* V4L2_DV_576P50 */
- { 1280, 720, "720p@24" }, /* V4L2_DV_720P24 */
- { 1280, 720, "720p@25" }, /* V4L2_DV_720P25 */
- { 1280, 720, "720p@30" }, /* V4L2_DV_720P30 */
- { 1280, 720, "720p@50" }, /* V4L2_DV_720P50 */
- { 1280, 720, "720p@59.94" }, /* V4L2_DV_720P59_94 */
- { 1280, 720, "720p@60" }, /* V4L2_DV_720P60 */
- { 1920, 1080, "1080i@29.97" }, /* V4L2_DV_1080I29_97 */
- { 1920, 1080, "1080i@30" }, /* V4L2_DV_1080I30 */
- { 1920, 1080, "1080i@25" }, /* V4L2_DV_1080I25 */
- { 1920, 1080, "1080i@50" }, /* V4L2_DV_1080I50 */
- { 1920, 1080, "1080i@60" }, /* V4L2_DV_1080I60 */
- { 1920, 1080, "1080p@24" }, /* V4L2_DV_1080P24 */
- { 1920, 1080, "1080p@25" }, /* V4L2_DV_1080P25 */
- { 1920, 1080, "1080p@30" }, /* V4L2_DV_1080P30 */
- { 1920, 1080, "1080p@50" }, /* V4L2_DV_1080P50 */
- { 1920, 1080, "1080p@60" }, /* V4L2_DV_1080P60 */
- };
-
- if (info == NULL || preset >= ARRAY_SIZE(dv_presets))
- return -EINVAL;
-
- info->preset = preset;
- info->width = dv_presets[preset].width;
- info->height = dv_presets[preset].height;
- strlcpy(info->name, dv_presets[preset].name, sizeof(info->name));
- return 0;
-}
-EXPORT_SYMBOL_GPL(v4l_fill_dv_preset_info);
-
-/**
- * v4l_match_dv_timings - check if two timings match
- * @t1 - compare this v4l2_dv_timings struct...
- * @t2 - with this struct.
- * @pclock_delta - the allowed pixelclock deviation.
- *
- * Compare t1 with t2 with a given margin of error for the pixelclock.
- */
-bool v4l_match_dv_timings(const struct v4l2_dv_timings *t1,
- const struct v4l2_dv_timings *t2,
- unsigned pclock_delta)
-{
- if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
- return false;
- if (t1->bt.width == t2->bt.width &&
- t1->bt.height == t2->bt.height &&
- t1->bt.interlaced == t2->bt.interlaced &&
- t1->bt.polarities == t2->bt.polarities &&
- t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
- t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
- t1->bt.hfrontporch == t2->bt.hfrontporch &&
- t1->bt.vfrontporch == t2->bt.vfrontporch &&
- t1->bt.vsync == t2->bt.vsync &&
- t1->bt.vbackporch == t2->bt.vbackporch &&
- (!t1->bt.interlaced ||
- (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
- t1->bt.il_vsync == t2->bt.il_vsync &&
- t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
- return true;
- return false;
-}
-EXPORT_SYMBOL_GPL(v4l_match_dv_timings);
-
-/*
- * CVT defines
- * Based on Coordinated Video Timings Standard
- * version 1.1 September 10, 2003
- */
-
-#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
-
-/* Normal blanking */
-#define CVT_MIN_V_BPORCH 7 /* lines */
-#define CVT_MIN_V_PORCH_RND 3 /* lines */
-#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
-
-/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
-#define CVT_CELL_GRAN 8 /* character cell granularity */
-#define CVT_M 600 /* blanking formula gradient */
-#define CVT_C 40 /* blanking formula offset */
-#define CVT_K 128 /* blanking formula scaling factor */
-#define CVT_J 20 /* blanking formula scaling factor */
-#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
-#define CVT_M_PRIME (CVT_K * CVT_M / 256)
-
-/* Reduced Blanking */
-#define CVT_RB_MIN_V_BPORCH 7 /* lines */
-#define CVT_RB_V_FPORCH 3 /* lines */
-#define CVT_RB_MIN_V_BLANK 460 /* us */
-#define CVT_RB_H_SYNC 32 /* pixels */
-#define CVT_RB_H_BPORCH 80 /* pixels */
-#define CVT_RB_H_BLANK 160 /* pixels */
-
-/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
- * @frame_height - the total height of the frame (including blanking) in lines.
- * @hfreq - the horizontal frequency in Hz.
- * @vsync - the height of the vertical sync in lines.
- * @polarities - the horizontal and vertical polarities (same as struct
- * v4l2_bt_timings polarities).
- * @fmt - the resulting timings.
- *
- * This function will attempt to detect if the given values correspond to a
- * valid CVT format. If so, then it will return true, and fmt will be filled
- * in with the found CVT timings.
- */
-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, struct v4l2_dv_timings *fmt)
-{
- int v_fp, v_bp, h_fp, h_bp, hsync;
- int frame_width, image_height, image_width;
- bool reduced_blanking;
- unsigned pix_clk;
-
- if (vsync < 4 || vsync > 7)
- return false;
-
- if (polarities == V4L2_DV_VSYNC_POS_POL)
- reduced_blanking = false;
- else if (polarities == V4L2_DV_HSYNC_POS_POL)
- reduced_blanking = true;
- else
- return false;
-
- /* Vertical */
- if (reduced_blanking) {
- v_fp = CVT_RB_V_FPORCH;
- v_bp = (CVT_RB_MIN_V_BLANK * hfreq + 999999) / 1000000;
- v_bp -= vsync + v_fp;
-
- if (v_bp < CVT_RB_MIN_V_BPORCH)
- v_bp = CVT_RB_MIN_V_BPORCH;
- } else {
- v_fp = CVT_MIN_V_PORCH_RND;
- v_bp = (CVT_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
-
- if (v_bp < CVT_MIN_V_BPORCH)
- v_bp = CVT_MIN_V_BPORCH;
- }
- image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
-
- /* Aspect ratio based on vsync */
- switch (vsync) {
- case 4:
- image_width = (image_height * 4) / 3;
- break;
- case 5:
- image_width = (image_height * 16) / 9;
- break;
- case 6:
- image_width = (image_height * 16) / 10;
- break;
- case 7:
- /* special case */
- if (image_height == 1024)
- image_width = (image_height * 5) / 4;
- else if (image_height == 768)
- image_width = (image_height * 15) / 9;
- else
- return false;
- break;
- default:
- return false;
- }
-
- image_width = image_width & ~7;
-
- /* Horizontal */
- if (reduced_blanking) {
- pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
- pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
-
- h_bp = CVT_RB_H_BPORCH;
- hsync = CVT_RB_H_SYNC;
- h_fp = CVT_RB_H_BLANK - h_bp - hsync;
-
- frame_width = image_width + CVT_RB_H_BLANK;
- } else {
- int h_blank;
- unsigned ideal_duty_cycle = CVT_C_PRIME - (CVT_M_PRIME * 1000) / hfreq;
-
- h_blank = (image_width * ideal_duty_cycle + (100 - ideal_duty_cycle) / 2) /
- (100 - ideal_duty_cycle);
- h_blank = h_blank - h_blank % (2 * CVT_CELL_GRAN);
-
- if (h_blank * 100 / image_width < 20) {
- h_blank = image_width / 5;
- h_blank = (h_blank + 0x7) & ~0x7;
- }
-
- pix_clk = (image_width + h_blank) * hfreq;
- pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
-
- h_bp = h_blank / 2;
- frame_width = image_width + h_blank;
-
- hsync = (frame_width * 8 + 50) / 100;
- hsync = hsync - hsync % CVT_CELL_GRAN;
- h_fp = h_blank - hsync - h_bp;
- }
-
- fmt->bt.polarities = polarities;
- fmt->bt.width = image_width;
- fmt->bt.height = image_height;
- fmt->bt.hfrontporch = h_fp;
- fmt->bt.vfrontporch = v_fp;
- fmt->bt.hsync = hsync;
- fmt->bt.vsync = vsync;
- fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
- fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
- fmt->bt.pixelclock = pix_clk;
- fmt->bt.standards = V4L2_DV_BT_STD_CVT;
- if (reduced_blanking)
- fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
- return true;
-}
-EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
-
-/*
- * GTF defines
- * Based on Generalized Timing Formula Standard
- * Version 1.1 September 2, 1999
- */
-
-#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
-
-#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
-#define GTF_V_FP 1 /* vertical front porch (lines) */
-#define GTF_CELL_GRAN 8 /* character cell granularity */
-
-/* Default */
-#define GTF_D_M 600 /* blanking formula gradient */
-#define GTF_D_C 40 /* blanking formula offset */
-#define GTF_D_K 128 /* blanking formula scaling factor */
-#define GTF_D_J 20 /* blanking formula scaling factor */
-#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
-#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
-
-/* Secondary */
-#define GTF_S_M 3600 /* blanking formula gradient */
-#define GTF_S_C 40 /* blanking formula offset */
-#define GTF_S_K 128 /* blanking formula scaling factor */
-#define GTF_S_J 35 /* blanking formula scaling factor */
-#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
-#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
-
-/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
- * @frame_height - the total height of the frame (including blanking) in lines.
- * @hfreq - the horizontal frequency in Hz.
- * @vsync - the height of the vertical sync in lines.
- * @polarities - the horizontal and vertical polarities (same as struct
- * v4l2_bt_timings polarities).
- * @aspect - preferred aspect ratio. GTF has no method of determining the
- * aspect ratio in order to derive the image width from the
- * image height, so it has to be passed explicitly. Usually
- * the native screen aspect ratio is used for this. If it
- * is not filled in correctly, then 16:9 will be assumed.
- * @fmt - the resulting timings.
- *
- * This function will attempt to detect if the given values correspond to a
- * valid GTF format. If so, then it will return true, and fmt will be filled
- * in with the found GTF timings.
- */
-bool v4l2_detect_gtf(unsigned frame_height,
- unsigned hfreq,
- unsigned vsync,
- u32 polarities,
- struct v4l2_fract aspect,
- struct v4l2_dv_timings *fmt)
-{
- int pix_clk;
- int v_fp, v_bp, h_fp, hsync;
- int frame_width, image_height, image_width;
- bool default_gtf;
- int h_blank;
-
- if (vsync != 3)
- return false;
-
- if (polarities == V4L2_DV_VSYNC_POS_POL)
- default_gtf = true;
- else if (polarities == V4L2_DV_HSYNC_POS_POL)
- default_gtf = false;
- else
- return false;
-
- /* Vertical */
- v_fp = GTF_V_FP;
- v_bp = (GTF_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
- image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
-
- if (aspect.numerator == 0 || aspect.denominator == 0) {
- aspect.numerator = 16;
- aspect.denominator = 9;
- }
- image_width = ((image_height * aspect.numerator) / aspect.denominator);
-
- /* Horizontal */
- if (default_gtf)
- h_blank = ((image_width * GTF_D_C_PRIME * hfreq) -
- (image_width * GTF_D_M_PRIME * 1000) +
- (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) / 2) /
- (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000);
- else
- h_blank = ((image_width * GTF_S_C_PRIME * hfreq) -
- (image_width * GTF_S_M_PRIME * 1000) +
- (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) / 2) /
- (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000);
-
- h_blank = h_blank - h_blank % (2 * GTF_CELL_GRAN);
- frame_width = image_width + h_blank;
-
- pix_clk = (image_width + h_blank) * hfreq;
- pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
-
- hsync = (frame_width * 8 + 50) / 100;
- hsync = hsync - hsync % GTF_CELL_GRAN;
-
- h_fp = h_blank / 2 - hsync;
-
- fmt->bt.polarities = polarities;
- fmt->bt.width = image_width;
- fmt->bt.height = image_height;
- fmt->bt.hfrontporch = h_fp;
- fmt->bt.vfrontporch = v_fp;
- fmt->bt.hsync = hsync;
- fmt->bt.vsync = vsync;
- fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
- fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
- fmt->bt.pixelclock = pix_clk;
- fmt->bt.standards = V4L2_DV_BT_STD_GTF;
- if (!default_gtf)
- fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
- return true;
-}
-EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
-
-/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
- * 0x15 and 0x16 from the EDID.
- * @hor_landscape - byte 0x15 from the EDID.
- * @vert_portrait - byte 0x16 from the EDID.
- *
- * Determines the aspect ratio from the EDID.
- * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
- * "Horizontal and Vertical Screen Size or Aspect Ratio"
- */
-struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
-{
- struct v4l2_fract aspect = { 16, 9 };
- u32 tmp;
- u8 ratio;
-
- /* Nothing filled in, fallback to 16:9 */
- if (!hor_landscape && !vert_portrait)
- return aspect;
- /* Both filled in, so they are interpreted as the screen size in cm */
- if (hor_landscape && vert_portrait) {
- aspect.numerator = hor_landscape;
- aspect.denominator = vert_portrait;
- return aspect;
- }
- /* Only one is filled in, so interpret them as a ratio:
- (val + 99) / 100 */
- ratio = hor_landscape | vert_portrait;
- /* Change some rounded values into the exact aspect ratio */
- if (ratio == 79) {
- aspect.numerator = 16;
- aspect.denominator = 9;
- } else if (ratio == 34) {
- aspect.numerator = 4;
- aspect.numerator = 3;
- } else if (ratio == 68) {
- aspect.numerator = 15;
- aspect.numerator = 9;
- } else {
- aspect.numerator = hor_landscape + 99;
- aspect.denominator = 100;
- }
- if (hor_landscape)
- return aspect;
- /* The aspect ratio is for portrait, so swap numerator and denominator */
- tmp = aspect.denominator;
- aspect.denominator = aspect.numerator;
- aspect.numerator = tmp;
- return aspect;
-}
-EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
-
const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
const struct v4l2_discrete_probe *probe,
s32 width, s32 height)
@@ -978,3 +519,13 @@ const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
return best;
}
EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);
+
+void v4l2_get_timestamp(struct timeval *tv)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+EXPORT_SYMBOL_GPL(v4l2_get_timestamp);
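
The new v4l2_get_timestamp() helper covers the common "stamp this buffer with a monotonic time" pattern that drivers previously open-coded with ktime_get_ts(). A sketch of a typical call site, assuming the videobuf2 buffer layout of this kernel generation (vb->v4l2_buf) and a hypothetical driver completion path:

    #include <media/v4l2-common.h>
    #include <media/videobuf2-core.h>

    /* Called from the driver's interrupt/completion path for a finished frame. */
    static void mydrv_frame_done(struct vb2_buffer *vb)
    {
            v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
            vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
    }
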
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 7157af301b1..7e2411c3641 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
+ if (get_user(kp->type, &up->type))
+ return -EFAULT;
+
switch (kp->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
- get_user(kp->type, &up->type))
- return -EFAULT;
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+ return -EFAULT;
return __get_v4l2_format32(kp, up);
}
static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
{
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
- return -EFAULT;
+ copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+ return -EFAULT;
return __get_v4l2_format32(&kp->format, &up->format);
}
@@ -733,14 +735,14 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
- put_compat_timespec(&kp->timestamp, &up->timestamp) ||
+ compat_put_timespec(&kp->timestamp, &up->timestamp) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;
return 0;
}
-struct v4l2_subdev_edid32 {
+struct v4l2_edid32 {
__u32 pad;
__u32 start_block;
__u32 blocks;
@@ -748,11 +750,11 @@ struct v4l2_subdev_edid32 {
compat_caddr_t edid;
};
-static int get_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subdev_edid32 __user *up)
+static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
{
u32 tmp;
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_subdev_edid32)) ||
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
get_user(kp->pad, &up->pad) ||
get_user(kp->start_block, &up->start_block) ||
get_user(kp->blocks, &up->blocks) ||
@@ -763,11 +765,11 @@ static int get_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
return 0;
}
-static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subdev_edid32 __user *up)
+static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
{
u32 tmp = (u32)((unsigned long)kp->edid);
- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_subdev_edid32)) ||
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
put_user(kp->pad, &up->pad) ||
put_user(kp->start_block, &up->start_block) ||
put_user(kp->blocks, &up->blocks) ||
@@ -787,8 +789,8 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
-#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 63, struct v4l2_subdev_edid32)
-#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 64, struct v4l2_subdev_edid32)
+#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
+#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32)
#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
@@ -816,7 +818,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
struct v4l2_ext_controls v2ecs;
struct v4l2_event v2ev;
struct v4l2_create_buffers v2crt;
- struct v4l2_subdev_edid v2edid;
+ struct v4l2_edid v2edid;
unsigned long vx;
int vi;
} karg;
@@ -849,8 +851,8 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
- case VIDIOC_SUBDEV_G_EDID32: cmd = VIDIOC_SUBDEV_G_EDID; break;
- case VIDIOC_SUBDEV_S_EDID32: cmd = VIDIOC_SUBDEV_S_EDID; break;
+ case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break;
+ case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break;
}
switch (cmd) {
@@ -868,9 +870,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
compatible_arg = 0;
break;
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID:
- err = get_v4l2_subdev_edid32(&karg.v2edid, up);
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID:
+ err = get_v4l2_edid32(&karg.v2edid, up);
compatible_arg = 0;
break;
@@ -966,9 +968,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
err = put_v4l2_event32(&karg.v2ev, up);
break;
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID:
- err = put_v4l2_subdev_edid32(&karg.v2edid, up);
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID:
+ err = put_v4l2_edid32(&karg.v2edid, up);
break;
case VIDIOC_G_FMT:
@@ -1006,108 +1008,14 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
if (!file->f_op->unlocked_ioctl)
return ret;
- switch (cmd) {
- case VIDIOC_QUERYCAP:
- case VIDIOC_RESERVED:
- case VIDIOC_ENUM_FMT:
- case VIDIOC_G_FMT32:
- case VIDIOC_S_FMT32:
- case VIDIOC_REQBUFS:
- case VIDIOC_QUERYBUF32:
- case VIDIOC_G_FBUF32:
- case VIDIOC_S_FBUF32:
- case VIDIOC_OVERLAY32:
- case VIDIOC_QBUF32:
- case VIDIOC_EXPBUF:
- case VIDIOC_DQBUF32:
- case VIDIOC_STREAMON32:
- case VIDIOC_STREAMOFF32:
- case VIDIOC_G_PARM:
- case VIDIOC_S_PARM:
- case VIDIOC_G_STD:
- case VIDIOC_S_STD:
- case VIDIOC_ENUMSTD32:
- case VIDIOC_ENUMINPUT32:
- case VIDIOC_G_CTRL:
- case VIDIOC_S_CTRL:
- case VIDIOC_G_TUNER:
- case VIDIOC_S_TUNER:
- case VIDIOC_G_AUDIO:
- case VIDIOC_S_AUDIO:
- case VIDIOC_QUERYCTRL:
- case VIDIOC_QUERYMENU:
- case VIDIOC_G_INPUT32:
- case VIDIOC_S_INPUT32:
- case VIDIOC_G_OUTPUT32:
- case VIDIOC_S_OUTPUT32:
- case VIDIOC_ENUMOUTPUT:
- case VIDIOC_G_AUDOUT:
- case VIDIOC_S_AUDOUT:
- case VIDIOC_G_MODULATOR:
- case VIDIOC_S_MODULATOR:
- case VIDIOC_S_FREQUENCY:
- case VIDIOC_G_FREQUENCY:
- case VIDIOC_CROPCAP:
- case VIDIOC_G_CROP:
- case VIDIOC_S_CROP:
- case VIDIOC_G_SELECTION:
- case VIDIOC_S_SELECTION:
- case VIDIOC_G_JPEGCOMP:
- case VIDIOC_S_JPEGCOMP:
- case VIDIOC_QUERYSTD:
- case VIDIOC_TRY_FMT32:
- case VIDIOC_ENUMAUDIO:
- case VIDIOC_ENUMAUDOUT:
- case VIDIOC_G_PRIORITY:
- case VIDIOC_S_PRIORITY:
- case VIDIOC_G_SLICED_VBI_CAP:
- case VIDIOC_LOG_STATUS:
- case VIDIOC_G_EXT_CTRLS32:
- case VIDIOC_S_EXT_CTRLS32:
- case VIDIOC_TRY_EXT_CTRLS32:
- case VIDIOC_ENUM_FRAMESIZES:
- case VIDIOC_ENUM_FRAMEINTERVALS:
- case VIDIOC_G_ENC_INDEX:
- case VIDIOC_ENCODER_CMD:
- case VIDIOC_TRY_ENCODER_CMD:
- case VIDIOC_DECODER_CMD:
- case VIDIOC_TRY_DECODER_CMD:
- case VIDIOC_DBG_S_REGISTER:
- case VIDIOC_DBG_G_REGISTER:
- case VIDIOC_DBG_G_CHIP_IDENT:
- case VIDIOC_S_HW_FREQ_SEEK:
- case VIDIOC_ENUM_DV_PRESETS:
- case VIDIOC_S_DV_PRESET:
- case VIDIOC_G_DV_PRESET:
- case VIDIOC_QUERY_DV_PRESET:
- case VIDIOC_S_DV_TIMINGS:
- case VIDIOC_G_DV_TIMINGS:
- case VIDIOC_DQEVENT:
- case VIDIOC_DQEVENT32:
- case VIDIOC_SUBSCRIBE_EVENT:
- case VIDIOC_UNSUBSCRIBE_EVENT:
- case VIDIOC_CREATE_BUFS32:
- case VIDIOC_PREPARE_BUF32:
- case VIDIOC_ENUM_DV_TIMINGS:
- case VIDIOC_QUERY_DV_TIMINGS:
- case VIDIOC_DV_TIMINGS_CAP:
- case VIDIOC_ENUM_FREQ_BANDS:
- case VIDIOC_SUBDEV_G_EDID32:
- case VIDIOC_SUBDEV_S_EDID32:
+ if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
ret = do_video_ioctl(file, cmd, arg);
- break;
+ else if (vdev->fops->compat_ioctl32)
+ ret = vdev->fops->compat_ioctl32(file, cmd, arg);
- default:
- if (vdev->fops->compat_ioctl32)
- ret = vdev->fops->compat_ioctl32(file, cmd, arg);
-
- if (ret == -ENOIOCTLCMD)
- printk(KERN_WARNING "compat_ioctl32: "
- "unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
- _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd),
- cmd);
- break;
- }
+ if (ret == -ENOIOCTLCMD)
+ pr_warn("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+ _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
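
With the ioctl table above removed, anything in the 'V' range below BASE_VIDIOC_PRIVATE is handed to do_video_ioctl(), and everything else, notably driver-private ioctls, goes to the driver's own compat handler. A hedged sketch of how a driver with private ioctls hooks into that path via the existing compat_ioctl32 file operation; the translation itself is driver-specific and only indicated by a comment:

    #include <linux/compat.h>
    #include <media/v4l2-dev.h>
    #include <media/v4l2-ioctl.h>

    #ifdef CONFIG_COMPAT
    static long mydrv_compat_ioctl32(struct file *file, unsigned int cmd,
                                     unsigned long arg)
    {
            /* convert any 32-bit private argument layout here, then forward
             * to the native handler */
            return video_ioctl2(file, cmd, (unsigned long)compat_ptr(arg));
    }
    #endif

    static const struct v4l2_file_operations mydrv_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = video_ioctl2,
    #ifdef CONFIG_COMPAT
            .compat_ioctl32 = mydrv_compat_ioctl32,
    #endif
    };
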
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index f6ee201d934..55c68325410 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -234,6 +234,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Average",
"Center Weighted",
"Spot",
+ "Matrix",
NULL
};
static const char * const camera_auto_focus_range[] = {
@@ -297,8 +298,8 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Text",
NULL
};
- static const char * const tune_preemphasis[] = {
- "No Preemphasis",
+ static const char * const tune_emphasis[] = {
+ "None",
"50 Microseconds",
"75 Microseconds",
NULL,
@@ -419,7 +420,13 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Advanced Simple",
"Core",
"Simple Scalable",
- "Advanced Coding Efficency",
+ "Advanced Coding Efficiency",
+ NULL,
+ };
+
+ static const char * const vpx_golden_frame_sel[] = {
+ "Use Previous Frame",
+ "Use Previous Specific Frame",
NULL,
};
@@ -508,7 +515,9 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
case V4L2_CID_SCENE_MODE:
return scene_mode;
case V4L2_CID_TUNE_PREEMPHASIS:
- return tune_preemphasis;
+ return tune_emphasis;
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ return tune_emphasis;
case V4L2_CID_FLASH_LED_MODE:
return flash_led_mode;
case V4L2_CID_FLASH_STROBE_SOURCE:
@@ -535,6 +544,8 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return mpeg_mpeg4_level;
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
return mpeg4_profile;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ return vpx_golden_frame_sel;
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
return jpeg_chroma_subsampling;
case V4L2_CID_DV_TX_MODE:
@@ -549,6 +560,33 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
}
EXPORT_SYMBOL(v4l2_ctrl_get_menu);
+#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); arr; })
+/*
+ * Returns NULL or an s64 type array containing the menu for given
+ * control ID. The total number of the menu items is returned in @len.
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
+{
+ static const s64 qmenu_int_vpx_num_partitions[] = {
+ 1, 2, 4, 8,
+ };
+
+ static const s64 qmenu_int_vpx_num_ref_frames[] = {
+ 1, 2, 3,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len);
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len);
+ default:
+ *len = 0;
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
+
/* Return the control name. */
const char *v4l2_ctrl_get_name(u32 id)
{
@@ -577,8 +615,6 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_GAIN: return "Gain";
case V4L2_CID_HFLIP: return "Horizontal Flip";
case V4L2_CID_VFLIP: return "Vertical Flip";
- case V4L2_CID_HCENTER: return "Horizontal Center";
- case V4L2_CID_VCENTER: return "Vertical Center";
case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
@@ -599,9 +635,11 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
- /* MPEG controls */
+ /* Codec controls */
+ /* The MPEG controls are applicable to all codec controls
+ * and the 'MPEG' part of the define is historical */
/* Keep the order of the 'case's the same as in videodev2.h! */
- case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
+ case V4L2_CID_MPEG_CLASS: return "Codec Controls";
case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
@@ -697,6 +735,23 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
+
+ /* VPX controls */
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
+ case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable";
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: return "VPX Profile";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -801,6 +856,20 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
+ case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
+ case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
+ case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
+
+ case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
+ case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain";
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto";
+ case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain";
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto";
+ case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth";
+ case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock";
default:
return NULL;
}
@@ -846,12 +915,23 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:
case V4L2_CID_WIDE_DYNAMIC_RANGE:
case V4L2_CID_IMAGE_STABILIZATION:
+ case V4L2_CID_RDS_RECEPTION:
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
break;
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
case V4L2_CID_PAN_RESET:
case V4L2_CID_TILT_RESET:
case V4L2_CID_FLASH_STROBE:
@@ -906,6 +986,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DV_TX_RGB_RANGE:
case V4L2_CID_DV_RX_RGB_RANGE:
case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_LINK_FREQ:
@@ -917,6 +999,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
break;
case V4L2_CID_ISO_SENSITIVITY:
case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
*type = V4L2_CTRL_TYPE_INTEGER_MENU;
break;
case V4L2_CID_USER_CLASS:
@@ -928,6 +1012,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_IMAGE_SOURCE_CLASS:
case V4L2_CID_IMAGE_PROC_CLASS:
case V4L2_CID_DV_CLASS:
+ case V4L2_CID_FM_RX_CLASS:
+ case V4L2_CID_RF_TUNER_CLASS:
*type = V4L2_CTRL_TYPE_CTRL_CLASS;
		/* You can neither read nor write these */
*flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -1000,6 +1086,10 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_PILOT_TONE_FREQUENCY:
case V4L2_CID_TUNE_POWER_LEVEL:
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN:
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
*flags |= V4L2_CTRL_FLAG_SLIDER;
break;
case V4L2_CID_PAN_RELATIVE:
@@ -1018,6 +1108,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DV_RX_POWER_PRESENT:
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ *flags |= V4L2_CTRL_FLAG_VOLATILE;
+ break;
}
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
@@ -1160,8 +1253,7 @@ static int new_to_user(struct v4l2_ext_control *c,
}
/* Copy the new value to the current value. */
-static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- bool update_inactive)
+static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
{
bool changed = false;
@@ -1185,8 +1277,8 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
ctrl->cur.val = ctrl->val;
break;
}
- if (update_inactive) {
- /* Note: update_inactive can only be true for auto clusters. */
+ if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
+ /* Note: CH_FLAGS is only set for auto clusters. */
ctrl->flags &=
~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
if (!is_cur_manual(ctrl->cluster[0])) {
@@ -1196,14 +1288,15 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
}
fh = NULL;
}
- if (changed || update_inactive) {
+ if (changed || ch_flags) {
/* If a control was changed that was not one of the controls
modified by the application, then send the event to all. */
if (!ctrl->is_new)
fh = NULL;
send_event(fh, ctrl,
- (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) |
- (update_inactive ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
+ if (ctrl->call_notify && changed && ctrl->handler->notify)
+ ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
}
}
@@ -1257,6 +1350,41 @@ static int cluster_changed(struct v4l2_ctrl *master)
return diff;
}
+/* Control range checking */
+static int check_range(enum v4l2_ctrl_type type,
+ s32 min, s32 max, u32 step, s32 def)
+{
+ switch (type) {
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (step != 1 || max > 1 || min < 0)
+ return -ERANGE;
+ /* fall through */
+ case V4L2_CTRL_TYPE_INTEGER:
+ if (step <= 0 || min > max || def < min || def > max)
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (step || min || !max || (def & ~max))
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (min > max || def < min || def > max)
+ return -ERANGE;
+ /* Note: step == menu_skip_mask for menu controls.
+ So here we check if the default value is masked out. */
+ if (step && ((1 << def) & step))
+ return -EINVAL;
+ return 0;
+ case V4L2_CTRL_TYPE_STRING:
+ if (min > max || min < 0 || step < 1 || def)
+ return -ERANGE;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
/* Validate a new control */
static int validate_new(const struct v4l2_ctrl *ctrl,
struct v4l2_ext_control *c)
@@ -1329,11 +1457,13 @@ static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
}
/* Initialize the handler */
-int v4l2_ctrl_handler_init(struct v4l2_ctrl_handler *hdl,
- unsigned nr_of_controls_hint)
+int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
+ unsigned nr_of_controls_hint,
+ struct lock_class_key *key, const char *name)
{
hdl->lock = &hdl->_lock;
mutex_init(hdl->lock);
+ lockdep_set_class_and_name(hdl->lock, key, name);
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
@@ -1342,7 +1472,7 @@ int v4l2_ctrl_handler_init(struct v4l2_ctrl_handler *hdl,
hdl->error = hdl->buckets ? 0 : -ENOMEM;
return hdl->error;
}
-EXPORT_SYMBOL(v4l2_ctrl_handler_init);
+EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
/* Free all controls and control refs */
void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
@@ -1529,30 +1659,21 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra = 0;
+ int err;
if (hdl->error)
return NULL;
/* Sanity checks */
if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
- (type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
- (type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
(type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
- (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL) ||
- (type == V4L2_CTRL_TYPE_STRING && max == 0)) {
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
handler_set_err(hdl, -ERANGE);
return NULL;
}
- if (type != V4L2_CTRL_TYPE_BITMASK && max < min) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
- if ((type == V4L2_CTRL_TYPE_INTEGER ||
- type == V4L2_CTRL_TYPE_MENU ||
- type == V4L2_CTRL_TYPE_INTEGER_MENU ||
- type == V4L2_CTRL_TYPE_BOOLEAN) &&
- (def < min || def > max)) {
- handler_set_err(hdl, -ERANGE);
+ err = check_range(type, min, max, step, def);
+ if (err) {
+ handler_set_err(hdl, err);
return NULL;
}
if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
@@ -1675,7 +1796,9 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, s32 max, s32 mask, s32 def)
{
- const char * const *qmenu = v4l2_ctrl_get_menu(id);
+ const char * const *qmenu = NULL;
+ const s64 *qmenu_int = NULL;
+ unsigned int qmenu_int_len = 0;
const char *name;
enum v4l2_ctrl_type type;
s32 min;
@@ -1683,12 +1806,18 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
u32 flags;
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type != V4L2_CTRL_TYPE_MENU) {
+
+ if (type == V4L2_CTRL_TYPE_MENU)
+ qmenu = v4l2_ctrl_get_menu(id);
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
+
+ if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, id, name, type,
- 0, max, mask, def, flags, qmenu, NULL, NULL);
+ 0, max, mask, def, flags, qmenu, qmenu_int, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
@@ -1798,6 +1927,8 @@ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
{
if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
return true;
+ if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
+ return true;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
case V4L2_CID_AUDIO_VOLUME:
@@ -1820,7 +1951,8 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
int i;
/* The first control is the master control and it must not be NULL */
- BUG_ON(ncontrols == 0 || controls[0] == NULL);
+ if (WARN_ON(ncontrols == 0 || controls[0] == NULL))
+ return;
for (i = 0; i < ncontrols; i++) {
if (controls[i]) {
@@ -1980,6 +2112,13 @@ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
}
EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
+int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
+{
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
+
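A short sketch of the intended use: a sub-device driver can now point its core ops straight at these helpers instead of writing thin wrappers; the foo_core_ops name is hypothetical.

static const struct v4l2_subdev_core_ops foo_core_ops = {
	/* logs every control owned by sd->ctrl_handler */
	.log_status = v4l2_ctrl_subdev_log_status,
	.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};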
/* Call s_ctrl for all controls owned by the handler */
int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
{
@@ -2426,8 +2565,8 @@ EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
/* Core function that calls try/s_ctrl and ensures that the new value is
copied to the current value on a set.
Must be called with ctrl->handler->lock held. */
-static int try_or_set_cluster(struct v4l2_fh *fh,
- struct v4l2_ctrl *master, bool set)
+static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags)
{
bool update_flag;
int ret;
@@ -2465,7 +2604,8 @@ static int try_or_set_cluster(struct v4l2_fh *fh,
/* If OK, then make the new values permanent. */
update_flag = is_cur_manual(master) != is_new_manual(master);
for (i = 0; i < master->ncontrols; i++)
- new_to_cur(fh, master->cluster[i], update_flag && i > 0);
+ new_to_cur(fh, master->cluster[i], ch_flags |
+ ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
return 0;
}
@@ -2592,7 +2732,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
} while (!ret && idx);
if (!ret)
- ret = try_or_set_cluster(fh, master, set);
+ ret = try_or_set_cluster(fh, master, set, 0);
/* Copy the new values back to userspace. */
if (!ret) {
@@ -2638,10 +2778,9 @@ EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
/* Helper function for VIDIOC_S_CTRL compatibility */
static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
- struct v4l2_ext_control *c)
+ struct v4l2_ext_control *c, u32 ch_flags)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
- int ret;
int i;
/* String controls are not supported. The user_to_new() and
@@ -2651,12 +2790,6 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
if (ctrl->type == V4L2_CTRL_TYPE_STRING)
return -EINVAL;
- ret = validate_new(ctrl, c);
- if (ret)
- return ret;
-
- v4l2_ctrl_lock(ctrl);
-
/* Reset the 'is_new' flags of the cluster */
for (i = 0; i < master->ncontrols; i++)
if (master->cluster[i])
@@ -2670,10 +2803,22 @@ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
update_from_auto_cluster(master);
user_to_new(c, ctrl);
- ret = try_or_set_cluster(fh, master, true);
- cur_to_user(c, ctrl);
+ return try_or_set_cluster(fh, master, true, ch_flags);
+}
- v4l2_ctrl_unlock(ctrl);
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ struct v4l2_ext_control *c)
+{
+ int ret = validate_new(ctrl, c);
+
+ if (!ret) {
+ v4l2_ctrl_lock(ctrl);
+ ret = set_ctrl(fh, ctrl, c, 0);
+ if (!ret)
+ cur_to_user(c, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ }
return ret;
}
@@ -2691,7 +2836,7 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return -EACCES;
c.value = control->value;
- ret = set_ctrl(fh, ctrl, &c);
+ ret = set_ctrl_lock(fh, ctrl, &c);
control->value = c.value;
return ret;
}
@@ -2710,7 +2855,7 @@ int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
/* It's a driver bug if this happens. */
WARN_ON(!type_is_int(ctrl));
c.value = val;
- return set_ctrl(NULL, ctrl, &c);
+ return set_ctrl_lock(NULL, ctrl, &c);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
@@ -2721,10 +2866,61 @@ int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
/* It's a driver bug if this happens. */
WARN_ON(ctrl->type != V4L2_CTRL_TYPE_INTEGER64);
c.value64 = val;
- return set_ctrl(NULL, ctrl, &c);
+ return set_ctrl_lock(NULL, ctrl, &c);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl_int64);
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
+{
+ if (ctrl == NULL)
+ return;
+ if (notify == NULL) {
+ ctrl->call_notify = 0;
+ return;
+ }
+ if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
+ return;
+ ctrl->handler->notify = notify;
+ ctrl->handler->notify_priv = priv;
+ ctrl->call_notify = 1;
+}
+EXPORT_SYMBOL(v4l2_ctrl_notify);
+
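A sketch of how a bridge driver might use the new notify hook to mirror a control change into its own hardware; foo_dev, foo_update_volume() and the probe-time snippet are assumptions for illustration.

static void foo_notify(struct v4l2_ctrl *ctrl, void *priv)
{
	struct foo_dev *dev = priv;

	/* called with the handler's lock held whenever the control changes */
	if (ctrl->id == V4L2_CID_AUDIO_VOLUME)
		foo_update_volume(dev, ctrl->val);
}

/* at probe time, once the control handler is populated: */
v4l2_ctrl_notify(v4l2_ctrl_find(&dev->hdl, V4L2_CID_AUDIO_VOLUME),
		 foo_notify, dev);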
+int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
+ s32 min, s32 max, u32 step, s32 def)
+{
+ int ret = check_range(ctrl->type, min, max, step, def);
+ struct v4l2_ext_control c;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ v4l2_ctrl_lock(ctrl);
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ c.value = ctrl->cur.val;
+ if (validate_new(ctrl, &c))
+ c.value = def;
+ if (c.value != ctrl->cur.val)
+ ret = set_ctrl(NULL, ctrl, &c, V4L2_EVENT_CTRL_CH_RANGE);
+ else
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ v4l2_ctrl_unlock(ctrl);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_modify_range);
+
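A sketch of a typical caller: a sensor driver widening or shrinking an integer control's range at runtime; foo_sensor and exposure_ctrl are assumed names and the values are made up.

static int foo_update_exposure_range(struct foo_sensor *sensor, s32 max_lines)
{
	/*
	 * Adjust min/max/step/default in one go; subscribers receive a
	 * V4L2_EVENT_CTRL with V4L2_EVENT_CTRL_CH_RANGE set, and the
	 * current value is adjusted if it no longer fits the new range.
	 */
	return v4l2_ctrl_modify_range(sensor->exposure_ctrl,
				      1, max_lines, 1, max_lines);
}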
static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
@@ -2804,6 +3000,15 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
}
EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (!sd->ctrl_handler)
+ return -EINVAL;
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
+
unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 98dcad9c8a3..634d863c05b 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -38,24 +38,25 @@
* sysfs stuff
*/
-static ssize_t show_index(struct device *cd,
- struct device_attribute *attr, char *buf)
+static ssize_t index_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%i\n", vdev->index);
}
+static DEVICE_ATTR_RO(index);
-static ssize_t show_debug(struct device *cd,
- struct device_attribute *attr, char *buf)
+static ssize_t debug_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%i\n", vdev->debug);
}
-static ssize_t set_debug(struct device *cd, struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t debug_store(struct device *cd, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct video_device *vdev = to_video_device(cd);
int res = 0;
@@ -68,21 +69,24 @@ static ssize_t set_debug(struct device *cd, struct device_attribute *attr,
vdev->debug = value;
return len;
}
+static DEVICE_ATTR_RW(debug);
-static ssize_t show_name(struct device *cd,
+static ssize_t name_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
}
+static DEVICE_ATTR_RO(name);
-static struct device_attribute video_device_attrs[] = {
- __ATTR(name, S_IRUGO, show_name, NULL),
- __ATTR(debug, 0644, show_debug, set_debug),
- __ATTR(index, S_IRUGO, show_index, NULL),
- __ATTR_NULL
+static struct attribute *video_device_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_debug.attr,
+ &dev_attr_index.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(video_device);
/*
* Active devices
@@ -217,12 +221,12 @@ static void v4l2_device_release(struct device *cd)
static struct class video_class = {
.name = VIDEO_NAME,
- .dev_attrs = video_device_attrs,
+ .dev_groups = video_device_groups,
};
struct video_device *video_devdata(struct file *file)
{
- return video_device[iminor(file->f_path.dentry->d_inode)];
+ return video_device[iminor(file_inode(file))];
}
EXPORT_SYMBOL(video_devdata);
@@ -495,8 +499,8 @@ static const struct file_operations v4l2_fops = {
};
/**
- * get_index - assign stream index number based on parent device
- * @vdev: video_device to assign index number to, vdev->parent should be assigned
+ * get_index - assign stream index number based on v4l2_dev
+ * @vdev: video_device to assign index number to, vdev->v4l2_dev should be assigned
*
* Note that when this is called the new device has not yet been registered
* in the video_device array, but it was able to obtain a minor number.
@@ -514,15 +518,11 @@ static int get_index(struct video_device *vdev)
static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES);
int i;
- /* Some drivers do not set the parent. In that case always return 0. */
- if (vdev->parent == NULL)
- return 0;
-
bitmap_zero(used, VIDEO_NUM_DEVICES);
for (i = 0; i < VIDEO_NUM_DEVICES; i++) {
if (video_device[i] != NULL &&
- video_device[i]->parent == vdev->parent) {
+ video_device[i]->v4l2_dev == vdev->v4l2_dev) {
set_bit(video_device[i]->index, used);
}
}
@@ -554,6 +554,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
+ bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
@@ -568,11 +569,6 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_s_priority ||
test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
- SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
- SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
- SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
- SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
/* Note: the control handler can also be passed through the filehandle,
@@ -597,16 +593,14 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
- SET_VALID_IOCTL(ops, VIDIOC_DBG_G_REGISTER, vidioc_g_register);
- SET_VALID_IOCTL(ops, VIDIOC_DBG_S_REGISTER, vidioc_s_register);
+ set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls);
#endif
- SET_VALID_IOCTL(ops, VIDIOC_DBG_G_CHIP_IDENT, vidioc_g_chip_ident);
/* yes, really vidioc_subscribe_event */
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
- SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
- SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
@@ -669,14 +663,35 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_try_fmt_sliced_vbi_out)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_sdr) {
+ /* SDR specific ioctls */
+ if (ops->vidioc_enum_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ if (ops->vidioc_g_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if (ops->vidioc_s_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if (ops->vidioc_try_fmt_sdr_cap)
+ set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ }
+
+ if (is_vid || is_vbi || is_sdr) {
+ /* ioctls valid for video, vbi or sdr */
+ SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
+ SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
+ SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
}
- if (!is_radio) {
+
+ if (is_vid || is_vbi) {
/* ioctls valid for video or vbi */
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
- if (ops->vidioc_g_std || vdev->current_norm)
- set_bit(_IOC_NR(VIDIOC_G_STD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
+ SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std);
if (is_rx) {
SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
@@ -685,8 +700,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
- SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
}
if (is_tx) {
SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
@@ -705,19 +720,17 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_cropcap || ops->vidioc_g_selection)
set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
- (ops->vidioc_g_std || vdev->current_norm)))
+ ops->vidioc_g_std))
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
- SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_PRESETS, vidioc_enum_dv_presets);
- SET_VALID_IOCTL(ops, VIDIOC_S_DV_PRESET, vidioc_s_dv_preset);
- SET_VALID_IOCTL(ops, VIDIOC_G_DV_PRESET, vidioc_g_dv_preset);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
}
- if (is_tx) {
- /* transmitter only ioctls */
+ if (is_tx && (is_radio || is_sdr)) {
+ /* radio transmitter only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
}
@@ -763,6 +776,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
* %VFL_TYPE_RADIO - A radio card
*
* %VFL_TYPE_SUBDEV - A subdevice
+ *
+ * %VFL_TYPE_SDR - Software Defined Radio
*/
int __video_register_device(struct video_device *vdev, int type, int nr,
int warn_if_nr_in_use, struct module *owner)
@@ -780,6 +795,9 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* the release callback MUST be present */
if (WARN_ON(!vdev->release))
return -EINVAL;
+ /* the v4l2_dev pointer MUST be present */
+ if (WARN_ON(!vdev->v4l2_dev))
+ return -EINVAL;
/* v4l2_fh support */
spin_lock_init(&vdev->fh_lock);
@@ -799,6 +817,10 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
case VFL_TYPE_SUBDEV:
name_base = "v4l-subdev";
break;
+ case VFL_TYPE_SDR:
+ /* Use device name 'swradio' because 'sdr' was already taken. */
+ name_base = "swradio";
+ break;
default:
printk(KERN_ERR "%s called with unknown type: %d\n",
__func__, type);
@@ -807,16 +829,14 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
vdev->vfl_type = type;
vdev->cdev = NULL;
- if (vdev->v4l2_dev) {
- if (vdev->v4l2_dev->dev)
- vdev->parent = vdev->v4l2_dev->dev;
- if (vdev->ctrl_handler == NULL)
- vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler;
- /* If the prio state pointer is NULL, then use the v4l2_device
- prio state. */
- if (vdev->prio == NULL)
- vdev->prio = &vdev->v4l2_dev->prio;
- }
+ if (vdev->dev_parent == NULL)
+ vdev->dev_parent = vdev->v4l2_dev->dev;
+ if (vdev->ctrl_handler == NULL)
+ vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler;
+ /* If the prio state pointer is NULL, then use the v4l2_device
+ prio state. */
+ if (vdev->prio == NULL)
+ vdev->prio = &vdev->v4l2_dev->prio;
/* Part 2: find a free minor, device node number and device index. */
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
@@ -877,6 +897,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* Should not happen since we thought this minor was free */
WARN_ON(video_device[vdev->minor] != NULL);
vdev->index = get_index(vdev);
+ video_device[vdev->minor] = vdev;
mutex_unlock(&videodev_lock);
if (vdev->ioctl_ops)
@@ -901,8 +922,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* Part 4: register the device with sysfs */
vdev->dev.class = &video_class;
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
- if (vdev->parent)
- vdev->dev.parent = vdev->parent;
+ vdev->dev.parent = vdev->dev_parent;
dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
ret = device_register(&vdev->dev);
if (ret < 0) {
@@ -939,9 +959,6 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
#endif
/* Part 6: Activate this minor. The char device can now be used. */
set_bit(V4L2_FL_REGISTERED, &vdev->flags);
- mutex_lock(&videodev_lock);
- video_device[vdev->minor] = vdev;
- mutex_unlock(&videodev_lock);
return 0;
@@ -949,6 +966,7 @@ cleanup:
mutex_lock(&videodev_lock);
if (vdev->cdev)
cdev_del(vdev->cdev);
+ video_device[vdev->minor] = NULL;
devnode_clear(vdev);
mutex_unlock(&videodev_lock);
/* Mark this video device as never having been registered. */
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 513969fa695..015f92aab44 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -44,7 +44,8 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
v4l2_dev->dev = dev;
if (dev == NULL) {
/* If dev == NULL, then name must be filled in by the caller */
- WARN_ON(!v4l2_dev->name[0]);
+ if (WARN_ON(!v4l2_dev->name[0]))
+ return -EINVAL;
return 0;
}
@@ -105,14 +106,16 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
{
struct v4l2_subdev *sd, *next;
- if (v4l2_dev == NULL)
+	/* Just return if v4l2_dev is NULL or if it has already
+	 * been unregistered. */
+ if (v4l2_dev == NULL || !v4l2_dev->name[0])
return;
v4l2_device_disconnect(v4l2_dev);
/* Unregister subdevs */
list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
v4l2_device_unregister_subdev(sd);
-#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_I2C)
if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -135,6 +138,8 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
}
#endif
}
+ /* Mark as unregistered, thus preventing duplicate unregistrations */
+ v4l2_dev->name[0] = '\0';
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
@@ -153,37 +158,37 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
/* Warn if we apparently re-register a subdev */
WARN_ON(sd->v4l2_dev != NULL);
- if (!try_module_get(sd->owner))
+ /*
+	 * The module is acquired here to avoid unloading a sub-device
+	 * module while it is registered to a media device. To still make
+	 * it possible to unload modules for media devices that register
+	 * their own sub-devices, do not try_module_get() such sub-device
+	 * owners.
+ */
+ sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver &&
+ sd->owner == v4l2_dev->dev->driver->owner;
+
+ if (!sd->owner_v4l2_dev && !try_module_get(sd->owner))
return -ENODEV;
sd->v4l2_dev = v4l2_dev;
if (sd->internal_ops && sd->internal_ops->registered) {
err = sd->internal_ops->registered(sd);
- if (err) {
- module_put(sd->owner);
- return err;
- }
+ if (err)
+ goto error_module;
}
/* This just returns 0 if either of the two args is NULL */
err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
- if (err) {
- if (sd->internal_ops && sd->internal_ops->unregistered)
- sd->internal_ops->unregistered(sd);
- module_put(sd->owner);
- return err;
- }
+ if (err)
+ goto error_unregister;
#if defined(CONFIG_MEDIA_CONTROLLER)
/* Register the entity. */
if (v4l2_dev->mdev) {
err = media_device_register_entity(v4l2_dev->mdev, entity);
- if (err < 0) {
- if (sd->internal_ops && sd->internal_ops->unregistered)
- sd->internal_ops->unregistered(sd);
- module_put(sd->owner);
- return err;
- }
+ if (err < 0)
+ goto error_unregister;
}
#endif
@@ -192,6 +197,15 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
spin_unlock(&v4l2_dev->lock);
return 0;
+
+error_unregister:
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
+error_module:
+ if (!sd->owner_v4l2_dev)
+ module_put(sd->owner);
+ sd->v4l2_dev = NULL;
+ return err;
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
@@ -271,10 +285,13 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
sd->v4l2_dev = NULL;
#if defined(CONFIG_MEDIA_CONTROLLER)
- if (v4l2_dev->mdev)
+ if (v4l2_dev->mdev) {
+ media_entity_remove_links(&sd->entity);
media_device_unregister_entity(&sd->entity);
+ }
#endif
video_unregister_device(sd->devnode);
- module_put(sd->owner);
+ if (!sd->owner_v4l2_dev)
+ module_put(sd->owner);
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
new file mode 100644
index 00000000000..ce1c9f5d9de
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -0,0 +1,629 @@
+/*
+ * v4l2-dv-timings - dv-timings helper functions
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-dv-timings.h>
+
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
+MODULE_LICENSE("GPL");
+
+const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
+ V4L2_DV_BT_CEA_640X480P59_94,
+ V4L2_DV_BT_CEA_720X480I59_94,
+ V4L2_DV_BT_CEA_720X480P59_94,
+ V4L2_DV_BT_CEA_720X576I50,
+ V4L2_DV_BT_CEA_720X576P50,
+ V4L2_DV_BT_CEA_1280X720P24,
+ V4L2_DV_BT_CEA_1280X720P25,
+ V4L2_DV_BT_CEA_1280X720P30,
+ V4L2_DV_BT_CEA_1280X720P50,
+ V4L2_DV_BT_CEA_1280X720P60,
+ V4L2_DV_BT_CEA_1920X1080P24,
+ V4L2_DV_BT_CEA_1920X1080P25,
+ V4L2_DV_BT_CEA_1920X1080P30,
+ V4L2_DV_BT_CEA_1920X1080I50,
+ V4L2_DV_BT_CEA_1920X1080P50,
+ V4L2_DV_BT_CEA_1920X1080I60,
+ V4L2_DV_BT_CEA_1920X1080P60,
+ V4L2_DV_BT_DMT_640X350P85,
+ V4L2_DV_BT_DMT_640X400P85,
+ V4L2_DV_BT_DMT_720X400P85,
+ V4L2_DV_BT_DMT_640X480P72,
+ V4L2_DV_BT_DMT_640X480P75,
+ V4L2_DV_BT_DMT_640X480P85,
+ V4L2_DV_BT_DMT_800X600P56,
+ V4L2_DV_BT_DMT_800X600P60,
+ V4L2_DV_BT_DMT_800X600P72,
+ V4L2_DV_BT_DMT_800X600P75,
+ V4L2_DV_BT_DMT_800X600P85,
+ V4L2_DV_BT_DMT_800X600P120_RB,
+ V4L2_DV_BT_DMT_848X480P60,
+ V4L2_DV_BT_DMT_1024X768I43,
+ V4L2_DV_BT_DMT_1024X768P60,
+ V4L2_DV_BT_DMT_1024X768P70,
+ V4L2_DV_BT_DMT_1024X768P75,
+ V4L2_DV_BT_DMT_1024X768P85,
+ V4L2_DV_BT_DMT_1024X768P120_RB,
+ V4L2_DV_BT_DMT_1152X864P75,
+ V4L2_DV_BT_DMT_1280X768P60_RB,
+ V4L2_DV_BT_DMT_1280X768P60,
+ V4L2_DV_BT_DMT_1280X768P75,
+ V4L2_DV_BT_DMT_1280X768P85,
+ V4L2_DV_BT_DMT_1280X768P120_RB,
+ V4L2_DV_BT_DMT_1280X800P60_RB,
+ V4L2_DV_BT_DMT_1280X800P60,
+ V4L2_DV_BT_DMT_1280X800P75,
+ V4L2_DV_BT_DMT_1280X800P85,
+ V4L2_DV_BT_DMT_1280X800P120_RB,
+ V4L2_DV_BT_DMT_1280X960P60,
+ V4L2_DV_BT_DMT_1280X960P85,
+ V4L2_DV_BT_DMT_1280X960P120_RB,
+ V4L2_DV_BT_DMT_1280X1024P60,
+ V4L2_DV_BT_DMT_1280X1024P75,
+ V4L2_DV_BT_DMT_1280X1024P85,
+ V4L2_DV_BT_DMT_1280X1024P120_RB,
+ V4L2_DV_BT_DMT_1360X768P60,
+ V4L2_DV_BT_DMT_1360X768P120_RB,
+ V4L2_DV_BT_DMT_1366X768P60,
+ V4L2_DV_BT_DMT_1366X768P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60,
+ V4L2_DV_BT_DMT_1400X1050P75,
+ V4L2_DV_BT_DMT_1400X1050P85,
+ V4L2_DV_BT_DMT_1400X1050P120_RB,
+ V4L2_DV_BT_DMT_1440X900P60_RB,
+ V4L2_DV_BT_DMT_1440X900P60,
+ V4L2_DV_BT_DMT_1440X900P75,
+ V4L2_DV_BT_DMT_1440X900P85,
+ V4L2_DV_BT_DMT_1440X900P120_RB,
+ V4L2_DV_BT_DMT_1600X900P60_RB,
+ V4L2_DV_BT_DMT_1600X1200P60,
+ V4L2_DV_BT_DMT_1600X1200P65,
+ V4L2_DV_BT_DMT_1600X1200P70,
+ V4L2_DV_BT_DMT_1600X1200P75,
+ V4L2_DV_BT_DMT_1600X1200P85,
+ V4L2_DV_BT_DMT_1600X1200P120_RB,
+ V4L2_DV_BT_DMT_1680X1050P60_RB,
+ V4L2_DV_BT_DMT_1680X1050P60,
+ V4L2_DV_BT_DMT_1680X1050P75,
+ V4L2_DV_BT_DMT_1680X1050P85,
+ V4L2_DV_BT_DMT_1680X1050P120_RB,
+ V4L2_DV_BT_DMT_1792X1344P60,
+ V4L2_DV_BT_DMT_1792X1344P75,
+ V4L2_DV_BT_DMT_1792X1344P120_RB,
+ V4L2_DV_BT_DMT_1856X1392P60,
+ V4L2_DV_BT_DMT_1856X1392P75,
+ V4L2_DV_BT_DMT_1856X1392P120_RB,
+ V4L2_DV_BT_DMT_1920X1200P60_RB,
+ V4L2_DV_BT_DMT_1920X1200P60,
+ V4L2_DV_BT_DMT_1920X1200P75,
+ V4L2_DV_BT_DMT_1920X1200P85,
+ V4L2_DV_BT_DMT_1920X1200P120_RB,
+ V4L2_DV_BT_DMT_1920X1440P60,
+ V4L2_DV_BT_DMT_1920X1440P75,
+ V4L2_DV_BT_DMT_1920X1440P120_RB,
+ V4L2_DV_BT_DMT_2048X1152P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60,
+ V4L2_DV_BT_DMT_2560X1600P75,
+ V4L2_DV_BT_DMT_2560X1600P85,
+ V4L2_DV_BT_DMT_2560X1600P120_RB,
+ V4L2_DV_BT_CEA_3840X2160P24,
+ V4L2_DV_BT_CEA_3840X2160P25,
+ V4L2_DV_BT_CEA_3840X2160P30,
+ V4L2_DV_BT_CEA_3840X2160P50,
+ V4L2_DV_BT_CEA_3840X2160P60,
+ V4L2_DV_BT_CEA_4096X2160P24,
+ V4L2_DV_BT_CEA_4096X2160P25,
+ V4L2_DV_BT_CEA_4096X2160P30,
+ V4L2_DV_BT_CEA_4096X2160P50,
+ V4L2_DV_BT_DMT_4096X2160P59_94_RB,
+ V4L2_DV_BT_CEA_4096X2160P60,
+ { }
+};
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets);
+
+bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *dvcap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
+ u32 caps = cap->capabilities;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t->type != dvcap->type ||
+ bt->height < cap->min_height ||
+ bt->height > cap->max_height ||
+ bt->width < cap->min_width ||
+ bt->width > cap->max_width ||
+ bt->pixelclock < cap->min_pixelclock ||
+ bt->pixelclock > cap->max_pixelclock ||
+ (cap->standards && !(bt->standards & cap->standards)) ||
+ (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
+ (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
+ return false;
+ return fnc == NULL || fnc(t, fnc_handle);
+}
+EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
+
+int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ u32 i, idx;
+
+ memset(t->reserved, 0, sizeof(t->reserved));
+ for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ idx++ == t->index) {
+ t->timings = v4l2_dv_timings_presets[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap);
+
+bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ unsigned pclock_delta,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ int i;
+
+ if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
+ return false;
+
+ for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
+ pclock_delta)) {
+ *t = v4l2_dv_timings_presets[i];
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
+
+/**
+ * v4l2_match_dv_timings - check if two timings match
+ * @t1 - compare this v4l2_dv_timings struct...
+ * @t2 - with this struct.
+ * @pclock_delta - the allowed pixelclock deviation.
+ *
+ * Compare t1 with t2 with a given margin of error for the pixelclock.
+ */
+bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1,
+ const struct v4l2_dv_timings *t2,
+ unsigned pclock_delta)
+{
+ if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t1->bt.width == t2->bt.width &&
+ t1->bt.height == t2->bt.height &&
+ t1->bt.interlaced == t2->bt.interlaced &&
+ t1->bt.polarities == t2->bt.polarities &&
+ t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
+ t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
+ t1->bt.hfrontporch == t2->bt.hfrontporch &&
+ t1->bt.vfrontporch == t2->bt.vfrontporch &&
+ t1->bt.vsync == t2->bt.vsync &&
+ t1->bt.vbackporch == t2->bt.vbackporch &&
+ (!t1->bt.interlaced ||
+ (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
+ t1->bt.il_vsync == t2->bt.il_vsync &&
+ t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_match_dv_timings);
+
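A sketch of how these helpers typically combine in a receiver's query_dv_timings path; foo_measure_timings() and foo_timings_cap are placeholders for driver-specific code and capabilities.

static int foo_query_dv_timings(struct v4l2_subdev *sd,
				struct v4l2_dv_timings *timings)
{
	struct v4l2_dv_timings t = { };

	if (foo_measure_timings(sd, &t))	/* hypothetical hw readout */
		return -ENOLINK;
	if (!v4l2_valid_dv_timings(&t, &foo_timings_cap, NULL, NULL))
		return -ERANGE;
	/* snap to a known preset if the measured timings are close enough */
	v4l2_find_dv_timings_cap(&t, &foo_timings_cap, 250000, NULL, NULL);
	*timings = t;
	return 0;
}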
+void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
+ const struct v4l2_dv_timings *t, bool detailed)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ u32 htot, vtot;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+
+ if (prefix == NULL)
+ prefix = "";
+
+ pr_info("%s: %s%ux%u%s%u (%ux%u)\n", dev_prefix, prefix,
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ (htot * vtot) > 0 ? ((u32)bt->pixelclock / (htot * vtot)) : 0,
+ htot, vtot);
+
+ if (!detailed)
+ return;
+
+ pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->hfrontporch,
+ (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
+ bt->hsync, bt->hbackporch);
+ pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->vfrontporch,
+ (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
+ bt->vsync, bt->vbackporch);
+ pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
+ pr_info("%s: flags (0x%x):%s%s%s%s\n", dev_prefix, bt->flags,
+ (bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
+ " REDUCED_BLANKING" : "",
+ (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
+ " CAN_REDUCE_FPS" : "",
+ (bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
+ " REDUCED_FPS" : "",
+ (bt->flags & V4L2_DV_FL_HALF_LINE) ?
+ " HALF_LINE" : "");
+ pr_info("%s: standards (0x%x):%s%s%s%s\n", dev_prefix, bt->standards,
+ (bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
+ (bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
+ (bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
+ (bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "");
+}
+EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
+
+/*
+ * CVT defines
+ * Based on Coordinated Video Timings Standard
+ * version 1.1 September 10, 2003
+ */
+
+#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+
+/* Normal blanking */
+#define CVT_MIN_V_BPORCH 7 /* lines */
+#define CVT_MIN_V_PORCH_RND 3 /* lines */
+#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+
+/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
+#define CVT_CELL_GRAN 8 /* character cell granularity */
+#define CVT_M 600 /* blanking formula gradient */
+#define CVT_C 40 /* blanking formula offset */
+#define CVT_K 128 /* blanking formula scaling factor */
+#define CVT_J 20 /* blanking formula scaling factor */
+#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
+#define CVT_M_PRIME (CVT_K * CVT_M / 256)
+
+/* Reduced Blanking */
+#define CVT_RB_MIN_V_BPORCH 7 /* lines */
+#define CVT_RB_V_FPORCH 3 /* lines */
+#define CVT_RB_MIN_V_BLANK 460 /* us */
+#define CVT_RB_H_SYNC 32 /* pixels */
+#define CVT_RB_H_BPORCH 80 /* pixels */
+#define CVT_RB_H_BLANK 160 /* pixels */
+
+/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ * @frame_height - the total height of the frame (including blanking) in lines.
+ * @hfreq - the horizontal frequency in Hz.
+ * @vsync - the height of the vertical sync in lines.
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @fmt - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid CVT format. If so, then it will return true, and fmt will be filled
+ * in with the found CVT timings.
+ *
+ * TODO: VESA defined a new version 2 of their reduced blanking
+ * formula. Support for that is currently missing in this CVT
+ * detection function.
+ */
+bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
+ u32 polarities, struct v4l2_dv_timings *fmt)
+{
+ int v_fp, v_bp, h_fp, h_bp, hsync;
+ int frame_width, image_height, image_width;
+ bool reduced_blanking;
+ unsigned pix_clk;
+
+ if (vsync < 4 || vsync > 7)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ reduced_blanking = false;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ reduced_blanking = true;
+ else
+ return false;
+
+ /* Vertical */
+ if (reduced_blanking) {
+ v_fp = CVT_RB_V_FPORCH;
+ v_bp = (CVT_RB_MIN_V_BLANK * hfreq + 1999999) / 1000000;
+ v_bp -= vsync + v_fp;
+
+ if (v_bp < CVT_RB_MIN_V_BPORCH)
+ v_bp = CVT_RB_MIN_V_BPORCH;
+ } else {
+ v_fp = CVT_MIN_V_PORCH_RND;
+ v_bp = (CVT_MIN_VSYNC_BP * hfreq + 1999999) / 1000000 - vsync;
+
+ if (v_bp < CVT_MIN_V_BPORCH)
+ v_bp = CVT_MIN_V_BPORCH;
+ }
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ /* Aspect ratio based on vsync */
+ switch (vsync) {
+ case 4:
+ image_width = (image_height * 4) / 3;
+ break;
+ case 5:
+ image_width = (image_height * 16) / 9;
+ break;
+ case 6:
+ image_width = (image_height * 16) / 10;
+ break;
+ case 7:
+ /* special case */
+ if (image_height == 1024)
+ image_width = (image_height * 5) / 4;
+ else if (image_height == 768)
+ image_width = (image_height * 15) / 9;
+ else
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ image_width = image_width & ~7;
+
+ /* Horizontal */
+ if (reduced_blanking) {
+ pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
+ pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+
+ h_bp = CVT_RB_H_BPORCH;
+ hsync = CVT_RB_H_SYNC;
+ h_fp = CVT_RB_H_BLANK - h_bp - hsync;
+
+ frame_width = image_width + CVT_RB_H_BLANK;
+ } else {
+ unsigned ideal_duty_cycle_per_myriad =
+ 100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
+ int h_blank;
+
+ if (ideal_duty_cycle_per_myriad < 2000)
+ ideal_duty_cycle_per_myriad = 2000;
+
+ h_blank = image_width * ideal_duty_cycle_per_myriad /
+ (10000 - ideal_duty_cycle_per_myriad);
+ h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+
+ h_bp = h_blank / 2;
+ frame_width = image_width + h_blank;
+
+ hsync = (frame_width * 8 + 50) / 100;
+ hsync = hsync - hsync % CVT_CELL_GRAN;
+ h_fp = h_blank - hsync - h_bp;
+ }
+
+ fmt->type = V4L2_DV_BT_656_1120;
+ fmt->bt.polarities = polarities;
+ fmt->bt.width = image_width;
+ fmt->bt.height = image_height;
+ fmt->bt.hfrontporch = h_fp;
+ fmt->bt.vfrontporch = v_fp;
+ fmt->bt.hsync = hsync;
+ fmt->bt.vsync = vsync;
+ fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
+ fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ fmt->bt.pixelclock = pix_clk;
+ fmt->bt.standards = V4L2_DV_BT_STD_CVT;
+ if (reduced_blanking)
+ fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
+
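A sketch of calling the CVT detector with measured sync parameters; the numbers are illustrative only and do not correspond to a specific mode.

struct v4l2_dv_timings timings = { };

/* 1125 total lines, ~67.5 kHz line rate, 5-line vsync, +vsync polarity */
if (v4l2_detect_cvt(1125, 67500, 5, V4L2_DV_VSYNC_POS_POL, &timings))
	v4l2_print_dv_timings("foo", "CVT detected: ", &timings, true);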
+/*
+ * GTF defines
+ * Based on Generalized Timing Formula Standard
+ * Version 1.1 September 2, 1999
+ */
+
+#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+
+#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+#define GTF_V_FP 1 /* vertical front porch (lines) */
+#define GTF_CELL_GRAN 8 /* character cell granularity */
+
+/* Default */
+#define GTF_D_M 600 /* blanking formula gradient */
+#define GTF_D_C 40 /* blanking formula offset */
+#define GTF_D_K 128 /* blanking formula scaling factor */
+#define GTF_D_J 20 /* blanking formula scaling factor */
+#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
+#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
+
+/* Secondary */
+#define GTF_S_M 3600 /* blanking formula gradient */
+#define GTF_S_C 40 /* blanking formula offset */
+#define GTF_S_K 128 /* blanking formula scaling factor */
+#define GTF_S_J 35 /* blanking formula scaling factor */
+#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
+#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
+
+/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ * @frame_height - the total height of the frame (including blanking) in lines.
+ * @hfreq - the horizontal frequency in Hz.
+ * @vsync - the height of the vertical sync in lines.
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @aspect - preferred aspect ratio. GTF has no method of determining the
+ * aspect ratio in order to derive the image width from the
+ * image height, so it has to be passed explicitly. Usually
+ * the native screen aspect ratio is used for this. If it
+ * is not filled in correctly, then 16:9 will be assumed.
+ * @fmt - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid GTF format. If so, then it will return true, and fmt will be filled
+ * in with the found GTF timings.
+ */
+bool v4l2_detect_gtf(unsigned frame_height,
+ unsigned hfreq,
+ unsigned vsync,
+ u32 polarities,
+ struct v4l2_fract aspect,
+ struct v4l2_dv_timings *fmt)
+{
+ int pix_clk;
+ int v_fp, v_bp, h_fp, hsync;
+ int frame_width, image_height, image_width;
+ bool default_gtf;
+ int h_blank;
+
+ if (vsync != 3)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ default_gtf = true;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ default_gtf = false;
+ else
+ return false;
+
+ /* Vertical */
+ v_fp = GTF_V_FP;
+ v_bp = (GTF_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ if (aspect.numerator == 0 || aspect.denominator == 0) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ }
+ image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
+
+ /* Horizontal */
+ if (default_gtf)
+ h_blank = ((image_width * GTF_D_C_PRIME * hfreq) -
+ (image_width * GTF_D_M_PRIME * 1000) +
+ (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) / 2) /
+ (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000);
+ else
+ h_blank = ((image_width * GTF_S_C_PRIME * hfreq) -
+ (image_width * GTF_S_M_PRIME * 1000) +
+ (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) / 2) /
+ (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000);
+
+ h_blank = h_blank - h_blank % (2 * GTF_CELL_GRAN);
+ frame_width = image_width + h_blank;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
+
+ hsync = (frame_width * 8 + 50) / 100;
+ hsync = hsync - hsync % GTF_CELL_GRAN;
+
+ h_fp = h_blank / 2 - hsync;
+
+ fmt->type = V4L2_DV_BT_656_1120;
+ fmt->bt.polarities = polarities;
+ fmt->bt.width = image_width;
+ fmt->bt.height = image_height;
+ fmt->bt.hfrontporch = h_fp;
+ fmt->bt.vfrontporch = v_fp;
+ fmt->bt.hsync = hsync;
+ fmt->bt.vsync = vsync;
+ fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
+ fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ fmt->bt.pixelclock = pix_clk;
+ fmt->bt.standards = V4L2_DV_BT_STD_GTF;
+ if (!default_gtf)
+ fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
+
+/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+ * 0x15 and 0x16 from the EDID.
+ * @hor_landscape - byte 0x15 from the EDID.
+ * @vert_portrait - byte 0x16 from the EDID.
+ *
+ * Determines the aspect ratio from the EDID.
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
+ * "Horizontal and Vertical Screen Size or Aspect Ratio"
+ */
+struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
+{
+ struct v4l2_fract aspect = { 16, 9 };
+ u32 tmp;
+ u8 ratio;
+
+ /* Nothing filled in, fallback to 16:9 */
+ if (!hor_landscape && !vert_portrait)
+ return aspect;
+ /* Both filled in, so they are interpreted as the screen size in cm */
+ if (hor_landscape && vert_portrait) {
+ aspect.numerator = hor_landscape;
+ aspect.denominator = vert_portrait;
+ return aspect;
+ }
+	/* Only one is filled in, so interpret it as a ratio:
+	   (val + 99) / 100 */
+ ratio = hor_landscape | vert_portrait;
+ /* Change some rounded values into the exact aspect ratio */
+ if (ratio == 79) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ } else if (ratio == 34) {
+ aspect.numerator = 4;
+ aspect.denominator = 3;
+ } else if (ratio == 68) {
+ aspect.numerator = 15;
+ aspect.denominator = 9;
+ } else {
+ aspect.numerator = hor_landscape + 99;
+ aspect.denominator = 100;
+ }
+ if (hor_landscape)
+ return aspect;
+ /* The aspect ratio is for portrait, so swap numerator and denominator */
+ tmp = aspect.denominator;
+ aspect.denominator = aspect.numerator;
+ aspect.numerator = tmp;
+ return aspect;
+}
+EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
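A sketch of feeding the two EDID base-block bytes into the helper; the edid[] buffer is assumed to hold a valid 128-byte base block.

/* bytes 0x15/0x16 of the EDID base block: screen size or aspect ratio */
struct v4l2_fract aspect = v4l2_calc_aspect_ratio(edid[0x15], edid[0x16]);

/* 'aspect' can then be passed to v4l2_detect_gtf() as the preferred ratio */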
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index c7200921815..8761aab99de 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -311,3 +311,46 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
+
+int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
+
+static void v4l2_event_src_replace(struct v4l2_event *old,
+ const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.src_change.changes;
+
+ old->u.src_change = new->u.src_change;
+ old->u.src_change.changes |= old_changes;
+}
+
+static void v4l2_event_src_merge(const struct v4l2_event *old,
+ struct v4l2_event *new)
+{
+ new->u.src_change.changes |= old->u.src_change.changes;
+}
+
+static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
+ .replace = v4l2_event_src_replace,
+ .merge = v4l2_event_src_merge,
+};
+
+int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
+
+int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
+{
+ return v4l2_src_change_event_subscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
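A sketch of how a video device driver might dispatch subscriptions between the new source-change helper and the control framework; foo_subscribe_event is a hypothetical vidioc_subscribe_event implementation.

static int foo_subscribe_event(struct v4l2_fh *fh,
			       const struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_SOURCE_CHANGE:
		return v4l2_src_change_event_subscribe(fh, sub);
	case V4L2_EVENT_CTRL:
		return v4l2_ctrl_subscribe_event(fh, sub);
	default:
		return -EINVAL;
	}
}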
diff --git a/drivers/media/v4l2-core/v4l2-int-device.c b/drivers/media/v4l2-core/v4l2-int-device.c
deleted file mode 100644
index f4473494af7..00000000000
--- a/drivers/media/v4l2-core/v4l2-int-device.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * drivers/media/video/v4l2-int-device.c
- *
- * V4L2 internal ioctl interface.
- *
- * Copyright (C) 2007 Nokia Corporation.
- *
- * Contact: Sakari Ailus <sakari.ailus@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/sort.h>
-#include <linux/string.h>
-#include <linux/module.h>
-
-#include <media/v4l2-int-device.h>
-
-static DEFINE_MUTEX(mutex);
-static LIST_HEAD(int_list);
-
-void v4l2_int_device_try_attach_all(void)
-{
- struct v4l2_int_device *m, *s;
-
- list_for_each_entry(m, &int_list, head) {
- if (m->type != v4l2_int_type_master)
- continue;
-
- list_for_each_entry(s, &int_list, head) {
- if (s->type != v4l2_int_type_slave)
- continue;
-
- /* Slave is connected? */
- if (s->u.slave->master)
- continue;
-
- /* Slave wants to attach to master? */
- if (s->u.slave->attach_to[0] != 0
- && strncmp(m->name, s->u.slave->attach_to,
- V4L2NAMESIZE))
- continue;
-
- if (!try_module_get(m->module))
- continue;
-
- s->u.slave->master = m;
- if (m->u.master->attach(s)) {
- s->u.slave->master = NULL;
- module_put(m->module);
- continue;
- }
- }
- }
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
-
-static int ioctl_sort_cmp(const void *a, const void *b)
-{
- const struct v4l2_int_ioctl_desc *d1 = a, *d2 = b;
-
- if (d1->num > d2->num)
- return 1;
-
- if (d1->num < d2->num)
- return -1;
-
- return 0;
-}
-
-int v4l2_int_device_register(struct v4l2_int_device *d)
-{
- if (d->type == v4l2_int_type_slave)
- sort(d->u.slave->ioctls, d->u.slave->num_ioctls,
- sizeof(struct v4l2_int_ioctl_desc),
- &ioctl_sort_cmp, NULL);
- mutex_lock(&mutex);
- list_add(&d->head, &int_list);
- v4l2_int_device_try_attach_all();
- mutex_unlock(&mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_register);
-
-void v4l2_int_device_unregister(struct v4l2_int_device *d)
-{
- mutex_lock(&mutex);
- list_del(&d->head);
- if (d->type == v4l2_int_type_slave
- && d->u.slave->master != NULL) {
- d->u.slave->master->u.master->detach(d);
- module_put(d->u.slave->master->module);
- d->u.slave->master = NULL;
- }
- mutex_unlock(&mutex);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_device_unregister);
-
-/* Adapted from search_extable in extable.c. */
-static v4l2_int_ioctl_func *find_ioctl(struct v4l2_int_slave *slave, int cmd,
- v4l2_int_ioctl_func *no_such_ioctl)
-{
- const struct v4l2_int_ioctl_desc *first = slave->ioctls;
- const struct v4l2_int_ioctl_desc *last =
- first + slave->num_ioctls - 1;
-
- while (first <= last) {
- const struct v4l2_int_ioctl_desc *mid;
-
- mid = (last - first) / 2 + first;
-
- if (mid->num < cmd)
- first = mid + 1;
- else if (mid->num > cmd)
- last = mid - 1;
- else
- return mid->func;
- }
-
- return no_such_ioctl;
-}
-
-static int no_such_ioctl_0(struct v4l2_int_device *d)
-{
- return -ENOIOCTLCMD;
-}
-
-int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
-{
- return ((v4l2_int_ioctl_func_0 *)
- find_ioctl(d->u.slave, cmd,
- (v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
-
-static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
-{
- return -ENOIOCTLCMD;
-}
-
-int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
-{
- return ((v4l2_int_ioctl_func_1 *)
- find_ioctl(d->u.slave, cmd,
- (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
-}
-EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
-
-MODULE_LICENSE("GPL");
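For reference, the find_ioctl() helper deleted above resolved commands by binary-searching a descriptor table that v4l2_int_device_register() had sorted at registration time; the same pattern as a stand-alone userspace sketch (all names below are illustrative, none belong to the V4L2 API):

#include <stdio.h>
#include <stdlib.h>

struct ioctl_desc {
        int num;                        /* command number, unique per entry */
        int (*func)(void *arg);         /* handler for that command */
};

static int cmp_desc(const void *a, const void *b)
{
        const struct ioctl_desc *d1 = a, *d2 = b;

        return (d1->num > d2->num) - (d1->num < d2->num);
}

/* Binary search over a table previously sorted with qsort(). */
static int (*find_handler(const struct ioctl_desc *tbl, int n, int cmd))(void *)
{
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;

                if (tbl[mid].num < cmd)
                        lo = mid + 1;
                else if (tbl[mid].num > cmd)
                        hi = mid - 1;
                else
                        return tbl[mid].func;
        }
        return NULL;                    /* no such ioctl */
}

static int do_reset(void *arg) { (void)arg; puts("reset"); return 0; }
static int do_start(void *arg) { (void)arg; puts("start"); return 0; }

int main(void)
{
        struct ioctl_desc tbl[] = { { 7, do_start }, { 3, do_reset } };
        int (*fn)(void *);

        qsort(tbl, 2, sizeof(tbl[0]), cmp_desc);        /* register time */
        fn = find_handler(tbl, 2, 7);                   /* dispatch time */
        return fn ? fn(NULL) : -1;
}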
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index aa6e7c788db..16bffd851bf 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -26,15 +26,19 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-chip-ident.h>
#include <media/videobuf2-core.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
/* Zero out the end of the struct pointed to by p. Everything after, but
* not including, the specified field is cleared. */
#define CLEAR_AFTER_FIELD(p, field) \
memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
+#define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls)
+
struct std_descr {
v4l2_std_id std;
const char *descr;
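The CLEAR_AFTER_FIELD() macro above is what the format handlers later in this file use to zero the unused tail of a v4l2_format before calling into a driver (e.g. CLEAR_AFTER_FIELD(p, fmt.sdr) for the new SDR case). A minimal userspace illustration of the same offsetof()/memset() arithmetic, with a made-up struct (compile with gcc; typeof is a GNU extension, just as in the kernel):

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Same shape as the kernel macro; u8 replaced with unsigned char. */
#define CLEAR_AFTER_FIELD(p, field) \
        memset((unsigned char *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
               0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))

struct fmt {
        int type;
        int width;
        int height;
        unsigned char reserved[8];
};

int main(void)
{
        struct fmt f;

        memset(&f, 0xff, sizeof(f));    /* simulate stale userspace data */
        f.type = 1;
        f.width = 640;
        f.height = 480;

        /* Zero everything that follows 'height', i.e. the reserved[] tail. */
        CLEAR_AFTER_FIELD(&f, height);

        printf("reserved[0]=%u reserved[7]=%u\n", f.reserved[0], f.reserved[7]);
        return 0;
}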
@@ -148,6 +152,7 @@ const char *v4l2_type_names[] = {
[V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",
[V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
[V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
+ [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
};
EXPORT_SYMBOL(v4l2_type_names);
@@ -167,9 +172,11 @@ static void v4l_print_querycap(const void *arg, bool write_only)
{
const struct v4l2_capability *p = arg;
- pr_cont("driver=%s, card=%s, bus=%s, version=0x%08x, "
+ pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, "
"capabilities=0x%08x, device_caps=0x%08x\n",
- p->driver, p->card, p->bus_info,
+ (int)sizeof(p->driver), p->driver,
+ (int)sizeof(p->card), p->card,
+ (int)sizeof(p->bus_info), p->bus_info,
p->version, p->capabilities, p->device_caps);
}
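The querycap printout above, and the other debug helpers changed below, switch from "%s" to "%.*s" with (int)sizeof(field) as the precision, so a fixed-size field that happens not to be NUL-terminated can never be read past its end. The idiom in isolation:

#include <stdio.h>
#include <string.h>

struct cap {
        char driver[16];        /* fixed size, not guaranteed NUL-terminated */
};

int main(void)
{
        struct cap c;

        memset(c.driver, 'x', sizeof(c.driver));        /* all 16 bytes used, no NUL */

        /* "%.*s" prints at most sizeof(c.driver) characters and then stops. */
        printf("driver=%.*s\n", (int)sizeof(c.driver), c.driver);
        return 0;
}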
@@ -177,20 +184,21 @@ static void v4l_print_enuminput(const void *arg, bool write_only)
{
const struct v4l2_input *p = arg;
- pr_cont("index=%u, name=%s, type=%u, audioset=0x%x, tuner=%u, "
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, "
"std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
- p->index, p->name, p->type, p->audioset, p->tuner,
- (unsigned long long)p->std, p->status, p->capabilities);
+ p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
+ p->tuner, (unsigned long long)p->std, p->status,
+ p->capabilities);
}
static void v4l_print_enumoutput(const void *arg, bool write_only)
{
const struct v4l2_output *p = arg;
- pr_cont("index=%u, name=%s, type=%u, audioset=0x%x, "
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, "
"modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
- p->index, p->name, p->type, p->audioset, p->modulator,
- (unsigned long long)p->std, p->capabilities);
+ p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
+ p->modulator, (unsigned long long)p->std, p->capabilities);
}
static void v4l_print_audio(const void *arg, bool write_only)
@@ -200,8 +208,9 @@ static void v4l_print_audio(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u, mode=0x%x\n", p->index, p->mode);
else
- pr_cont("index=%u, name=%s, capability=0x%x, mode=0x%x\n",
- p->index, p->name, p->capability, p->mode);
+ pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name,
+ p->capability, p->mode);
}
static void v4l_print_audioout(const void *arg, bool write_only)
@@ -211,21 +220,22 @@ static void v4l_print_audioout(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u\n", p->index);
else
- pr_cont("index=%u, name=%s, capability=0x%x, mode=0x%x\n",
- p->index, p->name, p->capability, p->mode);
+ pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name,
+ p->capability, p->mode);
}
static void v4l_print_fmtdesc(const void *arg, bool write_only)
{
const struct v4l2_fmtdesc *p = arg;
- pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, description='%s'\n",
+ pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, description='%.*s'\n",
p->index, prt_names(p->type, v4l2_type_names),
p->flags, (p->pixelformat & 0xff),
(p->pixelformat >> 8) & 0xff,
(p->pixelformat >> 16) & 0xff,
(p->pixelformat >> 24) & 0xff,
- p->description);
+ (int)sizeof(p->description), p->description);
}
static void v4l_print_format(const void *arg, bool write_only)
@@ -236,7 +246,7 @@ static void v4l_print_format(const void *arg, bool write_only)
const struct v4l2_vbi_format *vbi;
const struct v4l2_sliced_vbi_format *sliced;
const struct v4l2_window *win;
- const struct v4l2_clip *clip;
+ const struct v4l2_sdr_format *sdr;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -246,7 +256,7 @@ static void v4l_print_format(const void *arg, bool write_only)
pix = &p->fmt.pix;
pr_cont(", width=%u, height=%u, "
"pixelformat=%c%c%c%c, field=%s, "
- "bytesperline=%u sizeimage=%u, colorspace=%d\n",
+ "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
pix->width, pix->height,
(pix->pixelformat & 0xff),
(pix->pixelformat >> 8) & 0xff,
@@ -277,20 +287,14 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
win = &p->fmt.win;
- pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, "
- "chromakey=0x%08x, bitmap=%p, "
- "global_alpha=0x%02x\n",
- win->w.width, win->w.height,
- win->w.left, win->w.top,
+ /* Note: we can't print the clip list here since the clips
+ * pointer is a userspace pointer, not a kernelspace
+ * pointer. */
+ pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n",
+ win->w.width, win->w.height, win->w.left, win->w.top,
prt_names(win->field, v4l2_field_names),
- win->chromakey, win->bitmap, win->global_alpha);
- clip = win->clips;
- for (i = 0; i < win->clipcount; i++) {
- printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
- i, clip->c.width, clip->c.height,
- clip->c.left, clip->c.top);
- clip = clip->next;
- }
+ win->chromakey, win->clipcount, win->clips,
+ win->bitmap, win->global_alpha);
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
@@ -316,6 +320,14 @@ static void v4l_print_format(const void *arg, bool write_only)
sliced->service_lines[0][i],
sliced->service_lines[1][i]);
break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ sdr = &p->fmt.sdr;
+ pr_cont(", pixelformat=%c%c%c%c\n",
+ (sdr->pixelformat >> 0) & 0xff,
+ (sdr->pixelformat >> 8) & 0xff,
+ (sdr->pixelformat >> 16) & 0xff,
+ (sdr->pixelformat >> 24) & 0xff);
+ break;
}
}
@@ -325,7 +337,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
"height=%u, pixelformat=%c%c%c%c, "
- "bytesperline=%u sizeimage=%u, colorspace=%d\n",
+ "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
p->capability, p->flags, p->base,
p->fmt.width, p->fmt.height,
(p->fmt.pixelformat & 0xff),
@@ -346,11 +358,11 @@ static void v4l_print_modulator(const void *arg, bool write_only)
const struct v4l2_modulator *p = arg;
if (write_only)
- pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans);
+ pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
else
- pr_cont("index=%u, name=%s, capability=0x%x, "
+ pr_cont("index=%u, name=%.*s, capability=0x%x, "
"rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
- p->index, p->name, p->capability,
+ p->index, (int)sizeof(p->name), p->name, p->capability,
p->rangelow, p->rangehigh, p->txsubchans);
}
@@ -361,10 +373,10 @@ static void v4l_print_tuner(const void *arg, bool write_only)
if (write_only)
pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
else
- pr_cont("index=%u, name=%s, type=%u, capability=0x%x, "
+ pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, "
"rangelow=%u, rangehigh=%u, signal=%u, afc=%d, "
"rxsubchans=0x%x, audmode=%u\n",
- p->index, p->name, p->type,
+ p->index, (int)sizeof(p->name), p->name, p->type,
p->capability, p->rangelow,
p->rangehigh, p->signal, p->afc,
p->rxsubchans, p->audmode);
@@ -382,9 +394,9 @@ static void v4l_print_standard(const void *arg, bool write_only)
{
const struct v4l2_standard *p = arg;
- pr_cont("index=%u, id=0x%Lx, name=%s, fps=%u/%u, "
+ pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, "
"framelines=%u\n", p->index,
- (unsigned long long)p->id, p->name,
+ (unsigned long long)p->id, (int)sizeof(p->name), p->name,
p->frameperiod.numerator,
p->frameperiod.denominator,
p->framelines);
@@ -438,13 +450,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
for (i = 0; i < p->length; ++i) {
plane = &p->m.planes[i];
printk(KERN_DEBUG
- "plane %d: bytesused=%d, data_offset=0x%08x "
+ "plane %d: bytesused=%d, data_offset=0x%08x, "
"offset/userptr=0x%lx, length=%d\n",
i, plane->bytesused, plane->data_offset,
plane->m.userptr, plane->length);
}
} else {
- pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
+ pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n",
p->bytesused, p->m.userptr, p->length);
}
@@ -497,6 +509,8 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
c->capability, c->outputmode,
c->timeperframe.numerator, c->timeperframe.denominator,
c->extendedmode, c->writebuffers);
+ } else {
+ pr_cont("\n");
}
}
@@ -504,9 +518,9 @@ static void v4l_print_queryctrl(const void *arg, bool write_only)
{
const struct v4l2_queryctrl *p = arg;
- pr_cont("id=0x%x, type=%d, name=%s, min/max=%d/%d, "
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, "
"step=%d, default=%d, flags=0x%08x\n",
- p->id, p->type, p->name,
+ p->id, p->type, (int)sizeof(p->name), p->name,
p->minimum, p->maximum,
p->step, p->default_value, p->flags);
}
@@ -548,7 +562,7 @@ static void v4l_print_cropcap(const void *arg, bool write_only)
const struct v4l2_cropcap *p = arg;
pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, "
- "defrect wxh=%dx%d, x,y=%d,%d\n, "
+ "defrect wxh=%dx%d, x,y=%d,%d, "
"pixelaspect %d/%d\n",
prt_names(p->type, v4l2_type_names),
p->bounds.width, p->bounds.height,
@@ -617,17 +631,17 @@ static void v4l_print_decoder_cmd(const void *arg, bool write_only)
pr_info("pts=%llu\n", p->stop.pts);
}
-static void v4l_print_dbg_chip_ident(const void *arg, bool write_only)
+static void v4l_print_dbg_chip_info(const void *arg, bool write_only)
{
- const struct v4l2_dbg_chip_ident *p = arg;
+ const struct v4l2_dbg_chip_info *p = arg;
pr_cont("type=%u, ", p->match.type);
if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
- pr_cont("name=%s, ", p->match.name);
+ pr_cont("name=%.*s, ",
+ (int)sizeof(p->match.name), p->match.name);
else
pr_cont("addr=%u, ", p->match.addr);
- pr_cont("chip_ident=%u, revision=0x%x\n",
- p->ident, p->revision);
+ pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name);
}
static void v4l_print_dbg_register(const void *arg, bool write_only)
@@ -636,28 +650,14 @@ static void v4l_print_dbg_register(const void *arg, bool write_only)
pr_cont("type=%u, ", p->match.type);
if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
- pr_cont("name=%s, ", p->match.name);
+ pr_cont("name=%.*s, ",
+ (int)sizeof(p->match.name), p->match.name);
else
pr_cont("addr=%u, ", p->match.addr);
pr_cont("reg=0x%llx, val=0x%llx\n",
p->reg, p->val);
}
-static void v4l_print_dv_enum_presets(const void *arg, bool write_only)
-{
- const struct v4l2_dv_enum_preset *p = arg;
-
- pr_cont("index=%u, preset=%u, name=%s, width=%u, height=%u\n",
- p->index, p->preset, p->name, p->width, p->height);
-}
-
-static void v4l_print_dv_preset(const void *arg, bool write_only)
-{
- const struct v4l2_dv_preset *p = arg;
-
- pr_cont("preset=%u\n", p->preset);
-}
-
static void v4l_print_dv_timings(const void *arg, bool write_only)
{
const struct v4l2_dv_timings *p = arg;
@@ -727,11 +727,11 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
p->type);
switch (p->type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
- pr_cont(" wxh=%ux%u\n",
+ pr_cont(", wxh=%ux%u\n",
p->discrete.width, p->discrete.height);
break;
case V4L2_FRMSIZE_TYPE_STEPWISE:
- pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n",
+ pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
p->stepwise.min_width, p->stepwise.min_height,
p->stepwise.step_width, p->stepwise.step_height,
p->stepwise.max_width, p->stepwise.max_height);
@@ -757,12 +757,12 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
p->width, p->height, p->type);
switch (p->type) {
case V4L2_FRMIVAL_TYPE_DISCRETE:
- pr_cont(" fps=%d/%d\n",
+ pr_cont(", fps=%d/%d\n",
p->discrete.numerator,
p->discrete.denominator);
break;
case V4L2_FRMIVAL_TYPE_STEPWISE:
- pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n",
+ pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
p->stepwise.min.numerator,
p->stepwise.min.denominator,
p->stepwise.max.numerator,
@@ -800,8 +800,8 @@ static void v4l_print_event(const void *arg, bool write_only)
pr_cont("value64=%lld, ", c->value64);
else
pr_cont("value=%d, ", c->value);
- pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d,"
- " default_value=%d\n",
+ pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
+ "default_value=%d\n",
c->flags, c->minimum, c->maximum,
c->step, c->default_value);
break;
@@ -838,12 +838,20 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
const struct v4l2_frequency_band *p = arg;
pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
- "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+ "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
p->tuner, p->type, p->index,
p->capability, p->rangelow,
p->rangehigh, p->modulation);
}
+static void v4l_print_edid(const void *arg, bool write_only)
+{
+ const struct v4l2_edid *p = arg;
+
+ pr_cont("pad=%u, start_block=%u, blocks=%u\n",
+ p->pad, p->start_block, p->blocks);
+}
+
static void v4l_print_u32(const void *arg, bool write_only)
{
pr_cont("value=%u\n", *(const u32 *)arg);
@@ -891,6 +899,7 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -940,6 +949,10 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out)
return 0;
break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap)
+ return 0;
+ break;
default:
break;
}
@@ -997,20 +1010,17 @@ static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_input *p = arg;
/*
- * We set the flags for CAP_PRESETS, CAP_DV_TIMINGS &
+ * We set the flags for CAP_DV_TIMINGS &
* CAP_STD here based on ioctl handler provided by the
* driver. If the driver doesn't support these
* for a specific input, it must override these flags.
*/
- if (ops->vidioc_s_std)
+ if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_IN_CAP_STD;
- if (ops->vidioc_s_dv_preset)
- p->capabilities |= V4L2_IN_CAP_PRESETS;
- if (ops->vidioc_s_dv_timings)
- p->capabilities |= V4L2_IN_CAP_DV_TIMINGS;
return ops->vidioc_enum_input(file, fh, p);
}
@@ -1018,20 +1028,17 @@ static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_output *p = arg;
/*
- * We set the flags for CAP_PRESETS, CAP_DV_TIMINGS &
+ * We set the flags for CAP_DV_TIMINGS &
* CAP_STD here based on ioctl handler provided by the
* driver. If the driver doesn't support these
* for a specific output, it must override these flags.
*/
- if (ops->vidioc_s_std)
+ if (is_valid_ioctl(vfd, VIDIOC_S_STD))
p->capabilities |= V4L2_OUT_CAP_STD;
- if (ops->vidioc_s_dv_preset)
- p->capabilities |= V4L2_OUT_CAP_PRESETS;
- if (ops->vidioc_s_dv_timings)
- p->capabilities |= V4L2_OUT_CAP_DV_TIMINGS;
return ops->vidioc_enum_output(file, fh, p);
}
@@ -1065,6 +1072,10 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!is_tx || !ops->vidioc_enum_fmt_vid_out_mplane))
break;
return ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !ops->vidioc_enum_fmt_sdr_cap))
+ break;
+ return ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1075,6 +1086,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -1119,6 +1131,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out))
break;
return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap))
+ break;
+ return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1129,6 +1145,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -1183,6 +1200,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.sliced);
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sdr);
+ return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1193,6 +1215,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -1247,6 +1270,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.sliced);
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.sdr);
+ return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1307,8 +1335,11 @@ static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency *p = arg;
- p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ p->type = V4L2_TUNER_ADC;
+ else
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
return ops->vidioc_g_frequency(file, fh, p);
}
@@ -1316,13 +1347,18 @@ static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
- struct v4l2_frequency *p = arg;
+ const struct v4l2_frequency *p = arg;
enum v4l2_tuner_type type;
- type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- if (p->type != type)
- return -EINVAL;
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
return ops->vidioc_s_frequency(file, fh, p);
}
@@ -1363,40 +1399,18 @@ static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
return 0;
}
-static int v4l_g_std(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
-{
- struct video_device *vfd = video_devdata(file);
- v4l2_std_id *id = arg;
-
- /* Calls the specific handler */
- if (ops->vidioc_g_std)
- return ops->vidioc_g_std(file, fh, arg);
- if (vfd->current_norm) {
- *id = vfd->current_norm;
- return 0;
- }
- return -ENOTTY;
-}
-
static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
- v4l2_std_id *id = arg, norm;
- int ret;
+ v4l2_std_id id = *(v4l2_std_id *)arg, norm;
- norm = (*id) & vfd->tvnorms;
+ norm = id & vfd->tvnorms;
if (vfd->tvnorms && !norm) /* Check if std is supported */
return -EINVAL;
/* Calls the specific handler */
- ret = ops->vidioc_s_std(file, fh, &norm);
-
- /* Updates standard information */
- if (ret >= 0)
- vfd->current_norm = norm;
- return ret;
+ return ops->vidioc_s_std(file, fh, norm);
}
static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
@@ -1406,10 +1420,10 @@ static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
v4l2_std_id *p = arg;
/*
- * If nothing detected, it should return all supported
- * standard.
- * Drivers just need to mask the std argument, in order
- * to remove the standards that don't apply from the mask.
+ * If no signal is detected, then the driver should return
+ * V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with
+ * any standards that do not apply removed.
+ *
* This means that tuners, audio and video decoders can join
* their efforts to improve the standards detection.
*/
@@ -1424,6 +1438,10 @@ static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
struct v4l2_hw_freq_seek *p = arg;
enum v4l2_tuner_type type;
+ /* s_hw_freq_seek is not supported for SDR for now */
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ return -EINVAL;
+
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
if (p->type != type)
@@ -1499,7 +1517,6 @@ static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
- struct video_device *vfd = video_devdata(file);
struct v4l2_streamparm *p = arg;
v4l2_std_id std;
int ret = check_fmt(file, p->type);
@@ -1508,16 +1525,13 @@ static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
return ret;
if (ops->vidioc_g_parm)
return ops->vidioc_g_parm(file, fh, p);
- std = vfd->current_norm;
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
p->parm.capture.readbuffers = 2;
- if (ops->vidioc_g_std)
- ret = ops->vidioc_g_std(file, fh, &std);
+ ret = ops->vidioc_g_std(file, fh, &std);
if (ret == 0)
- v4l2_video_std_frame_period(std,
- &p->parm.capture.timeperframe);
+ v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
return ret;
}
@@ -1792,10 +1806,24 @@ static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_subdev *sd;
+ int idx = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return ops->vidioc_g_register(file, fh, p);
+ if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
+ if (vfd->v4l2_dev == NULL)
+ return -EINVAL;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev)
+ if (p->match.addr == idx++)
+ return v4l2_subdev_call(sd, core, g_register, p);
+ return -EINVAL;
+ }
+ if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
+ (ops->vidioc_g_chip_info || p->match.addr == 0))
+ return ops->vidioc_g_register(file, fh, p);
+ return -EINVAL;
#else
return -ENOTTY;
#endif
@@ -1805,24 +1833,71 @@ static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
- struct v4l2_dbg_register *p = arg;
+ const struct v4l2_dbg_register *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_subdev *sd;
+ int idx = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return ops->vidioc_s_register(file, fh, p);
+ if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
+ if (vfd->v4l2_dev == NULL)
+ return -EINVAL;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev)
+ if (p->match.addr == idx++)
+ return v4l2_subdev_call(sd, core, s_register, p);
+ return -EINVAL;
+ }
+ if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
+ (ops->vidioc_g_chip_info || p->match.addr == 0))
+ return ops->vidioc_s_register(file, fh, p);
+ return -EINVAL;
#else
return -ENOTTY;
#endif
}
-static int v4l_dbg_g_chip_ident(const struct v4l2_ioctl_ops *ops,
+static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
- struct v4l2_dbg_chip_ident *p = arg;
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_dbg_chip_info *p = arg;
+ struct v4l2_subdev *sd;
+ int idx = 0;
+
+ switch (p->match.type) {
+ case V4L2_CHIP_MATCH_BRIDGE:
+ if (ops->vidioc_s_register)
+ p->flags |= V4L2_CHIP_FL_WRITABLE;
+ if (ops->vidioc_g_register)
+ p->flags |= V4L2_CHIP_FL_READABLE;
+ strlcpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
+ if (ops->vidioc_g_chip_info)
+ return ops->vidioc_g_chip_info(file, fh, arg);
+ if (p->match.addr)
+ return -EINVAL;
+ return 0;
- p->ident = V4L2_IDENT_NONE;
- p->revision = 0;
- return ops->vidioc_g_chip_ident(file, fh, p);
+ case V4L2_CHIP_MATCH_SUBDEV:
+ if (vfd->v4l2_dev == NULL)
+ break;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) {
+ if (p->match.addr != idx++)
+ continue;
+ if (sd->ops->core && sd->ops->core->s_register)
+ p->flags |= V4L2_CHIP_FL_WRITABLE;
+ if (sd->ops->core && sd->ops->core->g_register)
+ p->flags |= V4L2_CHIP_FL_READABLE;
+ strlcpy(p->name, sd->name, sizeof(p->name));
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+#else
+ return -ENOTTY;
+#endif
}
static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
@@ -1866,14 +1941,19 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
enum v4l2_tuner_type type;
int err;
- type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
-
- if (type != p->type)
- return -EINVAL;
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ type = p->type;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
if (ops->vidioc_enum_freq_bands)
return ops->vidioc_enum_freq_bands(file, fh, p);
- if (ops->vidioc_g_tuner) {
+ if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) {
struct v4l2_tuner t = {
.index = p->tuner,
.type = type,
@@ -1891,7 +1971,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
return 0;
}
- if (ops->vidioc_g_modulator) {
+ if (is_valid_ioctl(vfd, VIDIOC_G_MODULATOR)) {
struct v4l2_modulator m = {
.index = p->tuner,
};
@@ -1976,7 +2056,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
IOCTL_INFO_FNC(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
- IOCTL_INFO_FNC(VIDIOC_G_STD, v4l_g_std, v4l_print_std, 0),
+ IOCTL_INFO_STD(VIDIOC_G_STD, vidioc_g_std, v4l_print_std, 0),
IOCTL_INFO_FNC(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
IOCTL_INFO_FNC(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
@@ -1990,6 +2070,8 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),
IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_EDID, vidioc_g_edid, v4l_print_edid, INFO_FL_CLEAR(v4l2_edid, edid)),
+ IOCTL_INFO_STD(VIDIOC_S_EDID, vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_edid, edid)),
IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),
IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
@@ -2026,12 +2108,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_STD(VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0),
IOCTL_INFO_FNC(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
IOCTL_INFO_FNC(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
- IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_IDENT, v4l_dbg_g_chip_ident, v4l_print_dbg_chip_ident, 0),
IOCTL_INFO_FNC(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_ENUM_DV_PRESETS, vidioc_enum_dv_presets, v4l_print_dv_enum_presets, 0),
- IOCTL_INFO_STD(VIDIOC_S_DV_PRESET, vidioc_s_dv_preset, v4l_print_dv_preset, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_DV_PRESET, vidioc_g_dv_preset, v4l_print_dv_preset, 0),
- IOCTL_INFO_STD(VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset, v4l_print_dv_preset, 0),
IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO),
IOCTL_INFO_STD(VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings, v4l_print_dv_timings, 0),
IOCTL_INFO_FNC(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
@@ -2043,6 +2120,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0),
IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
+ IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
@@ -2147,11 +2225,6 @@ static long __video_do_ioctl(struct file *file,
}
write_only = _IOC_DIR(cmd) == _IOC_WRITE;
- if (write_only && debug > V4L2_DEBUG_IOCTL) {
- v4l_printk_ioctl(video_device_node_name(vfd), cmd);
- pr_cont(": ");
- info->debug(arg, write_only);
- }
if (info->flags & INFO_FL_STD) {
typedef int (*vidioc_op)(struct file *file, void *fh, void *p);
const void *p = vfd->ioctl_ops;
@@ -2170,16 +2243,10 @@ static long __video_do_ioctl(struct file *file,
done:
if (debug) {
- if (write_only && debug > V4L2_DEBUG_IOCTL) {
- if (ret < 0)
- printk(KERN_DEBUG "%s: error %ld\n",
- video_device_node_name(vfd), ret);
- return ret;
- }
v4l_printk_ioctl(video_device_node_name(vfd), cmd);
if (ret < 0)
- pr_cont(": error %ld\n", ret);
- else if (debug == V4L2_DEBUG_IOCTL)
+ pr_cont(": error %ld", ret);
+ if (debug == V4L2_DEBUG_IOCTL)
pr_cont("\n");
else if (_IOC_DIR(cmd) == _IOC_NONE)
info->debug(arg, write_only);
@@ -2193,7 +2260,7 @@ done:
}
static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
- void * __user *user_ptr, void ***kernel_ptr)
+ void __user **user_ptr, void ***kernel_ptr)
{
int ret = 0;
@@ -2210,16 +2277,16 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
*user_ptr = (void __user *)buf->m.planes;
- *kernel_ptr = (void *)&buf->m.planes;
+ *kernel_ptr = (void **)&buf->m.planes;
*array_size = sizeof(struct v4l2_plane) * buf->length;
ret = 1;
}
break;
}
- case VIDIOC_SUBDEV_G_EDID:
- case VIDIOC_SUBDEV_S_EDID: {
- struct v4l2_subdev_edid *edid = parg;
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID: {
+ struct v4l2_edid *edid = parg;
if (edid->blocks) {
if (edid->blocks > 256) {
@@ -2227,7 +2294,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
*user_ptr = (void __user *)edid->edid;
- *kernel_ptr = (void *)&edid->edid;
+ *kernel_ptr = (void **)&edid->edid;
*array_size = edid->blocks * 128;
ret = 1;
}
@@ -2245,7 +2312,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
*user_ptr = (void __user *)ctrls->controls;
- *kernel_ptr = (void *)&ctrls->controls;
+ *kernel_ptr = (void **)&ctrls->controls;
*array_size = sizeof(struct v4l2_ext_control)
* ctrls->count;
ret = 1;
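check_array_args() only records where the embedded userspace array lives (user_ptr), where the kernel copy of the struct keeps that pointer (kernel_ptr) and how large the array is; video_usercopy() then bounces the array through a kernel buffer around the handler call, as the hunk below shows for the copy-back half. A rough, hypothetical sketch of that overall pattern (struct and function names are invented for illustration, not part of V4L2):

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Illustrative only. */
struct my_ctrls {
        unsigned int count;
        void __user *controls;          /* userspace array of 'count' elements */
};

static long my_call_with_bounced_array(struct my_ctrls *k, size_t elem_size,
                                       long (*handler)(struct my_ctrls *))
{
        void __user *user_ptr = k->controls;
        size_t array_size = (size_t)k->count * elem_size;
        void *mbuf;
        long err;

        mbuf = kmalloc(array_size, GFP_KERNEL);
        if (!mbuf)
                return -ENOMEM;
        if (copy_from_user(mbuf, user_ptr, array_size)) {
                err = -EFAULT;
                goto out;
        }

        k->controls = (void __user *)mbuf;      /* handler sees kernel memory */
        err = handler(k);
        k->controls = user_ptr;                 /* restore before copying back */

        if (!err && copy_to_user(user_ptr, mbuf, array_size))
                err = -EFAULT;
out:
        kfree(mbuf);
        return err;
}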
@@ -2337,9 +2404,15 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
err = func(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -ENOTTY;
+ if (err == 0) {
+ if (cmd == VIDIOC_DQBUF)
+ trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
+ else if (cmd == VIDIOC_QBUF)
+ trace_v4l2_qbuf(video_devdata(file)->minor, parg);
+ }
if (has_array_args) {
- *kernel_ptr = user_ptr;
+ *kernel_ptr = (void __force *)user_ptr;
if (copy_to_user(user_ptr, mbuf, array_size))
err = -EFAULT;
goto out_array_args;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 438ea45d107..178ce96556c 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -41,6 +41,8 @@ module_param(debug, bool, 0644);
#define TRANS_QUEUED (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING (1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT (1 << 2)
/* Offset base for buffers on the destination queue - used to distinguish
@@ -62,7 +64,7 @@ struct v4l2_m2m_dev {
struct list_head job_queue;
spinlock_t job_spinlock;
- struct v4l2_m2m_ops *m2m_ops;
+ const struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
@@ -196,6 +198,10 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
* 2) at least one destination buffer has to be queued,
* 3) streaming has to be on.
*
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
* There may also be additional, custom requirements. In such case the driver
* should supply a custom callback (job_ready in v4l2_m2m_ops) that should
* return 1 if the instance is ready.
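Condensed, the requirements listed above amount to the predicate that v4l2_m2m_try_schedule() evaluates under its spinlocks. A simplified, lock-free sketch, written as if it lived next to that function (struct v4l2_m2m_dev is local to this file), purely to restate the logic:

static bool job_is_schedulable(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

        if (!m2m_ctx->out_q_ctx.q.streaming || !m2m_ctx->cap_q_ctx.q.streaming)
                return false;                   /* 3) streaming must be on */
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue) &&
            !m2m_ctx->out_q_ctx.buffered)
                return false;                   /* 1) no source buffers */
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue) &&
            !m2m_ctx->cap_q_ctx.buffered)
                return false;                   /* 2) no destination buffers */
        if (m2m_dev->m2m_ops->job_ready &&
            !m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))
                return false;                   /* custom driver requirement */
        return true;
}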
@@ -205,7 +211,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
struct v4l2_m2m_dev *m2m_dev;
- unsigned long flags_job, flags;
+ unsigned long flags_job, flags_out, flags_cap;
m2m_dev = m2m_ctx->m2m_dev;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -217,26 +223,42 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
}
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+
+ /* If the context is aborted then don't schedule it */
+ if (m2m_ctx->job_flags & TRANS_ABORT) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("Aborted context\n");
+ return;
+ }
+
if (m2m_ctx->job_flags & TRANS_QUEUED) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("On job queue already\n");
return;
}
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+ if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+ && !m2m_ctx->out_q_ctx.buffered) {
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+ flags_out);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No input buffers available\n");
return;
}
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+ if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+ && !m2m_ctx->cap_q_ctx.buffered) {
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
+ flags_cap);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+ flags_out);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No output buffers available\n");
return;
}
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -254,6 +276,41 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
}
/**
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ *
+ * In case of streamoff or release called on any context:
+ * 1) if the context is currently running, the job is aborted and we
+ *    wait for it to finish;
+ * 2) if the context is queued, the context is removed from the
+ *    job_queue.
+ */
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ unsigned long flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+
+ m2m_ctx->job_flags |= TRANS_ABORT;
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+ wait_event(m2m_ctx->finished,
+ !(m2m_ctx->job_flags & TRANS_RUNNING));
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
+ m2m_ctx);
+ } else {
+ /* Do nothing, was not on queue/running */
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ }
+}
+
+/**
* v4l2_m2m_job_finish() - inform the framework that a job has been finished
* and have it clean up
*
@@ -369,6 +426,20 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
/**
+ * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
+ * on the type
+ */
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_create_buffers *create)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
+ return vb2_create_bufs(vq, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
+
+/**
* v4l2_m2m_expbuf() - export a source or destination buffer, depending on
* the type
*/
@@ -405,10 +476,40 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
- struct vb2_queue *vq;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ unsigned long flags_job, flags;
+ int ret;
- vq = v4l2_m2m_get_vq(m2m_ctx, type);
- return vb2_streamoff(vq, type);
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ ret = vb2_streamoff(&q_ctx->q, type);
+ if (ret)
+ return ret;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+ /* We should not be scheduled anymore, since we're dropping a queue. */
+ if (m2m_ctx->job_flags & TRANS_QUEUED)
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags = 0;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ /* Drop queue, since streamoff returns device to the same state as after
+ * calling reqbufs. */
+ INIT_LIST_HEAD(&q_ctx->rdy_queue);
+ q_ctx->num_rdy = 0;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+ if (m2m_dev->curr_ctx == m2m_ctx) {
+ m2m_dev->curr_ctx = NULL;
+ wake_up(&m2m_ctx->finished);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
@@ -457,12 +558,18 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (m2m_ctx->m2m_dev->m2m_ops->unlock)
m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
+ else if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
- poll_wait(file, &src_q->done_wq, wait);
- poll_wait(file, &dst_q->done_wq, wait);
+ if (list_empty(&src_q->done_list))
+ poll_wait(file, &src_q->done_wq, wait);
+ if (list_empty(&dst_q->done_list))
+ poll_wait(file, &dst_q->done_wq, wait);
if (m2m_ctx->m2m_dev->m2m_ops->lock)
m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
+ else if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
spin_lock_irqsave(&src_q->done_lock, flags);
if (!list_empty(&src_q->done_list))
@@ -519,7 +626,7 @@ EXPORT_SYMBOL(v4l2_m2m_mmap);
*
* Usually called from driver's probe() function.
*/
-struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
+struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
@@ -590,6 +697,13 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
if (ret)
goto err;
+ /*
+ * If both queues use the same mutex, assign it as the common buffer
+ * queue lock of the m2m context. This lock is used by the
+ * v4l2_m2m_ioctl_* helpers.
+ */
+ if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
+ m2m_ctx->q_lock = out_q_ctx->q.lock;
return m2m_ctx;
err:
@@ -605,27 +719,8 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
*/
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
- struct v4l2_m2m_dev *m2m_dev;
- unsigned long flags;
-
- m2m_dev = m2m_ctx->m2m_dev;
-
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
- if (m2m_ctx->job_flags & TRANS_RUNNING) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
- dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
- wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
- } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
- list_del(&m2m_ctx->queue);
- m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- dprintk("m2m_ctx: %p had been on queue and was removed\n",
- m2m_ctx);
- } else {
- /* Do nothing, was not on queue/running */
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- }
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
vb2_queue_release(&m2m_ctx->out_q_ctx.q);
@@ -656,3 +751,118 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here that the same lock
+ * is used for the output and the capture buffer queue.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ int ret;
+
+ if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_m2m_mmap(file, m2m_ctx, vma);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ unsigned int ret;
+
+ if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
+
+ ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
+
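With the ioctl and file-operation helpers added above, a mem2mem driver whose two vb2 queues share one mutex can delegate most of its buffer boilerplate to the framework. A hedged sketch of that wiring (the two ops structures are hypothetical and incomplete; only the v4l2_m2m_* symbols and the standard core entry points are real, and the helper declarations are assumed to be exported via <media/v4l2-mem2mem.h>):

#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>

static const struct v4l2_ioctl_ops my_m2m_ioctl_ops = {
        .vidioc_reqbufs         = v4l2_m2m_ioctl_reqbufs,
        .vidioc_create_bufs     = v4l2_m2m_ioctl_create_bufs,
        .vidioc_querybuf        = v4l2_m2m_ioctl_querybuf,
        .vidioc_qbuf            = v4l2_m2m_ioctl_qbuf,
        .vidioc_dqbuf           = v4l2_m2m_ioctl_dqbuf,
        .vidioc_expbuf          = v4l2_m2m_ioctl_expbuf,
        .vidioc_streamon        = v4l2_m2m_ioctl_streamon,
        .vidioc_streamoff       = v4l2_m2m_ioctl_streamoff,
        /* format negotiation and other device-specific ioctls stay here */
};

static const struct v4l2_file_operations my_m2m_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .mmap           = v4l2_m2m_fop_mmap,    /* takes m2m_ctx->q_lock itself */
        .poll           = v4l2_m2m_fop_poll,    /* takes m2m_ctx->q_lock itself */
        /* .open/.release stay driver specific: they set up fh->m2m_ctx */
};

These helpers assume file->private_data points to a struct v4l2_fh whose m2m_ctx has been set up in the driver's open(), as the wrappers above show.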
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
new file mode 100644
index 00000000000..b4ed9a955fb
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -0,0 +1,144 @@
+/*
+ * V4L2 OF binding parsing library
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <media/v4l2-of.h>
+
+static void v4l2_of_parse_csi_bus(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
+{
+ struct v4l2_of_bus_mipi_csi2 *bus = &endpoint->bus.mipi_csi2;
+ u32 data_lanes[ARRAY_SIZE(bus->data_lanes)];
+ struct property *prop;
+ bool have_clk_lane = false;
+ unsigned int flags = 0;
+ u32 v;
+
+ prop = of_find_property(node, "data-lanes", NULL);
+ if (prop) {
+ const __be32 *lane = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(data_lanes); i++) {
+ lane = of_prop_next_u32(prop, lane, &data_lanes[i]);
+ if (!lane)
+ break;
+ }
+ bus->num_data_lanes = i;
+ while (i--)
+ bus->data_lanes[i] = data_lanes[i];
+ }
+
+ if (!of_property_read_u32(node, "clock-lanes", &v)) {
+ bus->clock_lane = v;
+ have_clk_lane = true;
+ }
+
+ if (of_get_property(node, "clock-noncontinuous", &v))
+ flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+ else if (have_clk_lane || bus->num_data_lanes > 0)
+ flags |= V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+
+ bus->flags = flags;
+ endpoint->bus_type = V4L2_MBUS_CSI2;
+}
+
+static void v4l2_of_parse_parallel_bus(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
+{
+ struct v4l2_of_bus_parallel *bus = &endpoint->bus.parallel;
+ unsigned int flags = 0;
+ u32 v;
+
+ if (!of_property_read_u32(node, "hsync-active", &v))
+ flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_HSYNC_ACTIVE_LOW;
+
+ if (!of_property_read_u32(node, "vsync-active", &v))
+ flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_VSYNC_ACTIVE_LOW;
+
+ if (!of_property_read_u32(node, "pclk-sample", &v))
+ flags |= v ? V4L2_MBUS_PCLK_SAMPLE_RISING :
+ V4L2_MBUS_PCLK_SAMPLE_FALLING;
+
+ if (!of_property_read_u32(node, "field-even-active", &v))
+ flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
+ V4L2_MBUS_FIELD_EVEN_LOW;
+ if (flags)
+ endpoint->bus_type = V4L2_MBUS_PARALLEL;
+ else
+ endpoint->bus_type = V4L2_MBUS_BT656;
+
+ if (!of_property_read_u32(node, "data-active", &v))
+ flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
+ V4L2_MBUS_DATA_ACTIVE_LOW;
+
+ if (of_get_property(node, "slave-mode", &v))
+ flags |= V4L2_MBUS_SLAVE;
+ else
+ flags |= V4L2_MBUS_MASTER;
+
+ if (!of_property_read_u32(node, "bus-width", &v))
+ bus->bus_width = v;
+
+ if (!of_property_read_u32(node, "data-shift", &v))
+ bus->data_shift = v;
+
+ if (!of_property_read_u32(node, "sync-on-green-active", &v))
+ flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+
+ bus->flags = flags;
+
+}
+
+/**
+ * v4l2_of_parse_endpoint() - parse all endpoint node properties
+ * @node: pointer to endpoint device_node
+ * @endpoint: pointer to the V4L2 OF endpoint data structure
+ *
+ * All properties are optional. If none are found, we don't set any flags.
+ * This means the port has a static configuration and no properties have
+ * to be specified explicitly.
+ * If any properties that identify the bus as parallel are found and
+ * slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if we recognise
+ * the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
+ * V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
+ * The caller should hold a reference to @node.
+ *
+ * Return: 0.
+ */
+int v4l2_of_parse_endpoint(const struct device_node *node,
+ struct v4l2_of_endpoint *endpoint)
+{
+ of_graph_parse_endpoint(node, &endpoint->base);
+ endpoint->bus_type = 0;
+ memset(&endpoint->bus, 0, sizeof(endpoint->bus));
+
+ v4l2_of_parse_csi_bus(node, endpoint);
+ /*
+ * Parse the parallel video bus properties only if none
+ * of the MIPI CSI-2 specific properties were found.
+ */
+ if (endpoint->bus.mipi_csi2.flags == 0)
+ v4l2_of_parse_parallel_bus(node, endpoint);
+
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_of_parse_endpoint);
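A bridge or sensor driver would typically call the new parser from probe() on one of its endpoint nodes. A minimal sketch, under the assumption that the OF graph helpers used above (<linux/of_graph.h>) are available to the driver as well; error handling is trimmed and the function name is invented:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <media/v4l2-of.h>

static int my_parse_first_endpoint(struct device_node *dev_node)
{
        struct v4l2_of_endpoint ep;
        struct device_node *node;

        node = of_graph_get_next_endpoint(dev_node, NULL);
        if (!node)
                return -ENODEV;

        v4l2_of_parse_endpoint(node, &ep);
        of_node_put(node);

        if (ep.bus_type == V4L2_MBUS_CSI2)
                pr_info("CSI-2 bus, %u data lane(s)\n",
                        (unsigned int)ep.bus.mipi_csi2.num_data_lanes);
        else
                pr_info("parallel/BT.656 bus, flags 0x%x\n",
                        ep.bus.parallel.flags);

        return 0;
}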
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 996c248dea4..058c1a6e839 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -305,11 +305,23 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
fse);
}
- case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
+ case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (fi->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
return v4l2_subdev_call(sd, video, g_frame_interval, arg);
+ }
+
+ case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (fi->pad >= sd->entity.num_pads)
+ return -EINVAL;
- case VIDIOC_SUBDEV_S_FRAME_INTERVAL:
return v4l2_subdev_call(sd, video, s_frame_interval, arg);
+ }
case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval_enum *fie = arg;
@@ -349,11 +361,54 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
sd, pad, set_selection, subdev_fh, sel);
}
- case VIDIOC_SUBDEV_G_EDID:
- return v4l2_subdev_call(sd, pad, get_edid, arg);
+ case VIDIOC_G_EDID: {
+ struct v4l2_subdev_edid *edid = arg;
+
+ if (edid->pad >= sd->entity.num_pads)
+ return -EINVAL;
+ if (edid->blocks && edid->edid == NULL)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, get_edid, edid);
+ }
+
+ case VIDIOC_S_EDID: {
+ struct v4l2_subdev_edid *edid = arg;
+
+ if (edid->pad >= sd->entity.num_pads)
+ return -EINVAL;
+ if (edid->blocks && edid->edid == NULL)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, set_edid, edid);
+ }
+
+ case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
+ struct v4l2_dv_timings_cap *cap = arg;
- case VIDIOC_SUBDEV_S_EDID:
- return v4l2_subdev_call(sd, pad, set_edid, arg);
+ if (cap->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
+ }
+
+ case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
+ struct v4l2_enum_dv_timings *dvt = arg;
+
+ if (dvt->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
+ }
+
+ case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
+ return v4l2_subdev_call(sd, video, query_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_G_DV_TIMINGS:
+ return v4l2_subdev_call(sd, video, g_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_S_DV_TIMINGS:
+ return v4l2_subdev_call(sd, video, s_dv_timings, arg);
#endif
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
@@ -368,6 +423,17 @@ static long subdev_ioctl(struct file *file, unsigned int cmd,
return video_usercopy(file, cmd, arg, subdev_do_ioctl);
}
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
+}
+#endif
+
static unsigned int subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
@@ -389,6 +455,9 @@ const struct v4l2_file_operations v4l2_subdev_fops = {
.owner = THIS_MODULE,
.open = subdev_open,
.unlocked_ioctl = subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = subdev_compat_ioctl32,
+#endif
.release = subdev_close,
.poll = subdev_poll,
};
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 5449e8aa984..fb5ee5dd8fe 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -340,7 +340,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
break;
}
- b->flags = 0;
+ b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
if (vb->map)
b->flags |= V4L2_BUF_FLAG_MAPPED;
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 3a43ba0959b..bf80f0f7dfb 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -27,7 +27,6 @@ struct videobuf_dma_contig_memory {
u32 magic;
void *vaddr;
dma_addr_t dma_handle;
- bool cached;
unsigned long size;
};
@@ -43,26 +42,8 @@ static int __videobuf_dc_alloc(struct device *dev,
unsigned long size, gfp_t flags)
{
mem->size = size;
- if (mem->cached) {
- mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
- if (mem->vaddr) {
- int err;
-
- mem->dma_handle = dma_map_single(dev, mem->vaddr,
- mem->size,
- DMA_FROM_DEVICE);
- err = dma_mapping_error(dev, mem->dma_handle);
- if (err) {
- dev_err(dev, "dma_map_single failed\n");
-
- free_pages_exact(mem->vaddr, mem->size);
- mem->vaddr = NULL;
- return err;
- }
- }
- } else
- mem->vaddr = dma_alloc_coherent(dev, mem->size,
- &mem->dma_handle, flags);
+ mem->vaddr = dma_alloc_coherent(dev, mem->size,
+ &mem->dma_handle, flags);
if (!mem->vaddr) {
dev_err(dev, "memory alloc size %ld failed\n", mem->size);
@@ -77,14 +58,7 @@ static int __videobuf_dc_alloc(struct device *dev,
static void __videobuf_dc_free(struct device *dev,
struct videobuf_dma_contig_memory *mem)
{
- if (mem->cached) {
- if (!mem->vaddr)
- return;
- dma_unmap_single(dev, mem->dma_handle, mem->size,
- DMA_FROM_DEVICE);
- free_pages_exact(mem->vaddr, mem->size);
- } else
- dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
+ dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
mem->vaddr = NULL;
}
@@ -234,7 +208,7 @@ out_up:
return ret;
}
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
@@ -244,22 +218,11 @@ static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
vb->priv = ((char *)vb) + size;
mem = vb->priv;
mem->magic = MAGIC_DC_MEM;
- mem->cached = cached;
}
return vb;
}
-static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
-{
- return __videobuf_alloc_vb(size, false);
-}
-
-static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
-{
- return __videobuf_alloc_vb(size, true);
-}
-
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
@@ -310,19 +273,6 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return 0;
}
-static int __videobuf_sync(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- struct videobuf_dma_contig_memory *mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
- DMA_FROM_DEVICE);
-
- return 0;
-}
-
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_buffer *buf,
struct vm_area_struct *vma)
@@ -331,8 +281,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_mapping *map;
int retval;
unsigned long size;
- unsigned long pos, start = vma->vm_start;
- struct page *page;
dev_dbg(q->dev, "%s\n", __func__);
@@ -355,47 +303,15 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
goto error;
/* Try to remap memory */
-
size = vma->vm_end - vma->vm_start;
- size = (size < mem->size) ? size : mem->size;
-
- if (!mem->cached) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- retval = remap_pfn_range(vma, vma->vm_start,
- mem->dma_handle >> PAGE_SHIFT,
- size, vma->vm_page_prot);
- if (retval) {
- dev_err(q->dev, "mmap: remap failed with error %d. ",
- retval);
- dma_free_coherent(q->dev, mem->size,
- mem->vaddr, mem->dma_handle);
- goto error;
- }
- } else {
- pos = (unsigned long)mem->vaddr;
-
- while (size > 0) {
- page = virt_to_page((void *)pos);
- if (NULL == page) {
- dev_err(q->dev, "mmap: virt_to_page failed\n");
- __videobuf_dc_free(q->dev, mem);
- goto error;
- }
- retval = vm_insert_page(vma, start, page);
- if (retval) {
- dev_err(q->dev, "mmap: insert failed with error %d\n",
- retval);
- __videobuf_dc_free(q->dev, mem);
- goto error;
- }
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
-
- if (size > PAGE_SIZE)
- size -= PAGE_SIZE;
- else
- size = 0;
- }
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ retval = vm_iomap_memory(vma, mem->dma_handle, size);
+ if (retval) {
+ dev_err(q->dev, "mmap: remap failed with error %d. ",
+ retval);
+ dma_free_coherent(q->dev, mem->size,
+ mem->vaddr, mem->dma_handle);
+ goto error;
}
vma->vm_ops = &videobuf_vm_ops;
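The hand-rolled remap loop above is replaced by vm_iomap_memory(); as a rough sketch under assumed names (demo_dev and its fields are hypothetical, not from this patch), the same pattern in a driver's own mmap handler looks like this:

/* Sketch only: map a dma_alloc_coherent() region into userspace. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct demo_dev *dev = video_drvdata(file);	/* hypothetical */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* vm_iomap_memory() validates the requested size against the region */
	return vm_iomap_memory(vma, dev->dma_handle, dev->buf_size);
}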
@@ -417,21 +333,12 @@ error:
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
- .alloc_vb = __videobuf_alloc_uncached,
+ .alloc_vb = __videobuf_alloc,
.iolock = __videobuf_iolock,
.mmap_mapper = __videobuf_mmap_mapper,
.vaddr = __videobuf_to_vaddr,
};
-static struct videobuf_qtype_ops qops_cached = {
- .magic = MAGIC_QTYPE_OPS,
- .alloc_vb = __videobuf_alloc_cached,
- .iolock = __videobuf_iolock,
- .sync = __videobuf_sync,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = __videobuf_to_vaddr,
-};
-
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
@@ -447,20 +354,6 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
-void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv, struct mutex *ext_lock)
-{
- videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
- priv, &qops_cached, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);
-
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
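With the cached variant gone, a hedged sketch of the one remaining initializer a driver would call; everything prefixed demo_ is a placeholder, not taken from this patch.

#include <media/videobuf-dma-contig.h>

static void demo_vb_queue_init(struct demo_dev *dev)
{
	videobuf_queue_dma_contig_init(&dev->vb_q, &demo_qops,
				       &dev->pdev->dev, &dev->irqlock,
				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
				       V4L2_FIELD_NONE,
				       sizeof(struct videobuf_buffer),
				       dev, &dev->lock);
}

/* Later, in the DMA setup path, the bus address comes from:
 *	dma_addr_t addr = videobuf_to_dma_contig(vb);
 */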
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index e02c4797b1c..7c4489c4236 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -6,6 +6,9 @@
* Author: Pawel Osciak <pawel@osciak.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
*
+ * The vb2_thread implementation was based on code from videobuf-dvb.c:
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
@@ -18,31 +21,167 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
#include <media/videobuf2-core.h>
static int debug;
module_param(debug, int, 0644);
-#define dprintk(level, fmt, arg...) \
- do { \
- if (debug >= level) \
- printk(KERN_DEBUG "vb2: " fmt, ## arg); \
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ pr_debug("vb2: %s: " fmt, __func__, ## arg); \
} while (0)
-#define call_memop(q, op, args...) \
- (((q)->mem_ops->op) ? \
- ((q)->mem_ops->op(args)) : 0)
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+/*
+ * If advanced debugging is on, then count how often each op is called
+ * successfully, which can either be per-buffer or per-queue.
+ *
+ * This makes it easy to check that the 'init' and 'cleanup'
+ * (and variations thereof) stay balanced.
+ */
+
+#define log_memop(vb, op) \
+ dprintk(2, "call_memop(%p, %d, %s)%s\n", \
+ (vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \
+ (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
+
+#define call_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ int err; \
+ \
+ log_memop(vb, op); \
+ err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_mem_ ## op++; \
+ err; \
+})
+
+#define call_ptr_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ void *ptr; \
+ \
+ log_memop(vb, op); \
+ ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
+ if (!IS_ERR_OR_NULL(ptr)) \
+ (vb)->cnt_mem_ ## op++; \
+ ptr; \
+})
+
+#define call_void_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ \
+ log_memop(vb, op); \
+ if (_q->mem_ops->op) \
+ _q->mem_ops->op(args); \
+ (vb)->cnt_mem_ ## op++; \
+})
+
+#define log_qop(q, op) \
+ dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
+ (q)->ops->op ? "" : " (nop)")
+
+#define call_qop(q, op, args...) \
+({ \
+ int err; \
+ \
+ log_qop(q, op); \
+ err = (q)->ops->op ? (q)->ops->op(args) : 0; \
+ if (!err) \
+ (q)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_qop(q, op, args...) \
+({ \
+ log_qop(q, op); \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ (q)->cnt_ ## op++; \
+})
+
+#define log_vb_qop(vb, op, args...) \
+ dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
+ (vb)->vb2_queue, (vb)->v4l2_buf.index, #op, \
+ (vb)->vb2_queue->ops->op ? "" : " (nop)")
+
+#define call_vb_qop(vb, op, args...) \
+({ \
+ int err; \
+ \
+ log_vb_qop(vb, op); \
+ err = (vb)->vb2_queue->ops->op ? \
+ (vb)->vb2_queue->ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_vb_qop(vb, op, args...) \
+({ \
+ log_vb_qop(vb, op); \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ (vb)->cnt_ ## op++; \
+})
+
+#else
+
+#define call_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : 0)
+
+#define call_ptr_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : NULL)
+
+#define call_void_memop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->mem_ops->op) \
+ (vb)->vb2_queue->mem_ops->op(args); \
+ } while (0)
#define call_qop(q, op, args...) \
- (((q)->ops->op) ? ((q)->ops->op(args)) : 0)
+ ((q)->ops->op ? (q)->ops->op(args) : 0)
-#define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
+#define call_void_qop(q, op, args...) \
+ do { \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ } while (0)
+
+#define call_vb_qop(vb, op, args...) \
+ ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
+
+#define call_void_vb_qop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ } while (0)
+
+#endif
+
+/* Flags that are set by the vb2 core */
+#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
- V4L2_BUF_FLAG_PREPARED)
+ V4L2_BUF_FLAG_PREPARED | \
+ V4L2_BUF_FLAG_TIMESTAMP_MASK)
+/* Output buffer flags that should be passed on to the driver */
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+
+static void __vb2_queue_cancel(struct vb2_queue *q);
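As a minimal stand-alone illustration of the statement-expression pattern these wrappers use (not the patch's exact macros): call the op only if the driver provides it, count it only on success, and hand the result back to the caller.

#define demo_call_counted(obj, op, args...)				\
({									\
	int __err = (obj)->ops->op ? (obj)->ops->op(args) : 0;		\
	/* only successful calls are counted, so pairs stay balanced */	\
	if (!__err)							\
		(obj)->cnt_ ## op++;					\
	__err;								\
})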
/**
* __vb2_buf_mem_alloc() - allocate video memory for the given buffer
@@ -53,10 +192,15 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
void *mem_priv;
int plane;
- /* Allocate memory for all planes in this buffer */
+ /*
+ * Allocate memory for all planes in this buffer
+ * NOTE: mmapped areas should be page aligned
+ */
for (plane = 0; plane < vb->num_planes; ++plane) {
- mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
- q->plane_sizes[plane]);
+ unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
+
+ mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
+ size, q->gfp_flags);
if (IS_ERR_OR_NULL(mem_priv))
goto free;
@@ -69,7 +213,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
free:
/* Free already allocated memory if one of the allocations failed */
for (; plane > 0; --plane) {
- call_memop(q, put, vb->planes[plane - 1].mem_priv);
+ call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
vb->planes[plane - 1].mem_priv = NULL;
}
@@ -81,13 +225,12 @@ free:
*/
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
- struct vb2_queue *q = vb->vb2_queue;
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane) {
- call_memop(q, put, vb->planes[plane].mem_priv);
+ call_void_memop(vb, put, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
- dprintk(3, "Freed plane %d of buffer %d\n", plane,
+ dprintk(3, "freed plane %d of buffer %d\n", plane,
vb->v4l2_buf.index);
}
}
@@ -98,12 +241,11 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)
*/
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
- struct vb2_queue *q = vb->vb2_queue;
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane) {
if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
}
}
@@ -112,15 +254,15 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
* __vb2_plane_dmabuf_put() - release memory associated with
* a DMABUF shared plane
*/
-static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
if (!p->mem_priv)
return;
if (p->dbuf_mapped)
- call_memop(q, unmap_dmabuf, p->mem_priv);
+ call_void_memop(vb, unmap_dmabuf, p->mem_priv);
- call_memop(q, detach_dmabuf, p->mem_priv);
+ call_void_memop(vb, detach_dmabuf, p->mem_priv);
dma_buf_put(p->dbuf);
memset(p, 0, sizeof(*p));
}
@@ -131,11 +273,29 @@ static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
*/
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
- struct vb2_queue *q = vb->vb2_queue;
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane)
- __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+}
+
+/**
+ * __setup_lengths() - setup initial lengths for every plane in
+ * every buffer on the queue
+ */
+static void __setup_lengths(struct vb2_queue *q, unsigned int n)
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+
+ for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+ }
}
/**
@@ -163,10 +323,9 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
continue;
for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->v4l2_planes[plane].length = q->plane_sizes[plane];
vb->v4l2_planes[plane].m.mem_offset = off;
- dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
+ dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
buffer, plane, off);
off += vb->v4l2_planes[plane].length;
@@ -193,7 +352,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
/* Allocate videobuf buffer structures */
vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
if (!vb) {
- dprintk(1, "Memory alloc for buffer struct failed\n");
+ dprintk(1, "memory alloc for buffer struct failed\n");
break;
}
@@ -212,7 +371,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
if (memory == V4L2_MEMORY_MMAP) {
ret = __vb2_buf_mem_alloc(vb);
if (ret) {
- dprintk(1, "Failed allocating memory for "
+ dprintk(1, "failed allocating memory for "
"buffer %d\n", buffer);
kfree(vb);
break;
@@ -222,9 +381,9 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
* callback, if given. An error in initialization
* results in queue setup failure.
*/
- ret = call_qop(q, buf_init, vb);
+ ret = call_vb_qop(vb, buf_init, vb);
if (ret) {
- dprintk(1, "Buffer %d %p initialization"
+ dprintk(1, "buffer %d %p initialization"
" failed\n", buffer, vb);
__vb2_buf_mem_free(vb);
kfree(vb);
@@ -235,9 +394,11 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
q->bufs[q->num_buffers + buffer] = vb;
}
- __setup_offsets(q, buffer);
+ __setup_lengths(q, buffer);
+ if (memory == V4L2_MEMORY_MMAP)
+ __setup_offsets(q, buffer);
- dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
+ dprintk(1, "allocated %d buffers, %d plane(s) each\n",
buffer, num_planes);
return buffer;
@@ -272,23 +433,102 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
* related information, if no buffers are left return the queue to an
* uninitialized state. Might be called even if the queue has already been freed.
*/
-static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
unsigned int buffer;
- /* Call driver-provided cleanup function for each buffer, if provided */
- if (q->ops->buf_cleanup) {
- for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
- ++buffer) {
- if (NULL == q->bufs[buffer])
- continue;
- q->ops->buf_cleanup(q->bufs[buffer]);
+ /*
+ * Sanity check: when preparing a buffer the queue lock is released for
+ * a short while (see __buf_prepare for the details), which would allow
+ * a race with a reqbufs which can call this function. Removing the
+ * buffers from underneath __buf_prepare is obviously a bad idea, so we
+ * check if any of the buffers is in the state PREPARING, and if so we
+ * just return -EAGAIN.
+ */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ if (q->bufs[buffer] == NULL)
+ continue;
+ if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
+ dprintk(1, "preparing buffers, cannot free\n");
+ return -EAGAIN;
}
}
+ /* Call driver-provided cleanup function for each buffer, if provided */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+
+ if (vb && vb->planes[0].mem_priv)
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+
/* Release video buffer memory */
__vb2_free_mem(q, buffers);
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Check that all the calls were balances during the life-time of this
+ * queue. If not (or if the debug level is 1 or up), then dump the
+ * counters to the kernel log.
+ */
+ if (q->num_buffers) {
+ bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
+ q->cnt_wait_prepare != q->cnt_wait_finish;
+
+ if (unbalanced || debug) {
+ pr_info("vb2: counters for queue %p:%s\n", q,
+ unbalanced ? " UNBALANCED!" : "");
+ pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
+ q->cnt_queue_setup, q->cnt_start_streaming,
+ q->cnt_stop_streaming);
+ pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
+ q->cnt_wait_prepare, q->cnt_wait_finish);
+ }
+ q->cnt_queue_setup = 0;
+ q->cnt_wait_prepare = 0;
+ q->cnt_wait_finish = 0;
+ q->cnt_start_streaming = 0;
+ q->cnt_stop_streaming = 0;
+ }
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+ bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
+ vb->cnt_mem_prepare != vb->cnt_mem_finish ||
+ vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
+ vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
+ vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
+ vb->cnt_buf_queue != vb->cnt_buf_done ||
+ vb->cnt_buf_prepare != vb->cnt_buf_finish ||
+ vb->cnt_buf_init != vb->cnt_buf_cleanup;
+
+ if (unbalanced || debug) {
+ pr_info("vb2: counters for queue %p, buffer %d:%s\n",
+ q, buffer, unbalanced ? " UNBALANCED!" : "");
+ pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
+ vb->cnt_buf_init, vb->cnt_buf_cleanup,
+ vb->cnt_buf_prepare, vb->cnt_buf_finish);
+ pr_info("vb2: buf_queue: %u buf_done: %u\n",
+ vb->cnt_buf_queue, vb->cnt_buf_done);
+ pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
+ vb->cnt_mem_alloc, vb->cnt_mem_put,
+ vb->cnt_mem_prepare, vb->cnt_mem_finish,
+ vb->cnt_mem_mmap);
+ pr_info("vb2: get_userptr: %u put_userptr: %u\n",
+ vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
+ pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
+ vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
+ vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
+ pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
+ vb->cnt_mem_get_dmabuf,
+ vb->cnt_mem_num_users,
+ vb->cnt_mem_vaddr,
+ vb->cnt_mem_cookie);
+ }
+ }
+#endif
+
/* Free videobuf buffers */
for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
++buffer) {
@@ -297,9 +537,11 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
}
q->num_buffers -= buffers;
- if (!q->num_buffers)
+ if (!q->num_buffers) {
q->memory = 0;
- INIT_LIST_HEAD(&q->queued_list);
+ INIT_LIST_HEAD(&q->queued_list);
+ }
+ return 0;
}
/**
@@ -313,13 +555,13 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
/* Is memory for copying plane information present? */
if (NULL == b->m.planes) {
- dprintk(1, "Multi-planar buffer passed but "
+ dprintk(1, "multi-planar buffer passed but "
"planes array not provided\n");
return -EINVAL;
}
if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
- dprintk(1, "Incorrect planes array length, "
+ dprintk(1, "incorrect planes array length, "
"expected %d, got %d\n", vb->num_planes, b->length);
return -EINVAL;
}
@@ -328,6 +570,43 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
}
/**
+ * __verify_length() - Verify that the bytesused value for each plane fits in
+ * the plane length and that the data offset doesn't exceed the bytesused value.
+ */
+static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ unsigned int length;
+ unsigned int plane;
+
+ if (!V4L2_TYPE_IS_OUTPUT(b->type))
+ return 0;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ length = (b->memory == V4L2_MEMORY_USERPTR)
+ ? b->m.planes[plane].length
+ : vb->v4l2_planes[plane].length;
+
+ if (b->m.planes[plane].bytesused > length)
+ return -EINVAL;
+
+ if (b->m.planes[plane].data_offset > 0 &&
+ b->m.planes[plane].data_offset >=
+ b->m.planes[plane].bytesused)
+ return -EINVAL;
+ }
+ } else {
+ length = (b->memory == V4L2_MEMORY_USERPTR)
+ ? b->length : vb->v4l2_planes[0].length;
+
+ if (b->bytesused > length)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
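A hedged userspace sketch of what the new check accepts on a multi-planar OUTPUT queue: bytesused must fit within the plane length and any data_offset must stay below bytesused. The values and surrounding setup are illustrative only.

struct v4l2_plane planes[1] = {
	{ .bytesused = 65536, .data_offset = 0, .length = 65536 },
};
struct v4l2_buffer buf = {
	.type	  = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
	.memory	  = V4L2_MEMORY_MMAP,
	.index	  = 0,
	.length	  = 1,			/* number of planes */
	.m.planes = planes,
};
/* ioctl(fd, VIDIOC_QBUF, &buf) now fails with EINVAL if bytesused
 * exceeds the plane length or data_offset >= bytesused. */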
+/**
* __buffer_in_use() - return true if the buffer is in use and
* the queue cannot be freed (by the means of REQBUFS(0)) call
*/
@@ -342,7 +621,7 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
* case anyway. If num_users() returns more than 1,
* we are not the only user of the plane's memory.
*/
- if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
+ if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
return true;
}
return false;
@@ -401,7 +680,17 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
/*
* Clear any buffer state related flags.
*/
- b->flags &= ~V4L2_BUFFER_STATE_FLAGS;
+ b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
+ if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
+ V4L2_BUF_FLAG_TIMESTAMP_COPY) {
+ /*
+ * For non-COPY timestamps, drop timestamp source bits
+ * and obtain the timestamp source from the queue.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
switch (vb->state) {
case VB2_BUF_STATE_QUEUED:
@@ -417,6 +706,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
case VB2_BUF_STATE_PREPARED:
b->flags |= V4L2_BUF_FLAG_PREPARED;
break;
+ case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
/* nothing */
break;
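For completeness, a small userspace-side sketch (assumed application code, same includes as the earlier sketch) of reading the timestamp source bits the core now fills in alongside the clock bits:

/* Illustrative only: inspect the timestamp source after VIDIOC_DQBUF. */
static void demo_print_ts_source(const struct v4l2_buffer *b)
{
	if ((b->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK) ==
	    V4L2_BUF_FLAG_TSTAMP_SRC_SOE)
		printf("timestamp taken at start of exposure\n");
	else
		printf("timestamp taken at end of frame (EOF)\n");
}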
@@ -445,12 +735,12 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
int ret;
if (b->type != q->type) {
- dprintk(1, "querybuf: wrong buffer type\n");
+ dprintk(1, "wrong buffer type\n");
return -EINVAL;
}
if (b->index >= q->num_buffers) {
- dprintk(1, "querybuf: buffer index out of range\n");
+ dprintk(1, "buffer index out of range\n");
return -EINVAL;
}
vb = q->bufs[b->index];
@@ -510,12 +800,12 @@ static int __verify_memory_type(struct vb2_queue *q,
{
if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
memory != V4L2_MEMORY_DMABUF) {
- dprintk(1, "reqbufs: unsupported memory type\n");
+ dprintk(1, "unsupported memory type\n");
return -EINVAL;
}
if (type != q->type) {
- dprintk(1, "reqbufs: requested type is incorrect\n");
+ dprintk(1, "requested type is incorrect\n");
return -EINVAL;
}
@@ -524,17 +814,17 @@ static int __verify_memory_type(struct vb2_queue *q,
* are available.
*/
if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
- dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
+ dprintk(1, "MMAP for current setup unsupported\n");
return -EINVAL;
}
if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
- dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
+ dprintk(1, "USERPTR for current setup unsupported\n");
return -EINVAL;
}
if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
- dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
+ dprintk(1, "DMABUF for current setup unsupported\n");
return -EINVAL;
}
@@ -543,8 +833,8 @@ static int __verify_memory_type(struct vb2_queue *q,
* create_bufs is called with count == 0, but count == 0 should still
* do the memory and type validation.
*/
- if (q->fileio) {
- dprintk(1, "reqbufs: file io in progress\n");
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
return -EBUSY;
}
return 0;
@@ -579,7 +869,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
int ret;
if (q->streaming) {
- dprintk(1, "reqbufs: streaming active\n");
+ dprintk(1, "streaming active\n");
return -EBUSY;
}
@@ -589,11 +879,19 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* are not in use and can be freed.
*/
if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
- dprintk(1, "reqbufs: memory in use, cannot free\n");
+ dprintk(1, "memory in use, cannot free\n");
return -EBUSY;
}
- __vb2_queue_free(q, q->num_buffers);
+ /*
+ * Call queue_cancel to clean up any buffers in the PREPARED or
+ * QUEUED state which is possible if buffers were prepared or
+ * queued without ever calling STREAMON.
+ */
+ __vb2_queue_cancel(q);
+ ret = __vb2_queue_free(q, q->num_buffers);
+ if (ret)
+ return ret;
/*
* In case of REQBUFS(0) return immediately without calling
@@ -607,6 +905,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* Make sure the requested values and current defaults are sane.
*/
num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
+ num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
q->memory = req->memory;
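A hedged sketch of how a driver opts into the minimum-buffers logic when it sets up its vb2 queue; the demo_* names and the choice of three buffers are assumptions, not taken from this patch.

static int demo_init_vb2_queue(struct demo_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct demo_buffer); /* embeds vb2_buffer */
	q->ops = &demo_vb2_ops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 3;	/* DMA engine needs 3 queued buffers */
	q->lock = &dev->lock;

	return vb2_queue_init(q);
}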
@@ -621,18 +920,23 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return ret;
/* Finally, allocate buffers and video memory */
- ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
- if (ret == 0) {
- dprintk(1, "Memory allocation failed\n");
+ allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
+ if (allocated_buffers == 0) {
+ dprintk(1, "memory allocation failed\n");
return -ENOMEM;
}
- allocated_buffers = ret;
+ /*
+ * There is no point in continuing if we can't allocate the minimum
+ * number of buffers needed by this vb2_queue.
+ */
+ if (allocated_buffers < q->min_buffers_needed)
+ ret = -ENOMEM;
/*
* Check if driver can handle the allocated number of buffers.
*/
- if (allocated_buffers < num_buffers) {
+ if (!ret && allocated_buffers < num_buffers) {
num_buffers = allocated_buffers;
ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
@@ -650,6 +954,10 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
q->num_buffers = allocated_buffers;
if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers.
+ */
__vb2_queue_free(q, allocated_buffers);
return ret;
}
@@ -698,8 +1006,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
int ret;
if (q->num_buffers == VIDEO_MAX_FRAME) {
- dprintk(1, "%s(): maximum number of buffers already allocated\n",
- __func__);
+ dprintk(1, "maximum number of buffers already allocated\n");
return -ENOBUFS;
}
@@ -721,20 +1028,18 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
return ret;
/* Finally, allocate buffers and video memory */
- ret = __vb2_queue_alloc(q, create->memory, num_buffers,
+ allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
num_planes);
- if (ret == 0) {
- dprintk(1, "Memory allocation failed\n");
+ if (allocated_buffers == 0) {
+ dprintk(1, "memory allocation failed\n");
return -ENOMEM;
}
- allocated_buffers = ret;
-
/*
* Check if driver can handle the so far allocated number of buffers.
*/
- if (ret < num_buffers) {
- num_buffers = ret;
+ if (allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
/*
* q->num_buffers contains the total number of buffers, that the
@@ -755,6 +1060,10 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
q->num_buffers += allocated_buffers;
if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers.
+ */
__vb2_queue_free(q, allocated_buffers);
return -ENOMEM;
}
@@ -796,12 +1105,10 @@ EXPORT_SYMBOL_GPL(vb2_create_bufs);
*/
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
- struct vb2_queue *q = vb->vb2_queue;
-
if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
+ return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
@@ -819,12 +1126,10 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
*/
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
- struct vb2_queue *q = vb->vb2_queue;
-
if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
+ return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
@@ -832,13 +1137,20 @@ EXPORT_SYMBOL_GPL(vb2_plane_cookie);
* vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
* @vb: vb2_buffer returned from the driver
* @state: either VB2_BUF_STATE_DONE if the operation finished successfully
- * or VB2_BUF_STATE_ERROR if the operation finished with an error
+ * or VB2_BUF_STATE_ERROR if the operation finished with an error.
+ * If start_streaming fails then it should return buffers with state
+ * VB2_BUF_STATE_QUEUED to put them back into the queue.
*
* This function should be called by the driver after a hardware operation on
* a buffer is finished and the buffer may be returned to userspace. The driver
* cannot use this buffer anymore until it is queued back to it by videobuf
* by the means of buf_queue callback. Only buffers previously queued to the
* driver by buf_queue can be passed to this function.
+ *
+ * While streaming a buffer can only be returned in state DONE or ERROR.
+ * The start_streaming op can also return them in case the DMA engine cannot
+ * be started for some reason. In that case the buffers should be returned with
+ * state QUEUED.
*/
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
@@ -846,32 +1158,72 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
unsigned long flags;
unsigned int plane;
- if (vb->state != VB2_BUF_STATE_ACTIVE)
+ if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
return;
- if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
- return;
+ if (!q->start_streaming_called) {
+ if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
+ state = VB2_BUF_STATE_QUEUED;
+ } else if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+ state != VB2_BUF_STATE_ERROR)) {
+ state = VB2_BUF_STATE_ERROR;
+ }
- dprintk(4, "Done processing on buffer %d, state: %d\n",
- vb->v4l2_buf.index, vb->state);
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Although this is not a callback, it still does have to balance
+ * with the buf_queue op. So update this counter manually.
+ */
+ vb->cnt_buf_done++;
+#endif
+ dprintk(4, "done processing on buffer %d, state: %d\n",
+ vb->v4l2_buf.index, state);
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
- call_memop(q, finish, vb->planes[plane].mem_priv);
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
/* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
vb->state = state;
- list_add_tail(&vb->done_entry, &q->done_list);
- atomic_dec(&q->queued_count);
+ if (state != VB2_BUF_STATE_QUEUED)
+ list_add_tail(&vb->done_entry, &q->done_list);
+ atomic_dec(&q->owned_by_drv_count);
spin_unlock_irqrestore(&q->done_lock, flags);
+ if (state == VB2_BUF_STATE_QUEUED)
+ return;
+
/* Inform any processes that may be waiting for buffers */
wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
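As a driver-side illustration of the tightened state rules (all demo_* identifiers are hypothetical): once streaming has started, a completion interrupt hands the buffer back in DONE or ERROR, never QUEUED.

static irqreturn_t demo_irq_handler(int irq, void *priv)
{
	struct demo_dev *dev = priv;
	struct vb2_buffer *vb = dev->active;	/* buffer the HW just filled */

	if (!vb)
		return IRQ_NONE;

	v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
	vb2_buffer_done(vb, dev->hw_error ? VB2_BUF_STATE_ERROR
					  : VB2_BUF_STATE_DONE);
	dev->active = NULL;
	return IRQ_HANDLED;
}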
/**
+ * vb2_discard_done() - discard all buffers marked as DONE
+ * @q: videobuf2 queue
+ *
+ * This function is intended to be used with suspend/resume operations. It
+ * discards all 'done' buffers as they would be too old to be requested after
+ * resume.
+ *
+ * Drivers must stop the hardware and synchronize with interrupt handlers and/or
+ * delayed work items before calling this function to make sure no buffer will be
+ * touched by the driver and/or hardware.
+ */
+void vb2_discard_done(struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ list_for_each_entry(vb, &q->done_list, done_entry)
+ vb->state = VB2_BUF_STATE_ERROR;
+ spin_unlock_irqrestore(&q->done_lock, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_discard_done);
+
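A hedged sketch of the intended resume-path usage (function names other than vb2_discard_done are placeholders); the driver must have quiesced its DMA first, as the comment above requires.

static int demo_resume(struct device *dev)
{
	struct demo_dev *ddev = dev_get_drvdata(dev);

	/* Anything completed before suspend is stale now. */
	vb2_discard_done(&ddev->queue);
	return demo_restart_streaming(ddev);
}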
+/**
* __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
* v4l2_buffer by the userspace. The caller has already verified that struct
* v4l2_buffer has a valid number of planes.
@@ -884,15 +1236,30 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
/* Fill in driver-provided information for OUTPUT types */
if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ bool bytesused_is_used;
+
+ /* Check if bytesused == 0 for all planes */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ if (b->m.planes[plane].bytesused)
+ break;
+ bytesused_is_used = plane < vb->num_planes;
+
/*
* Will have to go up to b->length when API starts
* accepting variable number of planes.
+ *
+ * If bytesused_is_used is false, then fall back to the
+ * full buffer size. In that case userspace clearly
+ * never bothered to set it and it's a safe assumption
+ * that they really meant to use the full plane sizes.
*/
for (plane = 0; plane < vb->num_planes; ++plane) {
- v4l2_planes[plane].bytesused =
- b->m.planes[plane].bytesused;
- v4l2_planes[plane].data_offset =
- b->m.planes[plane].data_offset;
+ struct v4l2_plane *pdst = &v4l2_planes[plane];
+ struct v4l2_plane *psrc = &b->m.planes[plane];
+
+ pdst->bytesused = bytesused_is_used ?
+ psrc->bytesused : psrc->length;
+ pdst->data_offset = psrc->data_offset;
}
}
@@ -910,8 +1277,6 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
b->m.planes[plane].m.fd;
v4l2_planes[plane].length =
b->m.planes[plane].length;
- v4l2_planes[plane].data_offset =
- b->m.planes[plane].data_offset;
}
}
} else {
@@ -920,11 +1285,15 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
* so fill in relevant v4l2_buffer struct fields instead.
* In videobuf we use our internal V4l2_planes struct for
* single-planar buffers as well, for simplicity.
+ *
+ * If bytesused == 0, then fall back to the full buffer size
+ * as that's a sensible default.
*/
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- v4l2_planes[0].bytesused = b->bytesused;
- v4l2_planes[0].data_offset = 0;
- }
+ if (V4L2_TYPE_IS_OUTPUT(b->type))
+ v4l2_planes[0].bytesused =
+ b->bytesused ? b->bytesused : b->length;
+ else
+ v4l2_planes[0].bytesused = 0;
if (b->memory == V4L2_MEMORY_USERPTR) {
v4l2_planes[0].m.userptr = b->m.userptr;
@@ -934,14 +1303,43 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
if (b->memory == V4L2_MEMORY_DMABUF) {
v4l2_planes[0].m.fd = b->m.fd;
v4l2_planes[0].length = b->length;
- v4l2_planes[0].data_offset = 0;
}
+ }
+ /* Zero flags that the vb2 core handles */
+ vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+ if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
+ V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Non-COPY timestamps and non-OUTPUT queues will get
+ * their timestamp and timestamp source flags from the
+ * queue.
+ */
+ vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * For output buffers mask out the timecode flag:
+ * this will be handled later in vb2_internal_qbuf().
+ * The 'field' is valid metadata for this output buffer
+ * and so that needs to be copied here.
+ */
+ vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
+ vb->v4l2_buf.field = b->field;
+ } else {
+ /* Zero any output buffer flags as this is a capture buffer */
+ vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
}
+}
- vb->v4l2_buf.field = b->field;
- vb->v4l2_buf.timestamp = b->timestamp;
- vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;
+/**
+ * __qbuf_mmap() - handle qbuf of an MMAP buffer
+ */
+static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ __fill_vb2_buffer(vb, b, vb->v4l2_planes);
+ return call_vb_qop(vb, buf_prepare, vb);
}
/**
@@ -955,7 +1353,9 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
unsigned int plane;
int ret;
int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+ bool reacquired = vb->planes[0].mem_priv == NULL;
+ memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
__fill_vb2_buffer(vb, b, planes);
@@ -966,29 +1366,37 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
&& vb->v4l2_planes[plane].length == planes[plane].length)
continue;
- dprintk(3, "qbuf: userspace address for plane %d changed, "
+ dprintk(3, "userspace address for plane %d changed, "
"reacquiring memory\n", plane);
/* Check if the provided plane buffer is large enough */
if (planes[plane].length < q->plane_sizes[plane]) {
+ dprintk(1, "provided buffer size %u is less than "
+ "setup size %u for plane %d\n",
+ planes[plane].length,
+ q->plane_sizes[plane], plane);
ret = -EINVAL;
goto err;
}
/* Release previously acquired memory if present */
- if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+ if (vb->planes[plane].mem_priv) {
+ if (!reacquired) {
+ reacquired = true;
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ }
vb->planes[plane].mem_priv = NULL;
- vb->v4l2_planes[plane].m.userptr = 0;
- vb->v4l2_planes[plane].length = 0;
+ memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
/* Acquire each plane's memory */
- mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
+ mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
planes[plane].m.userptr,
planes[plane].length, write);
if (IS_ERR_OR_NULL(mem_priv)) {
- dprintk(1, "qbuf: failed acquiring userspace "
+ dprintk(1, "failed acquiring userspace "
"memory for plane %d\n", plane);
ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
goto err;
@@ -997,28 +1405,38 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/*
- * Call driver-specific initialization on the newly acquired buffer,
- * if provided.
- */
- ret = call_qop(q, buf_init, vb);
- if (ret) {
- dprintk(1, "qbuf: buffer initialization failed\n");
- goto err;
- }
-
- /*
* Now that everything is in order, copy relevant information
* provided by userspace.
*/
for (plane = 0; plane < vb->num_planes; ++plane)
vb->v4l2_planes[plane] = planes[plane];
+ if (reacquired) {
+ /*
+ * One or more planes changed, so we must call buf_init to do
+ * the driver-specific initialization on the newly acquired
+ * buffer, if provided.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(1, "buffer initialization failed\n");
+ goto err;
+ }
+ }
+
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret) {
+ dprintk(1, "buffer preparation failed\n");
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ goto err;
+ }
+
return 0;
err:
/* In case of errors, release planes that were already acquired */
for (plane = 0; plane < vb->num_planes; ++plane) {
if (vb->planes[plane].mem_priv)
- call_memop(q, put_userptr, vb->planes[plane].mem_priv);
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
vb->planes[plane].mem_priv = NULL;
vb->v4l2_planes[plane].m.userptr = 0;
vb->v4l2_planes[plane].length = 0;
@@ -1028,15 +1446,6 @@ err:
}
/**
- * __qbuf_mmap() - handle qbuf of an MMAP buffer
- */
-static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
-{
- __fill_vb2_buffer(vb, b, vb->v4l2_planes);
- return 0;
-}
-
-/**
* __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
*/
static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
@@ -1047,15 +1456,17 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
unsigned int plane;
int ret;
int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+ bool reacquired = vb->planes[0].mem_priv == NULL;
- /* Verify and copy relevant information provided by the userspace */
+ memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
+ /* Copy relevant information provided by the userspace */
__fill_vb2_buffer(vb, b, planes);
for (plane = 0; plane < vb->num_planes; ++plane) {
struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
if (IS_ERR_OR_NULL(dbuf)) {
- dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
+ dprintk(1, "invalid dmabuf fd for plane %d\n",
plane);
ret = -EINVAL;
goto err;
@@ -1065,8 +1476,9 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
if (planes[plane].length == 0)
planes[plane].length = dbuf->size;
- if (planes[plane].length < planes[plane].data_offset +
- q->plane_sizes[plane]) {
+ if (planes[plane].length < q->plane_sizes[plane]) {
+ dprintk(1, "invalid dmabuf length for plane %d\n",
+ plane);
ret = -EINVAL;
goto err;
}
@@ -1078,17 +1490,22 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
continue;
}
- dprintk(1, "qbuf: buffer for plane %d changed\n", plane);
+ dprintk(1, "buffer for plane %d changed\n", plane);
+
+ if (!reacquired) {
+ reacquired = true;
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
/* Release previously acquired memory if present */
- __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
/* Acquire each plane's memory */
- mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+ mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
dbuf, planes[plane].length, write);
if (IS_ERR(mem_priv)) {
- dprintk(1, "qbuf: failed to attach dmabuf\n");
+ dprintk(1, "failed to attach dmabuf\n");
ret = PTR_ERR(mem_priv);
dma_buf_put(dbuf);
goto err;
@@ -1103,9 +1520,9 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
* the buffer(s)..
*/
for (plane = 0; plane < vb->num_planes; ++plane) {
- ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+ ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
if (ret) {
- dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
+ dprintk(1, "failed to map dmabuf for plane %d\n",
plane);
goto err;
}
@@ -1113,22 +1530,31 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
}
/*
- * Call driver-specific initialization on the newly acquired buffer,
- * if provided.
- */
- ret = call_qop(q, buf_init, vb);
- if (ret) {
- dprintk(1, "qbuf: buffer initialization failed\n");
- goto err;
- }
-
- /*
* Now that everything is in order, copy relevant information
* provided by userspace.
*/
for (plane = 0; plane < vb->num_planes; ++plane)
vb->v4l2_planes[plane] = planes[plane];
+ if (reacquired) {
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(1, "buffer initialization failed\n");
+ goto err;
+ }
+ }
+
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret) {
+ dprintk(1, "buffer preparation failed\n");
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ goto err;
+ }
+
return 0;
err:
/* In case of errors, release planes that were already acquired */
@@ -1146,26 +1572,70 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
unsigned int plane;
vb->state = VB2_BUF_STATE_ACTIVE;
- atomic_inc(&q->queued_count);
+ atomic_inc(&q->owned_by_drv_count);
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
- call_memop(q, prepare, vb->planes[plane].mem_priv);
+ call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
- q->ops->buf_queue(vb);
+ call_void_vb_qop(vb, buf_queue, vb);
}
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
struct vb2_queue *q = vb->vb2_queue;
+ struct rw_semaphore *mmap_sem;
int ret;
+ ret = __verify_length(vb, b);
+ if (ret < 0) {
+ dprintk(1, "plane parameters verification failed: %d\n", ret);
+ return ret;
+ }
+ if (b->field == V4L2_FIELD_ALTERNATE && V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * If the format's field is ALTERNATE, then the buffer's field
+ * should be either TOP or BOTTOM, not ALTERNATE since that
+ * makes no sense. The driver has to know whether the
+ * buffer represents a top or a bottom field in order to
+ * program any DMA correctly. Using ALTERNATE is wrong, since
+ * that just says that it is either a top or a bottom field,
+ * but not which of the two it is.
+ */
+ dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
+ return -EINVAL;
+ }
+
+ vb->state = VB2_BUF_STATE_PREPARING;
+ vb->v4l2_buf.timestamp.tv_sec = 0;
+ vb->v4l2_buf.timestamp.tv_usec = 0;
+ vb->v4l2_buf.sequence = 0;
+
switch (q->memory) {
case V4L2_MEMORY_MMAP:
ret = __qbuf_mmap(vb, b);
break;
case V4L2_MEMORY_USERPTR:
+ /*
+ * In case of user pointer buffers vb2 allocators need to get
+ * direct access to userspace pages. This requires getting
+ * the mmap semaphore for read access in the current process
+ * structure. The same semaphore is taken before calling mmap
+ * operation, while both qbuf/prepare_buf and mmap are called
+ * by the driver or v4l2 core with the driver's lock held.
+ * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
+ * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
+ * the videobuf2 core releases the driver's lock, takes
+ * mmap_sem and then takes the driver's lock again.
+ */
+ mmap_sem = &current->mm->mmap_sem;
+ call_void_qop(q, wait_prepare, q);
+ down_read(mmap_sem);
+ call_void_qop(q, wait_finish, q);
+
ret = __qbuf_userptr(vb, b);
+
+ up_read(mmap_sem);
break;
case V4L2_MEMORY_DMABUF:
ret = __qbuf_dmabuf(vb, b);
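For reference, a hedged sketch of the wait_prepare/wait_finish pair this code path relies on; most drivers simply drop and retake their own lock (the demo_* names are assumptions).

static void demo_wait_prepare(struct vb2_queue *q)
{
	struct demo_dev *dev = vb2_get_drv_priv(q);

	mutex_unlock(&dev->lock);
}

static void demo_wait_finish(struct vb2_queue *q)
{
	struct demo_dev *dev = vb2_get_drv_priv(q);

	mutex_lock(&dev->lock);
}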
@@ -1175,16 +1645,40 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
ret = -EINVAL;
}
- if (!ret)
- ret = call_qop(q, buf_prepare, vb);
if (ret)
- dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
- else
- vb->state = VB2_BUF_STATE_PREPARED;
+ dprintk(1, "buffer preparation failed: %d\n", ret);
+ vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
return ret;
}
+static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
+ const char *opname)
+{
+ if (b->type != q->type) {
+ dprintk(1, "%s: invalid buffer type\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "%s: buffer index out of range\n", opname);
+ return -EINVAL;
+ }
+
+ if (q->bufs[b->index] == NULL) {
+ /* Should never happen */
+ dprintk(1, "%s: buffer is NULL\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->memory != q->memory) {
+ dprintk(1, "%s: invalid memory type\n", opname);
+ return -EINVAL;
+ }
+
+ return __verify_planes_array(q->bufs[b->index], b);
+}
+
/**
* vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
* @q: videobuf2 queue
@@ -1205,142 +1699,106 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
struct vb2_buffer *vb;
int ret;
- if (q->fileio) {
- dprintk(1, "%s(): file io in progress\n", __func__);
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
return -EBUSY;
}
- if (b->type != q->type) {
- dprintk(1, "%s(): invalid buffer type\n", __func__);
- return -EINVAL;
- }
-
- if (b->index >= q->num_buffers) {
- dprintk(1, "%s(): buffer index out of range\n", __func__);
- return -EINVAL;
- }
+ ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+ if (ret)
+ return ret;
vb = q->bufs[b->index];
- if (NULL == vb) {
- /* Should never happen */
- dprintk(1, "%s(): buffer is NULL\n", __func__);
- return -EINVAL;
- }
-
- if (b->memory != q->memory) {
- dprintk(1, "%s(): invalid memory type\n", __func__);
- return -EINVAL;
- }
-
if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
+ dprintk(1, "invalid buffer state %d\n",
+ vb->state);
return -EINVAL;
}
- ret = __verify_planes_array(vb, b);
- if (ret < 0)
- return ret;
- ret = __buf_prepare(vb, b);
- if (ret < 0)
- return ret;
- __fill_v4l2_buffer(vb, b);
+ ret = __buf_prepare(vb, b);
+ if (!ret) {
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
- return 0;
+ dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index);
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);
/**
- * vb2_qbuf() - Queue a buffer from userspace
+ * vb2_start_streaming() - Attempt to start streaming.
* @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_qbuf handler
- * in driver
- *
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
- * which driver-specific buffer initialization can be performed,
- * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
- * callback for processing.
*
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
+ * Attempt to start streaming. When this function is called there must be
+ * at least q->min_buffers_needed buffers queued up (i.e. the minimum
+ * number of buffers required for the DMA engine to function). If the
+ * @start_streaming op fails it is supposed to return all the driver-owned
+ * buffers back to vb2 in state QUEUED. Check if that happened and, if
+ * not, warn and reclaim them forcefully.
*/
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+static int vb2_start_streaming(struct vb2_queue *q)
{
- struct rw_semaphore *mmap_sem = NULL;
struct vb2_buffer *vb;
- int ret = 0;
+ int ret;
/*
- * In case of user pointer buffers vb2 allocator needs to get direct
- * access to userspace pages. This requires getting read access on
- * mmap semaphore in the current process structure. The same
- * semaphore is taken before calling mmap operation, while both mmap
- * and qbuf are called by the driver or v4l2 core with driver's lock
- * held. To avoid a AB-BA deadlock (mmap_sem then driver's lock in
- * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core
- * release driver's lock, takes mmap_sem and then takes again driver's
- * lock.
- *
- * To avoid race with other vb2 calls, which might be called after
- * releasing driver's lock, this operation is performed at the
- * beggining of qbuf processing. This way the queue status is
- * consistent after getting driver's lock back.
+ * If any buffers were queued before streamon,
+ * we can now pass them to driver for processing.
*/
- if (q->memory == V4L2_MEMORY_USERPTR) {
- mmap_sem = &current->mm->mmap_sem;
- call_qop(q, wait_prepare, q);
- down_read(mmap_sem);
- call_qop(q, wait_finish, q);
- }
+ list_for_each_entry(vb, &q->queued_list, queued_entry)
+ __enqueue_in_driver(vb);
- if (q->fileio) {
- dprintk(1, "qbuf: file io in progress\n");
- ret = -EBUSY;
- goto unlock;
- }
+ /* Tell the driver to start streaming */
+ ret = call_qop(q, start_streaming, q,
+ atomic_read(&q->owned_by_drv_count));
+ q->start_streaming_called = ret == 0;
+ if (!ret)
+ return 0;
- if (b->type != q->type) {
- dprintk(1, "qbuf: invalid buffer type\n");
- ret = -EINVAL;
- goto unlock;
- }
+ dprintk(1, "driver refused to start streaming\n");
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ unsigned i;
- if (b->index >= q->num_buffers) {
- dprintk(1, "qbuf: buffer index out of range\n");
- ret = -EINVAL;
- goto unlock;
+ /*
+ * Forcefully reclaim buffers if the driver did not
+ * correctly return them to vb2.
+ */
+ for (i = 0; i < q->num_buffers; ++i) {
+ vb = q->bufs[i];
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+ }
+ /* Must be zero now */
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
}
+ return ret;
+}
- vb = q->bufs[b->index];
- if (NULL == vb) {
- /* Should never happen */
- dprintk(1, "qbuf: buffer is NULL\n");
- ret = -EINVAL;
- goto unlock;
- }
+static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
+ struct vb2_buffer *vb;
- if (b->memory != q->memory) {
- dprintk(1, "qbuf: invalid memory type\n");
- ret = -EINVAL;
- goto unlock;
- }
- ret = __verify_planes_array(vb, b);
if (ret)
- goto unlock;
+ return ret;
+
+ vb = q->bufs[b->index];
switch (vb->state) {
case VB2_BUF_STATE_DEQUEUED:
ret = __buf_prepare(vb, b);
if (ret)
- goto unlock;
+ return ret;
+ break;
case VB2_BUF_STATE_PREPARED:
break;
+ case VB2_BUF_STATE_PREPARING:
+ dprintk(1, "buffer still being prepared\n");
+ return -EINVAL;
default:
- dprintk(1, "qbuf: buffer already in use\n");
- ret = -EINVAL;
- goto unlock;
+ dprintk(1, "invalid buffer state %d\n", vb->state);
+ return -EINVAL;
}
/*
@@ -1348,23 +1806,73 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
* dequeued in dqbuf.
*/
list_add_tail(&vb->queued_entry, &q->queued_list);
+ q->queued_count++;
vb->state = VB2_BUF_STATE_QUEUED;
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * For output buffers copy the timestamp if needed,
+ * and the timecode field and flag if needed.
+ */
+ if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY)
+ vb->v4l2_buf.timestamp = b->timestamp;
+ vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
+ if (b->flags & V4L2_BUF_FLAG_TIMECODE)
+ vb->v4l2_buf.timecode = b->timecode;
+ }
/*
* If already streaming, give the buffer to driver for processing.
* If not, the buffer will be given to driver on next streamon.
*/
- if (q->streaming)
+ if (q->start_streaming_called)
__enqueue_in_driver(vb);
/* Fill buffer information for the userspace */
__fill_v4l2_buffer(vb, b);
+ /*
+ * If streamon has been called, and we haven't yet called
+ * start_streaming() since not enough buffers were queued, and
+ * we now have reached the minimum number of queued buffers,
+ * then we can finally call start_streaming().
+ */
+ if (q->streaming && !q->start_streaming_called &&
+ q->queued_count >= q->min_buffers_needed) {
+ ret = vb2_start_streaming(q);
+ if (ret)
+ return ret;
+ }
+
dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
-unlock:
- if (mmap_sem)
- up_read(mmap_sem);
- return ret;
+ return 0;
+}
+
+/**
+ * vb2_qbuf() - Queue a buffer from userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_qbuf handler
+ * in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
+ * which driver-specific buffer initialization can be performed,
+ * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
+ * callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ return vb2_internal_qbuf(q, b);
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
@@ -1389,7 +1897,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
int ret;
if (!q->streaming) {
- dprintk(1, "Streaming off, will not wait for buffers\n");
+ dprintk(1, "streaming off, will not wait for buffers\n");
return -EINVAL;
}
@@ -1401,7 +1909,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
}
if (nonblocking) {
- dprintk(1, "Nonblocking and no buffers to dequeue, "
+ dprintk(1, "nonblocking and no buffers to dequeue, "
"will not wait\n");
return -EAGAIN;
}
@@ -1411,12 +1919,12 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* become ready or for streamoff. Driver's lock is released to
* allow streamoff or qbuf to be called while waiting.
*/
- call_qop(q, wait_prepare, q);
+ call_void_qop(q, wait_prepare, q);
/*
* All locks have been released, it is safe to sleep now.
*/
- dprintk(3, "Will sleep waiting for buffers\n");
+ dprintk(3, "will sleep waiting for buffers\n");
ret = wait_event_interruptible(q->done_wq,
!list_empty(&q->done_list) || !q->streaming);
@@ -1424,9 +1932,9 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* We need to reevaluate both conditions again after reacquiring
* the locks or return an error if one occurred.
*/
- call_qop(q, wait_finish, q);
+ call_void_qop(q, wait_finish, q);
if (ret) {
- dprintk(1, "Sleep was interrupted\n");
+ dprintk(1, "sleep was interrupted\n");
return ret;
}
}
@@ -1481,11 +1989,12 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
int vb2_wait_for_all_buffers(struct vb2_queue *q)
{
if (!q->streaming) {
- dprintk(1, "Streaming off, will not wait for buffers\n");
+ dprintk(1, "streaming off, will not wait for buffers\n");
return -EINVAL;
}
- wait_event(q->done_wq, !atomic_read(&q->queued_count));
+ if (q->start_streaming_called)
+ wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
@@ -1509,72 +2018,43 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
for (i = 0; i < vb->num_planes; ++i) {
if (!vb->planes[i].dbuf_mapped)
continue;
- call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+ call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
vb->planes[i].dbuf_mapped = 0;
}
}
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_dqbuf handler
- * in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- * buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_finish callback in the driver (if provided), in which
- * driver can perform any additional operations that may be required before
- * returning the buffer to userspace, such as cache sync,
- * 3) the buffer struct members are filled with relevant information for
- * the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
-int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
struct vb2_buffer *vb = NULL;
int ret;
- if (q->fileio) {
- dprintk(1, "dqbuf: file io in progress\n");
- return -EBUSY;
- }
-
if (b->type != q->type) {
- dprintk(1, "dqbuf: invalid buffer type\n");
+ dprintk(1, "invalid buffer type\n");
return -EINVAL;
}
ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
if (ret < 0)
return ret;
- ret = call_qop(q, buf_finish, vb);
- if (ret) {
- dprintk(1, "dqbuf: buffer finish failed\n");
- return ret;
- }
-
switch (vb->state) {
case VB2_BUF_STATE_DONE:
- dprintk(3, "dqbuf: Returning done buffer\n");
+ dprintk(3, "returning done buffer\n");
break;
case VB2_BUF_STATE_ERROR:
- dprintk(3, "dqbuf: Returning done buffer with errors\n");
+ dprintk(3, "returning done buffer with errors\n");
break;
default:
- dprintk(1, "dqbuf: Invalid buffer state\n");
+ dprintk(1, "invalid buffer state\n");
return -EINVAL;
}
+ call_void_vb_qop(vb, buf_finish, vb);
+
/* Fill buffer information for the userspace */
__fill_v4l2_buffer(vb, b);
/* Remove from videobuf queue */
list_del(&vb->queued_entry);
+ q->queued_count--;
/* go back to dequeued state */
__vb2_dqbuf(vb);
@@ -1583,6 +2063,36 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
return 0;
}
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_dqbuf handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls the buf_finish callback in the driver (if provided), in which
+ * the driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * 3) the buffer struct members are filled with relevant information for
+ * the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from the vidioc_dqbuf handler in the driver.
+ */
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_internal_dqbuf(q, b, nonblocking);
+}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
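For symmetry, a hedged sketch of the matching vidioc_dqbuf handler, forwarding O_NONBLOCK exactly as the comment above suggests (my_file_to_ctx() is again a hypothetical helper):

static int my_vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct my_ctx *ctx = my_file_to_ctx(file);

	/* Block only if the node was opened without O_NONBLOCK. */
	return vb2_dqbuf(&ctx->queue, b, file->f_flags & O_NONBLOCK);
}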
/**
@@ -1599,9 +2109,20 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* Tell driver to stop all transactions and release all queued
* buffers.
*/
- if (q->streaming)
- call_qop(q, stop_streaming, q);
+ if (q->start_streaming_called)
+ call_void_qop(q, stop_streaming, q);
+
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ for (i = 0; i < q->num_buffers; ++i)
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
+ vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
+ /* Must be zero now */
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
+ }
+
q->streaming = 0;
+ q->start_streaming_called = 0;
+ q->queued_count = 0;
/*
* Remove all buffers from videobuf's list...
@@ -1612,14 +2133,70 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* has not already dequeued before initiating cancel.
*/
INIT_LIST_HEAD(&q->done_list);
- atomic_set(&q->queued_count, 0);
+ atomic_set(&q->owned_by_drv_count, 0);
wake_up_all(&q->done_wq);
/*
* Reinitialize all buffers for next use.
+ * Make sure to call buf_finish for any queued buffers. Normally
+ * that's done in dqbuf, but that's not going to happen when we
+ * cancel the whole queue. Note: this code belongs here, not in
+ * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
+ * call to __fill_v4l2_buffer() after buf_finish(). That order can't
+ * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
+ */
+ for (i = 0; i < q->num_buffers; ++i) {
+ struct vb2_buffer *vb = q->bufs[i];
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ vb->state = VB2_BUF_STATE_PREPARED;
+ call_void_vb_qop(vb, buf_finish, vb);
+ }
+ __vb2_dqbuf(vb);
+ }
+}
+
+static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ int ret;
+
+ if (type != q->type) {
+ dprintk(1, "invalid stream type\n");
+ return -EINVAL;
+ }
+
+ if (q->streaming) {
+ dprintk(3, "already streaming\n");
+ return 0;
+ }
+
+ if (!q->num_buffers) {
+ dprintk(1, "no buffers have been allocated\n");
+ return -EINVAL;
+ }
+
+ if (q->num_buffers < q->min_buffers_needed) {
+ dprintk(1, "need at least %u allocated buffers\n",
+ q->min_buffers_needed);
+ return -EINVAL;
+ }
+
+ /*
+ * Tell driver to start streaming provided sufficient buffers
+ * are available.
*/
- for (i = 0; i < q->num_buffers; ++i)
- __vb2_dqbuf(q->bufs[i]);
+ if (q->queued_count >= q->min_buffers_needed) {
+ ret = vb2_start_streaming(q);
+ if (ret) {
+ __vb2_queue_cancel(q);
+ return ret;
+ }
+ }
+
+ q->streaming = 1;
+
+ dprintk(3, "successful\n");
+ return 0;
}
/**
@@ -1637,48 +2214,35 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
*/
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
- struct vb2_buffer *vb;
- int ret;
-
- if (q->fileio) {
- dprintk(1, "streamon: file io in progress\n");
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
return -EBUSY;
}
+ return vb2_internal_streamon(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
+static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
if (type != q->type) {
- dprintk(1, "streamon: invalid stream type\n");
+ dprintk(1, "invalid stream type\n");
return -EINVAL;
}
- if (q->streaming) {
- dprintk(1, "streamon: already streaming\n");
- return -EBUSY;
- }
-
- /*
- * If any buffers were queued before streamon,
- * we can now pass them to driver for processing.
- */
- list_for_each_entry(vb, &q->queued_list, queued_entry)
- __enqueue_in_driver(vb);
-
/*
- * Let driver notice that streaming state has been enabled.
+ * Cancel will pause streaming and remove all buffers from the driver
+ * and videobuf, effectively returning control over them to userspace.
+ *
+ * Note that we do this even if q->streaming == 0: if you prepare or
+ * queue buffers, and then call streamoff without ever having called
+ * streamon, you would still expect those buffers to be returned to
+ * their normal dequeued state.
*/
- ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
- if (ret) {
- dprintk(1, "streamon: driver refused to start streaming\n");
- __vb2_queue_cancel(q);
- return ret;
- }
-
- q->streaming = 1;
+ __vb2_queue_cancel(q);
- dprintk(3, "Streamon successful\n");
+ dprintk(3, "successful\n");
return 0;
}
-EXPORT_SYMBOL_GPL(vb2_streamon);
-
/**
* vb2_streamoff - stop streaming
@@ -1697,29 +2261,11 @@ EXPORT_SYMBOL_GPL(vb2_streamon);
*/
int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
- if (q->fileio) {
- dprintk(1, "streamoff: file io in progress\n");
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "file io in progress\n");
return -EBUSY;
}
-
- if (type != q->type) {
- dprintk(1, "streamoff: invalid stream type\n");
- return -EINVAL;
- }
-
- if (!q->streaming) {
- dprintk(1, "streamoff: not streaming\n");
- return -EINVAL;
- }
-
- /*
- * Cancel will pause streaming and remove all buffers from the driver
- * and videobuf, effectively returning control over them to userspace.
- */
- __vb2_queue_cancel(q);
-
- dprintk(3, "Streamoff successful\n");
- return 0;
+ return vb2_internal_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);
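The usual wiring of both helpers into a driver's v4l2_ioctl_ops looks roughly like the sketch below (names are placeholders); drivers using the vb2_ioctl_* helper family (see vb2_ioctl_expbuf further down) get equivalent handlers for free:

static int my_vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type t)
{
	struct my_ctx *ctx = my_file_to_ctx(file);

	return vb2_streamon(&ctx->queue, t);
}

static int my_vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type t)
{
	struct my_ctx *ctx = my_file_to_ctx(file);

	return vb2_streamoff(&ctx->queue, t);
}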
@@ -1769,22 +2315,22 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
struct dma_buf *dbuf;
if (q->memory != V4L2_MEMORY_MMAP) {
- dprintk(1, "Queue is not currently set up for mmap\n");
+ dprintk(1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
if (!q->mem_ops->get_dmabuf) {
- dprintk(1, "Queue does not support DMA buffer exporting\n");
+ dprintk(1, "queue does not support DMA buffer exporting\n");
return -EINVAL;
}
- if (eb->flags & ~O_CLOEXEC) {
- dprintk(1, "Queue does support only O_CLOEXEC flag\n");
+ if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
+		dprintk(1, "queue supports only O_CLOEXEC and access mode flags\n");
return -EINVAL;
}
if (eb->type != q->type) {
- dprintk(1, "qbuf: invalid buffer type\n");
+ dprintk(1, "invalid buffer type\n");
return -EINVAL;
}
@@ -1800,16 +2346,21 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
return -EINVAL;
}
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "expbuf: file io in progress\n");
+ return -EBUSY;
+ }
+
vb_plane = &vb->planes[eb->plane];
- dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+ dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
- dprintk(1, "Failed to export buffer %d, plane %d\n",
+ dprintk(1, "failed to export buffer %d, plane %d\n",
eb->index, eb->plane);
return -EINVAL;
}
- ret = dma_buf_fd(dbuf, eb->flags);
+ ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
if (ret < 0) {
dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
eb->index, eb->plane, ret);
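A hedged userspace sketch of what the O_ACCMODE change above enables: exporting a plane as a read/write dmabuf fd (assumes <fcntl.h>, <sys/ioctl.h> and <linux/videodev2.h>, and that video_fd is an open V4L2 device with MMAP buffers already requested):

	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = 0;			/* buffer 0 */
	expbuf.plane = 0;			/* plane 0 */
	expbuf.flags = O_CLOEXEC | O_RDWR;	/* access mode bits are now accepted */
	if (ioctl(video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
		perror("VIDIOC_EXPBUF");
	else
		use_dmabuf_fd(expbuf.fd);	/* hypothetical consumer of the fd */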
@@ -1848,11 +2399,12 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
struct vb2_buffer *vb;
- unsigned int buffer, plane;
+ unsigned int buffer = 0, plane = 0;
int ret;
+ unsigned long length;
if (q->memory != V4L2_MEMORY_MMAP) {
- dprintk(1, "Queue is not currently set up for mmap\n");
+ dprintk(1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
@@ -1860,20 +2412,24 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
* Check memory area access mode.
*/
if (!(vma->vm_flags & VM_SHARED)) {
- dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
+ dprintk(1, "invalid vma flags, VM_SHARED needed\n");
return -EINVAL;
}
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
if (!(vma->vm_flags & VM_WRITE)) {
- dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
+ dprintk(1, "invalid vma flags, VM_WRITE needed\n");
return -EINVAL;
}
} else {
if (!(vma->vm_flags & VM_READ)) {
- dprintk(1, "Invalid vma flags, VM_READ needed\n");
+ dprintk(1, "invalid vma flags, VM_READ needed\n");
return -EINVAL;
}
}
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "mmap: file io in progress\n");
+ return -EBUSY;
+ }
/*
* Find the plane corresponding to the offset passed by userspace.
@@ -1884,11 +2440,23 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
vb = q->bufs[buffer];
- ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
+ /*
+	 * MMAP requires page-aligned buffers.
+	 * The buffer length was page-aligned at __vb2_buf_mem_alloc(),
+	 * so we need to do the same here.
+ */
+ length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
+ if (length < (vma->vm_end - vma->vm_start)) {
+ dprintk(1,
+ "MMAP invalid, as it would overflow buffer length\n");
+ return -EINVAL;
+ }
+
+ ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
if (ret)
return ret;
- dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
+ dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
@@ -1906,7 +2474,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
int ret;
if (q->memory != V4L2_MEMORY_MMAP) {
- dprintk(1, "Queue is not currently set up for mmap\n");
+ dprintk(1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
@@ -1963,10 +2531,15 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
poll_wait(file, &fh->wait, wait);
}
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
+ return res;
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
+ return res;
+
/*
* Start file I/O emulator only if streaming API has not been used yet.
*/
- if (q->num_buffers == 0 && q->fileio == NULL) {
+ if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
(req_events & (POLLIN | POLLRDNORM))) {
if (__vb2_init_fileio(q, 1))
@@ -1989,7 +2562,8 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
if (list_empty(&q->queued_list))
return res | POLLERR;
- poll_wait(file, &q->done_wq, wait);
+ if (list_empty(&q->done_list))
+ poll_wait(file, &q->done_wq, wait);
/*
* Take first buffer available for dequeuing.
@@ -2032,9 +2606,16 @@ int vb2_queue_init(struct vb2_queue *q)
WARN_ON(!q->type) ||
WARN_ON(!q->io_modes) ||
WARN_ON(!q->ops->queue_setup) ||
- WARN_ON(!q->ops->buf_queue))
+ WARN_ON(!q->ops->buf_queue) ||
+ WARN_ON(q->timestamp_flags &
+ ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
return -EINVAL;
+ /* Warn that the driver should choose an appropriate timestamp type */
+ WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
+
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
spin_lock_init(&q->done_lock);
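A hedged sketch of the queue setup these checks expect from a driver (struct my_dev, struct my_buffer and my_qops are placeholders); timestamp_flags must name a real timestamp type, otherwise the WARN_ON above fires:

static int my_init_queue(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	memset(q, 0, sizeof(*q));
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_qops;		/* must provide queue_setup and buf_queue */
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 2;	/* streaming starts once two buffers are queued */

	return vb2_queue_init(q);
}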
@@ -2080,6 +2661,22 @@ struct vb2_fileio_buf {
/**
* struct vb2_fileio_data - queue context used by file io emulator
*
+ * @cur_index: the index of the buffer currently being read from or
+ * written to. If equal to q->num_buffers then a new buffer
+ * must be dequeued.
+ * @initial_index: in the read() case all buffers are queued up immediately
+ * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
+ * buffers. However, in the write() case no buffers are initially
+ * queued, instead whenever a buffer is full it is queued up by
+ * __vb2_perform_fileio(). Only once all available buffers have
+ * been queued up will __vb2_perform_fileio() start to dequeue
+ * buffers. This means that initially __vb2_perform_fileio()
+ * needs to know what buffer index to use when it is queuing up
+ * the buffers for the first time. That initial index is stored
+ * in this field. Once it is equal to q->num_buffers all
+ * available buffers have been queued and __vb2_perform_fileio()
+ * should start the normal dequeue/queue cycle.
+ *
* vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of the streaming API. For proper operation it requires
* this structure to save the driver state between each call of the read
@@ -2087,9 +2684,11 @@ struct vb2_fileio_buf {
*/
struct vb2_fileio_data {
struct v4l2_requestbuffers req;
+ struct v4l2_plane p;
struct v4l2_buffer b;
struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
- unsigned int index;
+ unsigned int cur_index;
+ unsigned int initial_index;
unsigned int q_count;
unsigned int dq_count;
unsigned int flags;
@@ -2109,9 +2708,9 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
/*
* Sanity check
*/
- if ((read && !(q->io_modes & VB2_READ)) ||
- (!read && !(q->io_modes & VB2_WRITE)))
- BUG();
+ if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
+ (!read && !(q->io_modes & VB2_WRITE))))
+ return -EINVAL;
/*
* Check if device supports mapping buffers to kernel virtual space.
@@ -2146,7 +2745,8 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
fileio->req.count = count;
fileio->req.memory = V4L2_MEMORY_MMAP;
fileio->req.type = q->type;
- ret = vb2_reqbufs(q, &fileio->req);
+ q->fileio = fileio;
+ ret = __reqbufs(q, &fileio->req);
if (ret)
goto err_kfree;
@@ -2164,8 +2764,10 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
*/
for (i = 0; i < q->num_buffers; i++) {
fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
- if (fileio->bufs[i].vaddr == NULL)
+ if (fileio->bufs[i].vaddr == NULL) {
+ ret = -EINVAL;
goto err_reqbufs;
+ }
fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
}
@@ -2173,38 +2775,51 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
* Read mode requires pre queuing of all buffers.
*/
if (read) {
+ bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
+
/*
* Queue all buffers.
*/
for (i = 0; i < q->num_buffers; i++) {
struct v4l2_buffer *b = &fileio->b;
+
memset(b, 0, sizeof(*b));
b->type = q->type;
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ b->m.planes = &fileio->p;
+ b->length = 1;
+ }
b->memory = q->memory;
b->index = i;
- ret = vb2_qbuf(q, b);
+ ret = vb2_internal_qbuf(q, b);
if (ret)
goto err_reqbufs;
fileio->bufs[i].queued = 1;
}
-
/*
- * Start streaming.
+ * All buffers have been queued, so mark that by setting
+ * initial_index to q->num_buffers
*/
- ret = vb2_streamon(q, q->type);
- if (ret)
- goto err_reqbufs;
+ fileio->initial_index = q->num_buffers;
+ fileio->cur_index = q->num_buffers;
}
- q->fileio = fileio;
+ /*
+ * Start streaming.
+ */
+ ret = vb2_internal_streamon(q, q->type);
+ if (ret)
+ goto err_reqbufs;
return ret;
err_reqbufs:
fileio->req.count = 0;
- vb2_reqbufs(q, &fileio->req);
+ __reqbufs(q, &fileio->req);
err_kfree:
+ q->fileio = NULL;
kfree(fileio);
return ret;
}
@@ -2218,13 +2833,8 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
struct vb2_fileio_data *fileio = q->fileio;
if (fileio) {
- /*
- * Hack fileio context to enable direct calls to vb2 ioctl
- * interface.
- */
+ vb2_internal_streamoff(q, q->type);
q->fileio = NULL;
-
- vb2_streamoff(q, q->type);
fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
kfree(fileio);
@@ -2247,9 +2857,18 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
{
struct vb2_fileio_data *fileio;
struct vb2_fileio_buf *buf;
+ bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
+ /*
+ * When using write() to write data to an output video node the vb2 core
+ * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
+ * else is able to provide this information with the write() operation.
+ */
+ bool set_timestamp = !read &&
+ (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY;
int ret, index;
- dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
+ dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
read ? "read" : "write", (long)*ppos, count,
nonblock ? "non" : "");
@@ -2259,48 +2878,48 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
/*
* Initialize emulator on first call.
*/
- if (!q->fileio) {
+ if (!vb2_fileio_is_active(q)) {
ret = __vb2_init_fileio(q, read);
- dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
+ dprintk(3, "vb2_init_fileio result: %d\n", ret);
if (ret)
return ret;
}
fileio = q->fileio;
/*
- * Hack fileio context to enable direct calls to vb2 ioctl interface.
- * The pointer will be restored before returning from this function.
- */
- q->fileio = NULL;
-
- index = fileio->index;
- buf = &fileio->bufs[index];
-
- /*
* Check if we need to dequeue the buffer.
*/
- if (buf->queued) {
- struct vb2_buffer *vb;
-
+ index = fileio->cur_index;
+ if (index >= q->num_buffers) {
/*
* Call vb2_dqbuf to get buffer back.
*/
memset(&fileio->b, 0, sizeof(fileio->b));
fileio->b.type = q->type;
fileio->b.memory = q->memory;
- fileio->b.index = index;
- ret = vb2_dqbuf(q, &fileio->b, nonblock);
- dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ fileio->b.m.planes = &fileio->p;
+ fileio->b.length = 1;
+ }
+ ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
+ dprintk(5, "vb2_dqbuf result: %d\n", ret);
if (ret)
- goto end;
+ return ret;
fileio->dq_count += 1;
+ fileio->cur_index = index = fileio->b.index;
+ buf = &fileio->bufs[index];
+
/*
* Get number of bytes filled by the driver
*/
- vb = q->bufs[index];
- buf->size = vb2_get_plane_payload(vb, 0);
+ buf->pos = 0;
buf->queued = 0;
+ buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
+ : vb2_plane_size(q->bufs[index], 0);
+ } else {
+ buf = &fileio->bufs[index];
}
/*
@@ -2314,16 +2933,15 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
/*
* Transfer data to userspace.
*/
- dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
+ dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
count, index, buf->pos);
if (read)
ret = copy_to_user(data, buf->vaddr + buf->pos, count);
else
ret = copy_from_user(buf->vaddr + buf->pos, data, count);
if (ret) {
- dprintk(3, "file io: error copying data\n");
- ret = -EFAULT;
- goto end;
+ dprintk(3, "error copying data\n");
+ return -EFAULT;
}
/*
@@ -2342,11 +2960,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
*/
if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
fileio->dq_count == 1) {
- dprintk(3, "file io: read limit reached\n");
- /*
- * Restore fileio pointer and release the context.
- */
- q->fileio = fileio;
+ dprintk(3, "read limit reached\n");
return __vb2_cleanup_fileio(q);
}
@@ -2358,32 +2972,40 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
fileio->b.memory = q->memory;
fileio->b.index = index;
fileio->b.bytesused = buf->pos;
- ret = vb2_qbuf(q, &fileio->b);
- dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
+ if (is_multiplanar) {
+ memset(&fileio->p, 0, sizeof(fileio->p));
+ fileio->p.bytesused = buf->pos;
+ fileio->b.m.planes = &fileio->p;
+ fileio->b.length = 1;
+ }
+ if (set_timestamp)
+ v4l2_get_timestamp(&fileio->b.timestamp);
+ ret = vb2_internal_qbuf(q, &fileio->b);
+	dprintk(5, "vb2_qbuf result: %d\n", ret);
if (ret)
- goto end;
+ return ret;
/*
* Buffer has been queued, update the status
*/
buf->pos = 0;
buf->queued = 1;
- buf->size = q->bufs[0]->v4l2_planes[0].length;
+ buf->size = vb2_plane_size(q->bufs[index], 0);
fileio->q_count += 1;
-
/*
- * Switch to the next buffer
+ * If we are queuing up buffers for the first time, then
+ * increase initial_index by one.
*/
- fileio->index = (index + 1) % q->num_buffers;
-
+ if (fileio->initial_index < q->num_buffers)
+ fileio->initial_index++;
/*
- * Start streaming if required.
+		 * The next buffer to use is either a buffer that's going to be
+		 * queued for the first time (initial_index < q->num_buffers),
+		 * or initial_index equals q->num_buffers, meaning that the
+		 * next time around we will have to dequeue a buffer, since
+		 * we've now queued up all the 'first time' buffers.
*/
- if (!read && !q->streaming) {
- ret = vb2_streamon(q, q->type);
- if (ret)
- goto end;
- }
+ fileio->cur_index = fileio->initial_index;
}
/*
@@ -2391,11 +3013,6 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
*/
if (ret == 0)
ret = count;
-end:
- /*
- * Restore the fileio context and block vb2 ioctl interface.
- */
- q->fileio = fileio;
return ret;
}
@@ -2406,13 +3023,155 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
}
EXPORT_SYMBOL_GPL(vb2_read);
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
loff_t *ppos, int nonblocking)
{
- return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
+ return __vb2_perform_fileio(q, (char __user *) data, count,
+ ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
+struct vb2_threadio_data {
+ struct task_struct *thread;
+ vb2_thread_fnc fnc;
+ void *priv;
+ bool stop;
+};
+
+static int vb2_thread(void *data)
+{
+ struct vb2_queue *q = data;
+ struct vb2_threadio_data *threadio = q->threadio;
+ struct vb2_fileio_data *fileio = q->fileio;
+ bool set_timestamp = false;
+ int prequeue = 0;
+ int index = 0;
+ int ret = 0;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ prequeue = q->num_buffers;
+ set_timestamp =
+ (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ }
+
+ set_freezable();
+
+ for (;;) {
+ struct vb2_buffer *vb;
+
+ /*
+ * Call vb2_dqbuf to get buffer back.
+ */
+ memset(&fileio->b, 0, sizeof(fileio->b));
+ fileio->b.type = q->type;
+ fileio->b.memory = q->memory;
+ if (prequeue) {
+ fileio->b.index = index++;
+ prequeue--;
+ } else {
+ call_void_qop(q, wait_finish, q);
+ ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+ call_void_qop(q, wait_prepare, q);
+ dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+ }
+ if (threadio->stop)
+ break;
+ if (ret)
+ break;
+ try_to_freeze();
+
+ vb = q->bufs[fileio->b.index];
+ if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
+ ret = threadio->fnc(vb, threadio->priv);
+ if (ret)
+ break;
+ call_void_qop(q, wait_finish, q);
+ if (set_timestamp)
+ v4l2_get_timestamp(&fileio->b.timestamp);
+ ret = vb2_internal_qbuf(q, &fileio->b);
+ call_void_qop(q, wait_prepare, q);
+ if (ret)
+ break;
+ }
+
+ /* Hmm, linux becomes *very* unhappy without this ... */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ return 0;
+}
+
+/*
+ * This function should not be used for anything other than the videobuf2-dvb
+ * support. If you think you have another good use-case for this, then please
+ * contact the linux-media mailing list first.
+ */
+int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
+ const char *thread_name)
+{
+ struct vb2_threadio_data *threadio;
+ int ret = 0;
+
+ if (q->threadio)
+ return -EBUSY;
+ if (vb2_is_busy(q))
+ return -EBUSY;
+ if (WARN_ON(q->fileio))
+ return -EBUSY;
+
+ threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
+ if (threadio == NULL)
+ return -ENOMEM;
+ threadio->fnc = fnc;
+ threadio->priv = priv;
+
+ ret = __vb2_init_fileio(q, !V4L2_TYPE_IS_OUTPUT(q->type));
+ dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
+ if (ret)
+ goto nomem;
+ q->threadio = threadio;
+ threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
+ if (IS_ERR(threadio->thread)) {
+ ret = PTR_ERR(threadio->thread);
+ threadio->thread = NULL;
+ goto nothread;
+ }
+ return 0;
+
+nothread:
+ __vb2_cleanup_fileio(q);
+nomem:
+ kfree(threadio);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_start);
+
+int vb2_thread_stop(struct vb2_queue *q)
+{
+ struct vb2_threadio_data *threadio = q->threadio;
+ struct vb2_fileio_data *fileio = q->fileio;
+ int err;
+
+ if (threadio == NULL)
+ return 0;
+ call_void_qop(q, wait_finish, q);
+ threadio->stop = true;
+ vb2_internal_streamoff(q, q->type);
+ call_void_qop(q, wait_prepare, q);
+ q->fileio = NULL;
+ fileio->req.count = 0;
+ vb2_reqbufs(q, &fileio->req);
+ kfree(fileio);
+ err = kthread_stop(threadio->thread);
+ threadio->thread = NULL;
+ kfree(threadio);
+ q->fileio = NULL;
+ q->threadio = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_stop);
/*
* The following functions are not part of the vb2 core API, but are helper
@@ -2546,24 +3305,44 @@ EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int err;
- return vb2_mmap(vdev->queue, vma);
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ err = vb2_mmap(vdev->queue, vma);
+ if (lock)
+ mutex_unlock(lock);
+ return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);
-int vb2_fop_release(struct file *file)
+int _vb2_fop_release(struct file *file, struct mutex *lock)
{
struct video_device *vdev = video_devdata(file);
if (file->private_data == vdev->queue->owner) {
+ if (lock)
+ mutex_lock(lock);
vb2_queue_release(vdev->queue);
vdev->queue->owner = NULL;
+ if (lock)
+ mutex_unlock(lock);
}
return v4l2_fh_release(file);
}
+EXPORT_SYMBOL_GPL(_vb2_fop_release);
+
+int vb2_fop_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+
+ return _vb2_fop_release(file, lock);
+}
EXPORT_SYMBOL_GPL(vb2_fop_release);
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct video_device *vdev = video_devdata(file);
@@ -2619,7 +3398,7 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
/* Try to be smart: only lock if polling might start fileio,
otherwise locking will only introduce unwanted delays. */
- if (q->num_buffers == 0 && q->fileio == NULL) {
+ if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
(req_events & (POLLIN | POLLRDNORM)))
must_lock = true;
@@ -2653,8 +3432,15 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int ret;
- return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+ if (lock)
+ mutex_unlock(lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif
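Taken together, the vb2_fop_* helpers above let a driver's file operations collapse to roughly the following sketch (video_ioctl2 and v4l2_fh_open are the usual V4L2 core entry points; the structure name is a placeholder):

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.write		= vb2_fop_write,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};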
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 10beaee7f0a..880be0782dd 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -152,7 +152,7 @@ static void vb2_dc_put(void *buf_priv)
kfree(buf);
}
-static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
+static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
struct vb2_dc_conf *conf = alloc_ctx;
struct device *dev = conf->dev;
@@ -162,10 +162,8 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
if (!buf)
return ERR_PTR(-ENOMEM);
- /* align image size to PAGE_SIZE */
- size = PAGE_ALIGN(size);
-
- buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
+ buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
+ GFP_KERNEL | gfp_flags);
if (!buf->vaddr) {
dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
@@ -395,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return sgt;
}
-static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -406,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
if (WARN_ON(!buf->sgt_base))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+ dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
if (IS_ERR(dbuf))
return NULL;
@@ -425,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
+static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
+ struct vm_area_struct *vma, unsigned long *res)
+{
+ unsigned long pfn, start_pfn, prev_pfn;
+ unsigned int i;
+ int ret;
+
+ if (!vma_is_io(vma))
+ return -EFAULT;
+
+ ret = follow_pfn(vma, start, &pfn);
+ if (ret)
+ return ret;
+
+ start_pfn = pfn;
+ start += PAGE_SIZE;
+
+ for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
+ prev_pfn = pfn;
+ ret = follow_pfn(vma, start, &pfn);
+
+ if (ret) {
+ pr_err("no page for address %lu\n", start);
+ return ret;
+ }
+ if (pfn != prev_pfn + 1)
+ return -EINVAL;
+ }
+
+ *res = start_pfn;
+ return 0;
+}
+
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
int n_pages, struct vm_area_struct *vma, int write)
{
@@ -435,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
unsigned long pfn;
int ret = follow_pfn(vma, start, &pfn);
+ if (!pfn_valid(pfn))
+ return -EINVAL;
+
if (ret) {
pr_err("no page for address %lu\n", start);
return ret;
@@ -470,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
- if (!vma_is_io(buf->vma))
- vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+ if (sgt) {
+ dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ if (!vma_is_io(buf->vma))
+ vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
- sg_free_table(sgt);
- kfree(sgt);
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
vb2_put_vma(buf->vma);
kfree(buf);
}
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert the
+ * pfn to a dma address or, as a last resort, just assume that
+ * dma address == physical address (as was assumed in earlier versions
+ * of videobuf2-dma-contig).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+ /* really, we cannot do anything better at this point */
+ return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
{
@@ -550,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
/* extract page list from userspace mapping */
ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
if (ret) {
+ unsigned long pfn;
+ if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+ buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
+ buf->size = size;
+ kfree(pages);
+ return buf;
+ }
+
pr_err("failed to get user pages\n");
goto fail_vma;
}
@@ -644,7 +719,7 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
/* get the associated scatterlist for this buffer */
sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
- if (IS_ERR_OR_NULL(sgt)) {
+ if (IS_ERR(sgt)) {
pr_err("Error getting dmabuf scatterlist\n");
return -EINVAL;
}
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 25c3b360e1a..adefc31bb85 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -21,22 +21,76 @@
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
+ } while (0)
+
struct vb2_dma_sg_buf {
void *vaddr;
struct page **pages;
int write;
int offset;
- struct vb2_dma_sg_desc sg_desc;
+ struct sg_table sg_table;
+ size_t size;
+ unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
+ struct vm_area_struct *vma;
};
static void vb2_dma_sg_put(void *buf_priv);
-static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
+static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+ gfp_t gfp_flags)
+{
+ unsigned int last_page = 0;
+ int size = buf->size;
+
+ while (size > 0) {
+ struct page *pages;
+ int order;
+ int i;
+
+ order = get_order(size);
+		/* Don't over-allocate */
+ if ((PAGE_SIZE << order) > size)
+ order--;
+
+ pages = NULL;
+ while (!pages) {
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
+ __GFP_NOWARN | gfp_flags, order);
+ if (pages)
+ break;
+
+ if (order == 0) {
+ while (last_page--)
+ __free_page(buf->pages[last_page]);
+ return -ENOMEM;
+ }
+ order--;
+ }
+
+ split_page(pages, order);
+ for (i = 0; i < (1 << order); i++)
+ buf->pages[last_page++] = &pages[i];
+
+ size -= PAGE_SIZE << order;
+ }
+
+ return 0;
+}
+
+static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
struct vb2_dma_sg_buf *buf;
- int i;
+ int ret;
+ int num_pages;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -45,27 +99,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
buf->vaddr = NULL;
buf->write = 0;
buf->offset = 0;
- buf->sg_desc.size = size;
- buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
- sizeof(*buf->sg_desc.sglist));
- if (!buf->sg_desc.sglist)
- goto fail_sglist_alloc;
- sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+ buf->size = size;
+ /* size is already page aligned */
+ buf->num_pages = size >> PAGE_SHIFT;
- buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+ buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
goto fail_pages_array_alloc;
- for (i = 0; i < buf->sg_desc.num_pages; ++i) {
- buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
- if (NULL == buf->pages[i])
- goto fail_pages_alloc;
- sg_set_page(&buf->sg_desc.sglist[i],
- buf->pages[i], PAGE_SIZE, 0);
- }
+ ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+ if (ret)
+ goto fail_pages_alloc;
+
+ ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ buf->num_pages, 0, size, gfp_flags);
+ if (ret)
+ goto fail_table_alloc;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dma_sg_put;
@@ -73,19 +123,17 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
atomic_inc(&buf->refcount);
- printk(KERN_DEBUG "%s: Allocated buffer of %d pages\n",
- __func__, buf->sg_desc.num_pages);
+ dprintk(1, "%s: Allocated buffer of %d pages\n",
+ __func__, buf->num_pages);
return buf;
+fail_table_alloc:
+ num_pages = buf->num_pages;
+ while (num_pages--)
+ __free_page(buf->pages[num_pages]);
fail_pages_alloc:
- while (--i >= 0)
- __free_page(buf->pages[i]);
kfree(buf->pages);
-
fail_pages_array_alloc:
- vfree(buf->sg_desc.sglist);
-
-fail_sglist_alloc:
kfree(buf);
return NULL;
}
@@ -93,14 +141,14 @@ fail_sglist_alloc:
static void vb2_dma_sg_put(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- int i = buf->sg_desc.num_pages;
+ int i = buf->num_pages;
if (atomic_dec_and_test(&buf->refcount)) {
- printk(KERN_DEBUG "%s: Freeing buffer of %d pages\n", __func__,
- buf->sg_desc.num_pages);
+ dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
+ buf->num_pages);
if (buf->vaddr)
- vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
- vfree(buf->sg_desc.sglist);
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(&buf->sg_table);
while (--i >= 0)
__free_page(buf->pages[i]);
kfree(buf->pages);
@@ -108,12 +156,18 @@ static void vb2_dma_sg_put(void *buf_priv)
}
}
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write)
{
struct vb2_dma_sg_buf *buf;
unsigned long first, last;
- int num_pages_from_user, i;
+ int num_pages_from_user;
+ struct vm_area_struct *vma;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -122,56 +176,76 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->vaddr = NULL;
buf->write = write;
buf->offset = vaddr & ~PAGE_MASK;
- buf->sg_desc.size = size;
+ buf->size = size;
first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
- buf->sg_desc.num_pages = last - first + 1;
-
- buf->sg_desc.sglist = vzalloc(
- buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
- if (!buf->sg_desc.sglist)
- goto userptr_fail_sglist_alloc;
+ buf->num_pages = last - first + 1;
- sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
-
- buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+ buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
- goto userptr_fail_pages_array_alloc;
+ goto userptr_fail_alloc_pages;
+
+ vma = find_vma(current->mm, vaddr);
+ if (!vma) {
+ dprintk(1, "no vma for address %lu\n", vaddr);
+ goto userptr_fail_find_vma;
+ }
+
+ if (vma->vm_end < vaddr + size) {
+ dprintk(1, "vma at %lu is too small for %lu bytes\n",
+ vaddr, size);
+ goto userptr_fail_find_vma;
+ }
+
+ buf->vma = vb2_get_vma(vma);
+ if (!buf->vma) {
+ dprintk(1, "failed to copy vma\n");
+ goto userptr_fail_find_vma;
+ }
- num_pages_from_user = get_user_pages(current, current->mm,
+ if (vma_is_io(buf->vma)) {
+ for (num_pages_from_user = 0;
+ num_pages_from_user < buf->num_pages;
+ ++num_pages_from_user, vaddr += PAGE_SIZE) {
+ unsigned long pfn;
+
+ if (follow_pfn(vma, vaddr, &pfn)) {
+ dprintk(1, "no page for address %lu\n", vaddr);
+ break;
+ }
+ buf->pages[num_pages_from_user] = pfn_to_page(pfn);
+ }
+ } else
+ num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
- buf->sg_desc.num_pages,
+ buf->num_pages,
write,
1, /* force */
buf->pages,
NULL);
- if (num_pages_from_user != buf->sg_desc.num_pages)
+ if (num_pages_from_user != buf->num_pages)
goto userptr_fail_get_user_pages;
- sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
- PAGE_SIZE - buf->offset, buf->offset);
- size -= PAGE_SIZE - buf->offset;
- for (i = 1; i < buf->sg_desc.num_pages; ++i) {
- sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
- min_t(size_t, PAGE_SIZE, size), 0);
- size -= min_t(size_t, PAGE_SIZE, size);
- }
+ if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+ buf->num_pages, buf->offset, size, 0))
+ goto userptr_fail_alloc_table_from_pages;
+
return buf;
+userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
- printk(KERN_DEBUG "get_user_pages requested/got: %d/%d]\n",
- num_pages_from_user, buf->sg_desc.num_pages);
- while (--num_pages_from_user >= 0)
- put_page(buf->pages[num_pages_from_user]);
+	dprintk(1, "get_user_pages requested/got: %d/%d\n",
+ buf->num_pages, num_pages_from_user);
+ if (!vma_is_io(buf->vma))
+ while (--num_pages_from_user >= 0)
+ put_page(buf->pages[num_pages_from_user]);
+ vb2_put_vma(buf->vma);
+userptr_fail_find_vma:
kfree(buf->pages);
-
-userptr_fail_pages_array_alloc:
- vfree(buf->sg_desc.sglist);
-
-userptr_fail_sglist_alloc:
+userptr_fail_alloc_pages:
kfree(buf);
return NULL;
}
@@ -183,19 +257,21 @@ userptr_fail_sglist_alloc:
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- int i = buf->sg_desc.num_pages;
+ int i = buf->num_pages;
- printk(KERN_DEBUG "%s: Releasing userspace buffer of %d pages\n",
- __func__, buf->sg_desc.num_pages);
+ dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
+ __func__, buf->num_pages);
if (buf->vaddr)
- vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(&buf->sg_table);
while (--i >= 0) {
if (buf->write)
set_page_dirty_lock(buf->pages[i]);
- put_page(buf->pages[i]);
+ if (!vma_is_io(buf->vma))
+ put_page(buf->pages[i]);
}
- vfree(buf->sg_desc.sglist);
kfree(buf->pages);
+ vb2_put_vma(buf->vma);
kfree(buf);
}
@@ -207,7 +283,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
if (!buf->vaddr)
buf->vaddr = vm_map_ram(buf->pages,
- buf->sg_desc.num_pages,
+ buf->num_pages,
-1,
PAGE_KERNEL);
@@ -263,7 +339,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- return &buf->sg_desc;
+ return &buf->sg_table;
}
const struct vb2_mem_ops vb2_dma_sg_memops = {
diff --git a/drivers/media/v4l2-core/videobuf2-dvb.c b/drivers/media/v4l2-core/videobuf2-dvb.c
new file mode 100644
index 00000000000..d09269846b7
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf2-dvb.c
@@ -0,0 +1,336 @@
+/*
+ *
+ * Some helper functions for simple DVB cards that simply DMA the
+ * complete transport stream and let the computer sort everything else
+ * (i.e. we are using the software demux, ...). Also uses videobuf2
+ * to manage DMA buffers.
+ *
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <media/videobuf2-dvb.h>
+
+/* ------------------------------------------------------------------ */
+
+MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------------ */
+
+static int dvb_fnc(struct vb2_buffer *vb, void *priv)
+{
+ struct vb2_dvb *dvb = priv;
+
+ dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0),
+ vb2_get_plane_payload(vb, 0));
+ return 0;
+}
+
+static int vb2_dvb_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct vb2_dvb *dvb = demux->priv;
+ int rc = 0;
+
+ if (!demux->dmx.frontend)
+ return -EINVAL;
+
+ mutex_lock(&dvb->lock);
+ dvb->nfeeds++;
+
+ if (!dvb->dvbq.threadio) {
+ rc = vb2_thread_start(&dvb->dvbq, dvb_fnc, dvb, dvb->name);
+ if (rc)
+ dvb->nfeeds--;
+ }
+ if (!rc)
+ rc = dvb->nfeeds;
+ mutex_unlock(&dvb->lock);
+ return rc;
+}
+
+static int vb2_dvb_stop_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct vb2_dvb *dvb = demux->priv;
+ int err = 0;
+
+ mutex_lock(&dvb->lock);
+ dvb->nfeeds--;
+ if (0 == dvb->nfeeds)
+ err = vb2_thread_stop(&dvb->dvbq);
+ mutex_unlock(&dvb->lock);
+ return err;
+}
+
+static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe,
+ struct module *module,
+ void *adapter_priv,
+ struct device *device,
+ char *adapter_name,
+ short *adapter_nr,
+ int mfe_shared)
+{
+ int result;
+
+ mutex_init(&fe->lock);
+
+ /* register adapter */
+ result = dvb_register_adapter(&fe->adapter, adapter_name, module,
+ device, adapter_nr);
+ if (result < 0) {
+ pr_warn("%s: dvb_register_adapter failed (errno = %d)\n",
+ adapter_name, result);
+ }
+ fe->adapter.priv = adapter_priv;
+ fe->adapter.mfe_shared = mfe_shared;
+
+ return result;
+}
+
+static int vb2_dvb_register_frontend(struct dvb_adapter *adapter,
+ struct vb2_dvb *dvb)
+{
+ int result;
+
+ /* register frontend */
+ result = dvb_register_frontend(adapter, dvb->frontend);
+ if (result < 0) {
+ pr_warn("%s: dvb_register_frontend failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_frontend;
+ }
+
+ /* register demux stuff */
+ dvb->demux.dmx.capabilities =
+ DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+ dvb->demux.priv = dvb;
+ dvb->demux.filternum = 256;
+ dvb->demux.feednum = 256;
+ dvb->demux.start_feed = vb2_dvb_start_feed;
+ dvb->demux.stop_feed = vb2_dvb_stop_feed;
+ result = dvb_dmx_init(&dvb->demux);
+ if (result < 0) {
+ pr_warn("%s: dvb_dmx_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_dmx;
+ }
+
+ dvb->dmxdev.filternum = 256;
+ dvb->dmxdev.demux = &dvb->demux.dmx;
+ dvb->dmxdev.capabilities = 0;
+ result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
+
+ if (result < 0) {
+ pr_warn("%s: dvb_dmxdev_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_dmxdev;
+ }
+
+ dvb->fe_hw.source = DMX_FRONTEND_0;
+ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+ if (result < 0) {
+ pr_warn("%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_hw;
+ }
+
+ dvb->fe_mem.source = DMX_MEMORY_FE;
+ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+ if (result < 0) {
+ pr_warn("%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_mem;
+ }
+
+ result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+ if (result < 0) {
+ pr_warn("%s: connect_frontend failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_conn;
+ }
+
+ /* register network adapter */
+ result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
+ if (result < 0) {
+ pr_warn("%s: dvb_net_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_conn;
+ }
+ return 0;
+
+fail_fe_conn:
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+fail_fe_mem:
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+fail_fe_hw:
+ dvb_dmxdev_release(&dvb->dmxdev);
+fail_dmxdev:
+ dvb_dmx_release(&dvb->demux);
+fail_dmx:
+ dvb_unregister_frontend(dvb->frontend);
+fail_frontend:
+ dvb_frontend_detach(dvb->frontend);
+ dvb->frontend = NULL;
+
+ return result;
+}
+
+/* ------------------------------------------------------------------ */
+/* Register a single adapter and one or more frontends */
+int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
+ struct module *module,
+ void *adapter_priv,
+ struct device *device,
+ short *adapter_nr,
+ int mfe_shared)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe;
+ int res;
+
+ fe = vb2_dvb_get_frontend(f, 1);
+ if (!fe) {
+		pr_warn("Unable to register an adapter that has no frontends\n");
+ return -EINVAL;
+ }
+
+ /* Bring up the adapter */
+ res = vb2_dvb_register_adapter(f, module, adapter_priv, device,
+ fe->dvb.name, adapter_nr, mfe_shared);
+ if (res < 0) {
+ pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res);
+ return res;
+ }
+
+ /* Attach all of the frontends to the adapter */
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ res = vb2_dvb_register_frontend(&f->adapter, &fe->dvb);
+ if (res < 0) {
+ pr_warn("%s: vb2_dvb_register_frontend failed (errno = %d)\n",
+ fe->dvb.name, res);
+ goto err;
+ }
+ }
+ mutex_unlock(&f->lock);
+ return 0;
+
+err:
+ mutex_unlock(&f->lock);
+ vb2_dvb_unregister_bus(f);
+ return res;
+}
+EXPORT_SYMBOL(vb2_dvb_register_bus);
+
+void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f)
+{
+ vb2_dvb_dealloc_frontends(f);
+
+ dvb_unregister_adapter(&f->adapter);
+}
+EXPORT_SYMBOL(vb2_dvb_unregister_bus);
+
+struct vb2_dvb_frontend *vb2_dvb_get_frontend(
+ struct vb2_dvb_frontends *f, int id)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe, *ret = NULL;
+
+ mutex_lock(&f->lock);
+
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->id == id) {
+ ret = fe;
+ break;
+ }
+ }
+
+ mutex_unlock(&f->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vb2_dvb_get_frontend);
+
+int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f,
+ struct dvb_frontend *p)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe = NULL;
+ int ret = 0;
+
+ mutex_lock(&f->lock);
+
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->dvb.frontend == p) {
+ ret = fe->id;
+ break;
+ }
+ }
+
+ mutex_unlock(&f->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vb2_dvb_find_frontend);
+
+struct vb2_dvb_frontend *vb2_dvb_alloc_frontend(
+ struct vb2_dvb_frontends *f, int id)
+{
+ struct vb2_dvb_frontend *fe;
+
+ fe = kzalloc(sizeof(struct vb2_dvb_frontend), GFP_KERNEL);
+ if (fe == NULL)
+ return NULL;
+
+ fe->id = id;
+ mutex_init(&fe->dvb.lock);
+
+ mutex_lock(&f->lock);
+ list_add_tail(&fe->felist, &f->felist);
+ mutex_unlock(&f->lock);
+ return fe;
+}
+EXPORT_SYMBOL(vb2_dvb_alloc_frontend);
+
+void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe;
+
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->dvb.net.dvbdev) {
+ dvb_net_release(&fe->dvb.net);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_mem);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_hw);
+ dvb_dmxdev_release(&fe->dvb.dmxdev);
+ dvb_dmx_release(&fe->dvb.demux);
+ dvb_unregister_frontend(fe->dvb.frontend);
+ }
+ if (fe->dvb.frontend)
+ /* always allocated, may have been reset */
+ dvb_frontend_detach(fe->dvb.frontend);
+ list_del(list); /* remove list entry */
+ kfree(fe); /* free frontend allocation */
+ }
+ mutex_unlock(&f->lock);
+}
+EXPORT_SYMBOL(vb2_dvb_dealloc_frontends);
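A hedged sketch of how a bridge driver would consume these helpers (struct my_dev, my_attach_demod() and the device pointer are assumptions, and fe->dvb.dvbq is expected to be a fully initialized vb2_queue before registration):

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static int my_dvb_init(struct my_dev *dev)
{
	struct vb2_dvb_frontend *fe;
	int ret;

	mutex_init(&dev->frontends.lock);
	INIT_LIST_HEAD(&dev->frontends.felist);

	fe = vb2_dvb_alloc_frontend(&dev->frontends, 1);
	if (!fe)
		return -ENOMEM;

	fe->dvb.name = "my-demod";
	fe->dvb.frontend = my_attach_demod(dev);	/* dvb_attach() of the demodulator */
	if (!fe->dvb.frontend) {
		vb2_dvb_dealloc_frontends(&dev->frontends);
		return -ENODEV;
	}

	ret = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
				   dev->dev, adapter_nr, 0);
	if (ret < 0)
		vb2_dvb_dealloc_frontends(&dev->frontends);
	return ret;
}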
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index a47fd4f589a..313d9771b2b 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -35,11 +35,11 @@ struct vb2_vmalloc_buf {
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size)
+static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
struct vb2_vmalloc_buf *buf;
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
if (!buf)
return NULL;