diff options
Diffstat (limited to 'drivers/media/v4l2-core')
24 files changed, 2284 insertions, 1006 deletions
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index 8c05565a240..9ca0f8d59a1 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -84,13 +84,6 @@ config VIDEOBUF2_DMA_SG  	select VIDEOBUF2_CORE  	select VIDEOBUF2_MEMOPS -config VIDEO_V4L2_INT_DEVICE -	tristate "V4L2 int device (DEPRECATED)" -	depends on VIDEO_V4L2 -	---help--- -	  An early framework for a hardware-independent interface for -	  image sensors and bridges etc. Currently used by omap24xxcam and -	  tcm825x drivers that should be converted to V4L2 subdev. - -	  Do not use for new developments. - +config VIDEOBUF2_DVB +	tristate +	select VIDEOBUF2_CORE diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index 1a85eee581f..63d29f27538 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile @@ -15,7 +15,6 @@ ifeq ($(CONFIG_OF),y)  endif  obj-$(CONFIG_VIDEO_V4L2) += videodev.o -obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o  obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o  obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o @@ -34,6 +33,7 @@ obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o  obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o  obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o  obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o +obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o  ccflags-y += -I$(srctree)/drivers/media/dvb-core  ccflags-y += -I$(srctree)/drivers/media/dvb-frontends diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index ddc9379eb27..06c18ba16fa 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c @@ -43,7 +43,7 @@  #define UNSET (-1U) -#define PREFIX (t->i2c->driver->driver.name) +#define PREFIX (t->i2c->dev.driver->name)  /*   * Driver modprobe parameters @@ -247,7 +247,7 @@ static const struct analog_demod_ops tuner_analog_ops = {  /**   * set_type - Sets the 
tuner type for a given device   * - * @c:			i2c_client descriptoy + * @c:			i2c_client descriptor   * @type:		type of the tuner (e. g. tuner number)   * @new_mode_mask:	Indicates if tuner supports TV and/or Radio   * @new_config:		an optional parameter used by a few tuners to adjust @@ -452,7 +452,7 @@ static void set_type(struct i2c_client *c, unsigned int type,  	}  	tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", -		  c->adapter->name, c->driver->driver.name, c->addr << 1, type, +		  c->adapter->name, c->dev.driver->name, c->addr << 1, type,  		  t->mode_mask);  	return; @@ -556,7 +556,7 @@ static void tuner_lookup(struct i2c_adapter *adap,  		int mode_mask;  		if (pos->i2c->adapter != adap || -		    strcmp(pos->i2c->driver->driver.name, "tuner")) +		    strcmp(pos->i2c->dev.driver->name, "tuner"))  			continue;  		mode_mask = pos->mode_mask; @@ -1301,7 +1301,6 @@ static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)  static const struct v4l2_subdev_core_ops tuner_core_ops = {  	.log_status = tuner_log_status, -	.s_std = tuner_s_std,  	.s_power = tuner_s_power,  }; @@ -1315,9 +1314,14 @@ static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {  	.s_config = tuner_s_config,  }; +static const struct v4l2_subdev_video_ops tuner_video_ops = { +	.s_std = tuner_s_std, +}; +  static const struct v4l2_subdev_ops tuner_ops = {  	.core = &tuner_core_ops,  	.tuner = &tuner_tuner_ops, +	.video = &tuner_video_ops,  };  /* diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index c85d69da35b..85a6a34128a 100644 --- a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -189,30 +189,53 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)  	struct v4l2_subdev *sd, *tmp;  	unsigned int notif_n_subdev = notifier->num_subdevs;  	unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS); -	struct device *dev[n_subdev]; +	struct device **dev;  	
int i = 0;  	if (!notifier->v4l2_dev)  		return; +	dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL); +	if (!dev) { +		dev_err(notifier->v4l2_dev->dev, +			"Failed to allocate device cache!\n"); +	} +  	mutex_lock(&list_lock);  	list_del(¬ifier->list);  	list_for_each_entry_safe(sd, tmp, ¬ifier->done, async_list) { -		dev[i] = get_device(sd->dev); +		struct device *d; + +		d = get_device(sd->dev);  		v4l2_async_cleanup(sd);  		/* If we handled USB devices, we'd have to lock the parent too */ -		device_release_driver(dev[i++]); +		device_release_driver(d);  		if (notifier->unbind)  			notifier->unbind(notifier, sd, sd->asd); + +		/* +		 * Store device at the device cache, in order to call +		 * put_device() on the final step +		 */ +		if (dev) +			dev[i++] = d; +		else +			put_device(d);  	}  	mutex_unlock(&list_lock); +	/* +	 * Call device_attach() to reprobe devices +	 * +	 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be +	 * executed. +	 */  	while (i--) {  		struct device *d = dev[i]; @@ -228,6 +251,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)  		}  		put_device(d);  	} +	kfree(dev);  	notifier->v4l2_dev = NULL; diff --git a/drivers/media/v4l2-core/v4l2-clk.c b/drivers/media/v4l2-core/v4l2-clk.c index b67de8642b5..e18cc0469cf 100644 --- a/drivers/media/v4l2-core/v4l2-clk.c +++ b/drivers/media/v4l2-core/v4l2-clk.c @@ -240,3 +240,42 @@ void v4l2_clk_unregister(struct v4l2_clk *clk)  	kfree(clk);  }  EXPORT_SYMBOL(v4l2_clk_unregister); + +struct v4l2_clk_fixed { +	unsigned long rate; +	struct v4l2_clk_ops ops; +}; + +static unsigned long fixed_get_rate(struct v4l2_clk *clk) +{ +	struct v4l2_clk_fixed *priv = clk->priv; +	return priv->rate; +} + +struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id, +		const char *id, unsigned long rate, struct module *owner) +{ +	struct v4l2_clk *clk; +	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL); + +	if (!priv) +		return ERR_PTR(-ENOMEM); + +	
priv->rate = rate; +	priv->ops.get_rate = fixed_get_rate; +	priv->ops.owner = owner; + +	clk = v4l2_clk_register(&priv->ops, dev_id, id, priv); +	if (IS_ERR(clk)) +		kfree(priv); + +	return clk; +} +EXPORT_SYMBOL(__v4l2_clk_register_fixed); + +void v4l2_clk_unregister_fixed(struct v4l2_clk *clk) +{ +	kfree(clk->priv); +	v4l2_clk_unregister(clk); +} +EXPORT_SYMBOL(v4l2_clk_unregister_fixed); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 037d7a55aa8..433d6d77942 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -236,14 +236,14 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,  	v4l2_subdev_init(sd, ops);  	sd->flags |= V4L2_SUBDEV_FL_IS_I2C;  	/* the owner is the same as the i2c_client's driver owner */ -	sd->owner = client->driver->driver.owner; +	sd->owner = client->dev.driver->owner;  	sd->dev = &client->dev;  	/* i2c_client and v4l2_subdev point to one another */  	v4l2_set_subdevdata(sd, client);  	i2c_set_clientdata(client, sd);  	/* initialize name */  	snprintf(sd->name, sizeof(sd->name), "%s %d-%04x", -		client->driver->driver.name, i2c_adapter_id(client->adapter), +		client->dev.driver->name, i2c_adapter_id(client->adapter),  		client->addr);  }  EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); @@ -274,11 +274,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,  	   loaded. This delay-load mechanism doesn't work if other drivers  	   want to use the i2c device, so explicitly loading the module  	   is the best alternative. 
*/ -	if (client == NULL || client->driver == NULL) +	if (client == NULL || client->dev.driver == NULL)  		goto error;  	/* Lock the module so we can safely get the v4l2_subdev pointer */ -	if (!try_module_get(client->driver->driver.owner)) +	if (!try_module_get(client->dev.driver->owner))  		goto error;  	sd = i2c_get_clientdata(client); @@ -287,7 +287,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,  	if (v4l2_device_register_subdev(v4l2_dev, sd))  		sd = NULL;  	/* Decrease the module use count to match the first try_module_get. */ -	module_put(client->driver->driver.owner); +	module_put(client->dev.driver->owner);  error:  	/* If we have a client but no subdev, then something went wrong and diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 8f7a6a454a4..7e2411c3641 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {  static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)  { +	if (get_user(kp->type, &up->type)) +		return -EFAULT; +  	switch (kp->type) {  	case V4L2_BUF_TYPE_VIDEO_CAPTURE:  	case V4L2_BUF_TYPE_VIDEO_OUTPUT: @@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us  static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)  { -	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) || -			get_user(kp->type, &up->type)) -			return -EFAULT; +	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) +		return -EFAULT;  	return __get_v4l2_format32(kp, up);  }  static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)  {  	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || -	    copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt))) -			return 
-EFAULT; +	    copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) +		return -EFAULT;  	return __get_v4l2_format32(&kp->format, &up->format);  } @@ -733,14 +735,14 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u  		copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||  		put_user(kp->pending, &up->pending) ||  		put_user(kp->sequence, &up->sequence) || -		put_compat_timespec(&kp->timestamp, &up->timestamp) || +		compat_put_timespec(&kp->timestamp, &up->timestamp) ||  		put_user(kp->id, &up->id) ||  		copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))  			return -EFAULT;  	return 0;  } -struct v4l2_subdev_edid32 { +struct v4l2_edid32 {  	__u32 pad;  	__u32 start_block;  	__u32 blocks; @@ -748,11 +750,11 @@ struct v4l2_subdev_edid32 {  	compat_caddr_t edid;  }; -static int get_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subdev_edid32 __user *up) +static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)  {  	u32 tmp; -	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_subdev_edid32)) || +	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||  		get_user(kp->pad, &up->pad) ||  		get_user(kp->start_block, &up->start_block) ||  		get_user(kp->blocks, &up->blocks) || @@ -763,11 +765,11 @@ static int get_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde  	return 0;  } -static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subdev_edid32 __user *up) +static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)  {  	u32 tmp = (u32)((unsigned long)kp->edid); -	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_subdev_edid32)) || +	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||  		put_user(kp->pad, &up->pad) ||  		put_user(kp->start_block, &up->start_block) ||  		put_user(kp->blocks, &up->blocks) || @@ -787,8 +789,8 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde  
#define VIDIOC_DQBUF32		_IOWR('V', 17, struct v4l2_buffer32)  #define VIDIOC_ENUMSTD32	_IOWR('V', 25, struct v4l2_standard32)  #define VIDIOC_ENUMINPUT32	_IOWR('V', 26, struct v4l2_input32) -#define VIDIOC_SUBDEV_G_EDID32	_IOWR('V', 63, struct v4l2_subdev_edid32) -#define VIDIOC_SUBDEV_S_EDID32	_IOWR('V', 64, struct v4l2_subdev_edid32) +#define VIDIOC_G_EDID32		_IOWR('V', 40, struct v4l2_edid32) +#define VIDIOC_S_EDID32		_IOWR('V', 41, struct v4l2_edid32)  #define VIDIOC_TRY_FMT32      	_IOWR('V', 64, struct v4l2_format32)  #define VIDIOC_G_EXT_CTRLS32    _IOWR('V', 71, struct v4l2_ext_controls32)  #define VIDIOC_S_EXT_CTRLS32    _IOWR('V', 72, struct v4l2_ext_controls32) @@ -816,7 +818,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar  		struct v4l2_ext_controls v2ecs;  		struct v4l2_event v2ev;  		struct v4l2_create_buffers v2crt; -		struct v4l2_subdev_edid v2edid; +		struct v4l2_edid v2edid;  		unsigned long vx;  		int vi;  	} karg; @@ -849,8 +851,8 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar  	case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;  	case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;  	case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break; -	case VIDIOC_SUBDEV_G_EDID32: cmd = VIDIOC_SUBDEV_G_EDID; break; -	case VIDIOC_SUBDEV_S_EDID32: cmd = VIDIOC_SUBDEV_S_EDID; break; +	case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break; +	case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break;  	}  	switch (cmd) { @@ -868,9 +870,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar  		compatible_arg = 0;  		break; -	case VIDIOC_SUBDEV_G_EDID: -	case VIDIOC_SUBDEV_S_EDID: -		err = get_v4l2_subdev_edid32(&karg.v2edid, up); +	case VIDIOC_G_EDID: +	case VIDIOC_S_EDID: +		err = get_v4l2_edid32(&karg.v2edid, up);  		compatible_arg = 0;  		break; @@ -966,9 +968,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar  		err = 
put_v4l2_event32(&karg.v2ev, up);  		break; -	case VIDIOC_SUBDEV_G_EDID: -	case VIDIOC_SUBDEV_S_EDID: -		err = put_v4l2_subdev_edid32(&karg.v2edid, up); +	case VIDIOC_G_EDID: +	case VIDIOC_S_EDID: +		err = put_v4l2_edid32(&karg.v2edid, up);  		break;  	case VIDIOC_G_FMT: @@ -1006,103 +1008,14 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)  	if (!file->f_op->unlocked_ioctl)  		return ret; -	switch (cmd) { -	case VIDIOC_QUERYCAP: -	case VIDIOC_RESERVED: -	case VIDIOC_ENUM_FMT: -	case VIDIOC_G_FMT32: -	case VIDIOC_S_FMT32: -	case VIDIOC_REQBUFS: -	case VIDIOC_QUERYBUF32: -	case VIDIOC_G_FBUF32: -	case VIDIOC_S_FBUF32: -	case VIDIOC_OVERLAY32: -	case VIDIOC_QBUF32: -	case VIDIOC_EXPBUF: -	case VIDIOC_DQBUF32: -	case VIDIOC_STREAMON32: -	case VIDIOC_STREAMOFF32: -	case VIDIOC_G_PARM: -	case VIDIOC_S_PARM: -	case VIDIOC_G_STD: -	case VIDIOC_S_STD: -	case VIDIOC_ENUMSTD32: -	case VIDIOC_ENUMINPUT32: -	case VIDIOC_G_CTRL: -	case VIDIOC_S_CTRL: -	case VIDIOC_G_TUNER: -	case VIDIOC_S_TUNER: -	case VIDIOC_G_AUDIO: -	case VIDIOC_S_AUDIO: -	case VIDIOC_QUERYCTRL: -	case VIDIOC_QUERYMENU: -	case VIDIOC_G_INPUT32: -	case VIDIOC_S_INPUT32: -	case VIDIOC_G_OUTPUT32: -	case VIDIOC_S_OUTPUT32: -	case VIDIOC_ENUMOUTPUT: -	case VIDIOC_G_AUDOUT: -	case VIDIOC_S_AUDOUT: -	case VIDIOC_G_MODULATOR: -	case VIDIOC_S_MODULATOR: -	case VIDIOC_S_FREQUENCY: -	case VIDIOC_G_FREQUENCY: -	case VIDIOC_CROPCAP: -	case VIDIOC_G_CROP: -	case VIDIOC_S_CROP: -	case VIDIOC_G_SELECTION: -	case VIDIOC_S_SELECTION: -	case VIDIOC_G_JPEGCOMP: -	case VIDIOC_S_JPEGCOMP: -	case VIDIOC_QUERYSTD: -	case VIDIOC_TRY_FMT32: -	case VIDIOC_ENUMAUDIO: -	case VIDIOC_ENUMAUDOUT: -	case VIDIOC_G_PRIORITY: -	case VIDIOC_S_PRIORITY: -	case VIDIOC_G_SLICED_VBI_CAP: -	case VIDIOC_LOG_STATUS: -	case VIDIOC_G_EXT_CTRLS32: -	case VIDIOC_S_EXT_CTRLS32: -	case VIDIOC_TRY_EXT_CTRLS32: -	case VIDIOC_ENUM_FRAMESIZES: -	case VIDIOC_ENUM_FRAMEINTERVALS: -	case VIDIOC_G_ENC_INDEX: -	case 
VIDIOC_ENCODER_CMD: -	case VIDIOC_TRY_ENCODER_CMD: -	case VIDIOC_DECODER_CMD: -	case VIDIOC_TRY_DECODER_CMD: -	case VIDIOC_DBG_S_REGISTER: -	case VIDIOC_DBG_G_REGISTER: -	case VIDIOC_S_HW_FREQ_SEEK: -	case VIDIOC_S_DV_TIMINGS: -	case VIDIOC_G_DV_TIMINGS: -	case VIDIOC_DQEVENT: -	case VIDIOC_DQEVENT32: -	case VIDIOC_SUBSCRIBE_EVENT: -	case VIDIOC_UNSUBSCRIBE_EVENT: -	case VIDIOC_CREATE_BUFS32: -	case VIDIOC_PREPARE_BUF32: -	case VIDIOC_ENUM_DV_TIMINGS: -	case VIDIOC_QUERY_DV_TIMINGS: -	case VIDIOC_DV_TIMINGS_CAP: -	case VIDIOC_ENUM_FREQ_BANDS: -	case VIDIOC_SUBDEV_G_EDID32: -	case VIDIOC_SUBDEV_S_EDID32: +	if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)  		ret = do_video_ioctl(file, cmd, arg); -		break; +	else if (vdev->fops->compat_ioctl32) +		ret = vdev->fops->compat_ioctl32(file, cmd, arg); -	default: -		if (vdev->fops->compat_ioctl32) -			ret = vdev->fops->compat_ioctl32(file, cmd, arg); - -		if (ret == -ENOIOCTLCMD) -			printk(KERN_WARNING "compat_ioctl32: " -				"unknown ioctl '%c', dir=%d, #%d (0x%08x)\n", -				_IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), -				cmd); -		break; -	} +	if (ret == -ENOIOCTLCMD) +		pr_warn("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n", +			_IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);  	return ret;  }  EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32); diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index c3f08038868..55c68325410 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -420,7 +420,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)  		"Advanced Simple",  		"Core",  		"Simple Scalable", -		"Advanced Coding Efficency", +		"Advanced Coding Efficiency",  		NULL,  	}; @@ -565,13 +565,13 @@ EXPORT_SYMBOL(v4l2_ctrl_get_menu);   * Returns NULL or an s64 type array containing the menu for given   * control ID. The total number of the menu items is returned in @len.   
*/ -const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len) +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)  { -	static const s64 const qmenu_int_vpx_num_partitions[] = { +	static const s64 qmenu_int_vpx_num_partitions[] = {  		1, 2, 4, 8,  	}; -	static const s64 const qmenu_int_vpx_num_ref_frames[] = { +	static const s64 qmenu_int_vpx_num_ref_frames[] = {  		1, 2, 3,  	}; @@ -583,7 +583,7 @@ const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len)  	default:  		*len = 0;  		return NULL; -	}; +	}  }  EXPORT_SYMBOL(v4l2_ctrl_get_int_menu); @@ -735,6 +735,8 @@ const char *v4l2_ctrl_get_name(u32 id)  	case V4L2_CID_MPEG_VIDEO_DEC_PTS:			return "Video Decoder PTS";  	case V4L2_CID_MPEG_VIDEO_DEC_FRAME:			return "Video Decoder Frame Count";  	case V4L2_CID_MPEG_VIDEO_VBV_DELAY:			return "Initial Delay for VBV Control"; +	case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:		return "Horizontal MV Search Range"; +	case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:		return "Vertical MV Search Range";  	case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:		return "Repeat Sequence Header";  	/* VPX controls */ @@ -745,6 +747,11 @@ const char *v4l2_ctrl_get_name(u32 id)  	case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS:		return "VPX Deblocking Effect Control";  	case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD:	return "VPX Golden Frame Refresh Period";  	case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:		return "VPX Golden Frame Indicator"; +	case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:			return "VPX Minimum QP Value"; +	case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:			return "VPX Maximum QP Value"; +	case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP:		return "VPX I-Frame QP Value"; +	case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:		return "VPX P-Frame QP Value"; +	case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:			return "VPX Profile";  	/* CAMERA controls */  	/* Keep the order of the 'case's the same as in videodev2.h! 
*/ @@ -852,6 +859,17 @@ const char *v4l2_ctrl_get_name(u32 id)  	case V4L2_CID_FM_RX_CLASS:		return "FM Radio Receiver Controls";  	case V4L2_CID_TUNE_DEEMPHASIS:		return "De-Emphasis";  	case V4L2_CID_RDS_RECEPTION:		return "RDS Reception"; + +	case V4L2_CID_RF_TUNER_CLASS:		return "RF Tuner Controls"; +	case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:	return "LNA Gain, Auto"; +	case V4L2_CID_RF_TUNER_LNA_GAIN:	return "LNA Gain"; +	case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:	return "Mixer Gain, Auto"; +	case V4L2_CID_RF_TUNER_MIXER_GAIN:	return "Mixer Gain"; +	case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:	return "IF Gain, Auto"; +	case V4L2_CID_RF_TUNER_IF_GAIN:		return "IF Gain"; +	case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:	return "Bandwidth, Auto"; +	case V4L2_CID_RF_TUNER_BANDWIDTH:	return "Bandwidth"; +	case V4L2_CID_RF_TUNER_PLL_LOCK:	return "PLL Lock";  	default:  		return NULL;  	} @@ -901,10 +919,19 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,  	case V4L2_CID_WIDE_DYNAMIC_RANGE:  	case V4L2_CID_IMAGE_STABILIZATION:  	case V4L2_CID_RDS_RECEPTION: +	case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: +	case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: +	case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: +	case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: +	case V4L2_CID_RF_TUNER_PLL_LOCK:  		*type = V4L2_CTRL_TYPE_BOOLEAN;  		*min = 0;  		*max = *step = 1;  		break; +	case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: +	case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: +		*type = V4L2_CTRL_TYPE_INTEGER; +		break;  	case V4L2_CID_PAN_RESET:  	case V4L2_CID_TILT_RESET:  	case V4L2_CID_FLASH_STROBE: @@ -986,6 +1013,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,  	case V4L2_CID_IMAGE_PROC_CLASS:  	case V4L2_CID_DV_CLASS:  	case V4L2_CID_FM_RX_CLASS: +	case V4L2_CID_RF_TUNER_CLASS:  		*type = V4L2_CTRL_TYPE_CTRL_CLASS;  		/* You can neither read not write these */  		*flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY; @@ -1058,6 +1086,10 @@ void v4l2_ctrl_fill(u32 id, const 
char **name, enum v4l2_ctrl_type *type,  	case V4L2_CID_PILOT_TONE_FREQUENCY:  	case V4L2_CID_TUNE_POWER_LEVEL:  	case V4L2_CID_TUNE_ANTENNA_CAPACITOR: +	case V4L2_CID_RF_TUNER_LNA_GAIN: +	case V4L2_CID_RF_TUNER_MIXER_GAIN: +	case V4L2_CID_RF_TUNER_IF_GAIN: +	case V4L2_CID_RF_TUNER_BANDWIDTH:  		*flags |= V4L2_CTRL_FLAG_SLIDER;  		break;  	case V4L2_CID_PAN_RELATIVE: @@ -1076,6 +1108,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,  	case V4L2_CID_DV_RX_POWER_PRESENT:  		*flags |= V4L2_CTRL_FLAG_READ_ONLY;  		break; +	case V4L2_CID_RF_TUNER_PLL_LOCK: +		*flags |= V4L2_CTRL_FLAG_VOLATILE; +		break;  	}  }  EXPORT_SYMBOL(v4l2_ctrl_fill); @@ -1916,7 +1951,8 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)  	int i;  	/* The first control is the master control and it must not be NULL */ -	BUG_ON(ncontrols == 0 || controls[0] == NULL); +	if (WARN_ON(ncontrols == 0 || controls[0] == NULL)) +		return;  	for (i = 0; i < ncontrols; i++) {  		if (controls[i]) { diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index b5aaaac427a..634d863c05b 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -554,6 +554,7 @@ static void determine_valid_ioctls(struct video_device *vdev)  	bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;  	bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;  	bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO; +	bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;  	bool is_rx = vdev->vfl_dir != VFL_DIR_TX;  	bool is_tx = vdev->vfl_dir != VFL_DIR_RX; @@ -662,9 +663,20 @@ static void determine_valid_ioctls(struct video_device *vdev)  			       ops->vidioc_try_fmt_sliced_vbi_out)))  			set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);  		SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap); +	} else if (is_sdr) { +		/* SDR specific ioctls */ +		if (ops->vidioc_enum_fmt_sdr_cap) +			set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls); +		
if (ops->vidioc_g_fmt_sdr_cap) +			set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); +		if (ops->vidioc_s_fmt_sdr_cap) +			set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); +		if (ops->vidioc_try_fmt_sdr_cap) +			set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);  	} -	if (!is_radio) { -		/* ioctls valid for video or vbi */ + +	if (is_vid || is_vbi || is_sdr) { +		/* ioctls valid for video, vbi or sdr */  		SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);  		SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);  		SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf); @@ -672,6 +684,10 @@ static void determine_valid_ioctls(struct video_device *vdev)  		SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);  		SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);  		SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf); +	} + +	if (is_vid || is_vbi) { +		/* ioctls valid for video or vbi */  		if (ops->vidioc_s_std)  			set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);  		SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std); @@ -685,6 +701,7 @@ static void determine_valid_ioctls(struct video_device *vdev)  			SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);  			SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);  			SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings); +			SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);  		}  		if (is_tx) {  			SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); @@ -710,9 +727,10 @@ static void determine_valid_ioctls(struct video_device *vdev)  		SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);  		SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);  		SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap); +		SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);  	} -	if (is_tx) { -		/* transmitter only ioctls */ +	if (is_tx && (is_radio || is_sdr)) { +		/* radio transmitter only ioctls */  		SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);  		
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);  	} @@ -758,6 +776,8 @@ static void determine_valid_ioctls(struct video_device *vdev)   *	%VFL_TYPE_RADIO - A radio card   *   *	%VFL_TYPE_SUBDEV - A subdevice + * + *	%VFL_TYPE_SDR - Software Defined Radio   */  int __video_register_device(struct video_device *vdev, int type, int nr,  		int warn_if_nr_in_use, struct module *owner) @@ -797,6 +817,10 @@ int __video_register_device(struct video_device *vdev, int type, int nr,  	case VFL_TYPE_SUBDEV:  		name_base = "v4l-subdev";  		break; +	case VFL_TYPE_SDR: +		/* Use device name 'swradio' because 'sdr' was already taken. */ +		name_base = "swradio"; +		break;  	default:  		printk(KERN_ERR "%s called with unknown type: %d\n",  		       __func__, type); @@ -872,8 +896,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr,  	/* Should not happen since we thought this minor was free */  	WARN_ON(video_device[vdev->minor] != NULL); -	video_device[vdev->minor] = vdev;  	vdev->index = get_index(vdev); +	video_device[vdev->minor] = vdev;  	mutex_unlock(&videodev_lock);  	if (vdev->ioctl_ops) diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c index 02d1b632711..015f92aab44 100644 --- a/drivers/media/v4l2-core/v4l2-device.c +++ b/drivers/media/v4l2-core/v4l2-device.c @@ -158,7 +158,17 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,  	/* Warn if we apparently re-register a subdev */  	WARN_ON(sd->v4l2_dev != NULL); -	if (!try_module_get(sd->owner)) +	/* +	 * The reason to acquire the module here is to avoid unloading +	 * a module of sub-device which is registered to a media +	 * device. To make it possible to unload modules for media +	 * devices that also register sub-devices, do not +	 * try_module_get() such sub-device owners. 
+	 */ +	sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver && +		sd->owner == v4l2_dev->dev->driver->owner; + +	if (!sd->owner_v4l2_dev && !try_module_get(sd->owner))  		return -ENODEV;  	sd->v4l2_dev = v4l2_dev; @@ -192,7 +202,8 @@ error_unregister:  	if (sd->internal_ops && sd->internal_ops->unregistered)  		sd->internal_ops->unregistered(sd);  error_module: -	module_put(sd->owner); +	if (!sd->owner_v4l2_dev) +		module_put(sd->owner);  	sd->v4l2_dev = NULL;  	return err;  } @@ -280,6 +291,7 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)  	}  #endif  	video_unregister_device(sd->devnode); -	module_put(sd->owner); +	if (!sd->owner_v4l2_dev) +		module_put(sd->owner);  }  EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev); diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index ee52b9f4a94..ce1c9f5d9de 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -26,6 +26,10 @@  #include <linux/v4l2-dv-timings.h>  #include <media/v4l2-dv-timings.h> +MODULE_AUTHOR("Hans Verkuil"); +MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions"); +MODULE_LICENSE("GPL"); +  const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {  	V4L2_DV_BT_CEA_640X480P59_94,  	V4L2_DV_BT_CEA_720X480I59_94, @@ -127,6 +131,17 @@ const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {  	V4L2_DV_BT_DMT_2560X1600P75,  	V4L2_DV_BT_DMT_2560X1600P85,  	V4L2_DV_BT_DMT_2560X1600P120_RB, +	V4L2_DV_BT_CEA_3840X2160P24, +	V4L2_DV_BT_CEA_3840X2160P25, +	V4L2_DV_BT_CEA_3840X2160P30, +	V4L2_DV_BT_CEA_3840X2160P50, +	V4L2_DV_BT_CEA_3840X2160P60, +	V4L2_DV_BT_CEA_4096X2160P24, +	V4L2_DV_BT_CEA_4096X2160P25, +	V4L2_DV_BT_CEA_4096X2160P30, +	V4L2_DV_BT_CEA_4096X2160P50, +	V4L2_DV_BT_DMT_4096X2160P59_94_RB, +	V4L2_DV_BT_CEA_4096X2160P60,  	{ }  };  EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets); @@ -324,6 +339,10 @@ EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);   * This function will attempt to detect 
if the given values correspond to a   * valid CVT format. If so, then it will return true, and fmt will be filled   * in with the found CVT timings. + * + * TODO: VESA defined a new version 2 of their reduced blanking + * formula. Support for that is currently missing in this CVT + * detection function.   */  bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,  		u32 polarities, struct v4l2_dv_timings *fmt) @@ -515,6 +534,7 @@ bool v4l2_detect_gtf(unsigned frame_height,  		aspect.denominator = 9;  	}  	image_width = ((image_height * aspect.numerator) / aspect.denominator); +	image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);  	/* Horizontal */  	if (default_gtf) @@ -590,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)  		aspect.denominator = 9;  	} else if (ratio == 34) {  		aspect.numerator = 4; -		aspect.numerator = 3; +		aspect.denominator = 3;  	} else if (ratio == 68) {  		aspect.numerator = 15; -		aspect.numerator = 9; +		aspect.denominator = 9;  	} else {  		aspect.numerator = hor_landscape + 99;  		aspect.denominator = 100; diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c index 86dcb5483c4..8761aab99de 100644 --- a/drivers/media/v4l2-core/v4l2-event.c +++ b/drivers/media/v4l2-core/v4l2-event.c @@ -318,3 +318,39 @@ int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,  	return v4l2_event_unsubscribe(fh, sub);  }  EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe); + +static void v4l2_event_src_replace(struct v4l2_event *old, +				const struct v4l2_event *new) +{ +	u32 old_changes = old->u.src_change.changes; + +	old->u.src_change = new->u.src_change; +	old->u.src_change.changes |= old_changes; +} + +static void v4l2_event_src_merge(const struct v4l2_event *old, +				struct v4l2_event *new) +{ +	new->u.src_change.changes |= old->u.src_change.changes; +} + +static const struct v4l2_subscribed_event_ops 
v4l2_event_src_ch_ops = { +	.replace = v4l2_event_src_replace, +	.merge = v4l2_event_src_merge, +}; + +int v4l2_src_change_event_subscribe(struct v4l2_fh *fh, +				const struct v4l2_event_subscription *sub) +{ +	if (sub->type == V4L2_EVENT_SOURCE_CHANGE) +		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops); +	return -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe); + +int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd, +		struct v4l2_fh *fh, struct v4l2_event_subscription *sub) +{ +	return v4l2_src_change_event_subscribe(fh, sub); +} +EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe); diff --git a/drivers/media/v4l2-core/v4l2-int-device.c b/drivers/media/v4l2-core/v4l2-int-device.c deleted file mode 100644 index f4473494af7..00000000000 --- a/drivers/media/v4l2-core/v4l2-int-device.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * drivers/media/video/v4l2-int-device.c - * - * V4L2 internal ioctl interface. - * - * Copyright (C) 2007 Nokia Corporation. - * - * Contact: Sakari Ailus <sakari.ailus@nokia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - */ - -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/sort.h> -#include <linux/string.h> -#include <linux/module.h> - -#include <media/v4l2-int-device.h> - -static DEFINE_MUTEX(mutex); -static LIST_HEAD(int_list); - -void v4l2_int_device_try_attach_all(void) -{ -	struct v4l2_int_device *m, *s; - -	list_for_each_entry(m, &int_list, head) { -		if (m->type != v4l2_int_type_master) -			continue; - -		list_for_each_entry(s, &int_list, head) { -			if (s->type != v4l2_int_type_slave) -				continue; - -			/* Slave is connected? */ -			if (s->u.slave->master) -				continue; - -			/* Slave wants to attach to master? */ -			if (s->u.slave->attach_to[0] != 0 -			    && strncmp(m->name, s->u.slave->attach_to, -				       V4L2NAMESIZE)) -				continue; - -			if (!try_module_get(m->module)) -				continue; - -			s->u.slave->master = m; -			if (m->u.master->attach(s)) { -				s->u.slave->master = NULL; -				module_put(m->module); -				continue; -			} -		} -	} -} -EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all); - -static int ioctl_sort_cmp(const void *a, const void *b) -{ -	const struct v4l2_int_ioctl_desc *d1 = a, *d2 = b; - -	if (d1->num > d2->num) -		return 1; - -	if (d1->num < d2->num) -		return -1; - -	return 0; -} - -int v4l2_int_device_register(struct v4l2_int_device *d) -{ -	if (d->type == v4l2_int_type_slave) -		sort(d->u.slave->ioctls, d->u.slave->num_ioctls, -		     sizeof(struct v4l2_int_ioctl_desc), -		     &ioctl_sort_cmp, NULL); -	mutex_lock(&mutex); -	list_add(&d->head, &int_list); -	v4l2_int_device_try_attach_all(); -	mutex_unlock(&mutex); - -	return 0; -} -EXPORT_SYMBOL_GPL(v4l2_int_device_register); - -void v4l2_int_device_unregister(struct v4l2_int_device *d) -{ -	mutex_lock(&mutex); -	list_del(&d->head); -	if (d->type == 
v4l2_int_type_slave -	    && d->u.slave->master != NULL) { -		d->u.slave->master->u.master->detach(d); -		module_put(d->u.slave->master->module); -		d->u.slave->master = NULL; -	} -	mutex_unlock(&mutex); -} -EXPORT_SYMBOL_GPL(v4l2_int_device_unregister); - -/* Adapted from search_extable in extable.c. */ -static v4l2_int_ioctl_func *find_ioctl(struct v4l2_int_slave *slave, int cmd, -				       v4l2_int_ioctl_func *no_such_ioctl) -{ -	const struct v4l2_int_ioctl_desc *first = slave->ioctls; -	const struct v4l2_int_ioctl_desc *last = -		first + slave->num_ioctls - 1; - -	while (first <= last) { -		const struct v4l2_int_ioctl_desc *mid; - -		mid = (last - first) / 2 + first; - -		if (mid->num < cmd) -			first = mid + 1; -		else if (mid->num > cmd) -			last = mid - 1; -		else -			return mid->func; -	} - -	return no_such_ioctl; -} - -static int no_such_ioctl_0(struct v4l2_int_device *d) -{ -	return -ENOIOCTLCMD; -} - -int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd) -{ -	return ((v4l2_int_ioctl_func_0 *) -		find_ioctl(d->u.slave, cmd, -			   (v4l2_int_ioctl_func *)no_such_ioctl_0))(d); -} -EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0); - -static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg) -{ -	return -ENOIOCTLCMD; -} - -int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg) -{ -	return ((v4l2_int_ioctl_func_1 *) -		find_ioctl(d->u.slave, cmd, -			   (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg); -} -EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1); - -MODULE_LICENSE("GPL"); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 68e6b5e912f..16bffd851bf 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -28,6 +28,9 @@  #include <media/v4l2-device.h>  #include <media/videobuf2-core.h> +#define CREATE_TRACE_POINTS +#include <trace/events/v4l2.h> +  /* Zero out the end of the struct pointed to by p.  Everything after, but   * not including, the specified field is cleared. 
*/  #define CLEAR_AFTER_FIELD(p, field) \ @@ -149,6 +152,7 @@ const char *v4l2_type_names[] = {  	[V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",  	[V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",  	[V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane", +	[V4L2_BUF_TYPE_SDR_CAPTURE]        = "sdr-cap",  };  EXPORT_SYMBOL(v4l2_type_names); @@ -242,6 +246,7 @@ static void v4l_print_format(const void *arg, bool write_only)  	const struct v4l2_vbi_format *vbi;  	const struct v4l2_sliced_vbi_format *sliced;  	const struct v4l2_window *win; +	const struct v4l2_sdr_format *sdr;  	unsigned i;  	pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); @@ -315,6 +320,14 @@ static void v4l_print_format(const void *arg, bool write_only)  				sliced->service_lines[0][i],  				sliced->service_lines[1][i]);  		break; +	case V4L2_BUF_TYPE_SDR_CAPTURE: +		sdr = &p->fmt.sdr; +		pr_cont(", pixelformat=%c%c%c%c\n", +			(sdr->pixelformat >>  0) & 0xff, +			(sdr->pixelformat >>  8) & 0xff, +			(sdr->pixelformat >> 16) & 0xff, +			(sdr->pixelformat >> 24) & 0xff); +		break;  	}  } @@ -549,7 +562,7 @@ static void v4l_print_cropcap(const void *arg, bool write_only)  	const struct v4l2_cropcap *p = arg;  	pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, " -		"defrect wxh=%dx%d, x,y=%d,%d\n, " +		"defrect wxh=%dx%d, x,y=%d,%d, "  		"pixelaspect %d/%d\n",  		prt_names(p->type, v4l2_type_names),  		p->bounds.width, p->bounds.height, @@ -831,6 +844,14 @@ static void v4l_print_freq_band(const void *arg, bool write_only)  			p->rangehigh, p->modulation);  } +static void v4l_print_edid(const void *arg, bool write_only) +{ +	const struct v4l2_edid *p = arg; + +	pr_cont("pad=%u, start_block=%u, blocks=%u\n", +		p->pad, p->start_block, p->blocks); +} +  static void v4l_print_u32(const void *arg, bool write_only)  {  	pr_cont("value=%u\n", *(const u32 *)arg); @@ -878,6 +899,7 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)  	const struct v4l2_ioctl_ops *ops = 
vfd->ioctl_ops;  	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;  	bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI; +	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;  	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;  	bool is_tx = vfd->vfl_dir != VFL_DIR_RX; @@ -927,6 +949,10 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)  		if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out)  			return 0;  		break; +	case V4L2_BUF_TYPE_SDR_CAPTURE: +		if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap) +			return 0; +		break;  	default:  		break;  	} @@ -1046,6 +1072,10 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,  		if (unlikely(!is_tx || !ops->vidioc_enum_fmt_vid_out_mplane))  			break;  		return ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg); +	case V4L2_BUF_TYPE_SDR_CAPTURE: +		if (unlikely(!is_rx || !ops->vidioc_enum_fmt_sdr_cap)) +			break; +		return ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);  	}  	return -EINVAL;  } @@ -1056,6 +1086,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,  	struct v4l2_format *p = arg;  	struct video_device *vfd = video_devdata(file);  	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; +	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;  	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;  	bool is_tx = vfd->vfl_dir != VFL_DIR_RX; @@ -1100,6 +1131,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,  		if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out))  			break;  		return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg); +	case V4L2_BUF_TYPE_SDR_CAPTURE: +		if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap)) +			break; +		return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);  	}  	return -EINVAL;  } @@ -1110,6 +1145,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,  	struct v4l2_format *p = arg;  	struct video_device *vfd = video_devdata(file);  	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; +	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;  	bool is_rx = vfd->vfl_dir != 
VFL_DIR_TX;  	bool is_tx = vfd->vfl_dir != VFL_DIR_RX; @@ -1164,6 +1200,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,  			break;  		CLEAR_AFTER_FIELD(p, fmt.sliced);  		return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); +	case V4L2_BUF_TYPE_SDR_CAPTURE: +		if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap)) +			break; +		CLEAR_AFTER_FIELD(p, fmt.sdr); +		return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);  	}  	return -EINVAL;  } @@ -1174,6 +1215,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,  	struct v4l2_format *p = arg;  	struct video_device *vfd = video_devdata(file);  	bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; +	bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;  	bool is_rx = vfd->vfl_dir != VFL_DIR_TX;  	bool is_tx = vfd->vfl_dir != VFL_DIR_RX; @@ -1228,6 +1270,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,  			break;  		CLEAR_AFTER_FIELD(p, fmt.sliced);  		return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); +	case V4L2_BUF_TYPE_SDR_CAPTURE: +		if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap)) +			break; +		CLEAR_AFTER_FIELD(p, fmt.sdr); +		return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);  	}  	return -EINVAL;  } @@ -1288,8 +1335,11 @@ static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,  	struct video_device *vfd = video_devdata(file);  	struct v4l2_frequency *p = arg; -	p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? -			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; +	if (vfd->vfl_type == VFL_TYPE_SDR) +		p->type = V4L2_TUNER_ADC; +	else +		p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? +				V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;  	return ops->vidioc_g_frequency(file, fh, p);  } @@ -1300,10 +1350,15 @@ static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,  	const struct v4l2_frequency *p = arg;  	enum v4l2_tuner_type type; -	type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
-			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; -	if (p->type != type) -		return -EINVAL; +	if (vfd->vfl_type == VFL_TYPE_SDR) { +		if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF) +			return -EINVAL; +	} else { +		type = (vfd->vfl_type == VFL_TYPE_RADIO) ? +				V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; +		if (type != p->type) +			return -EINVAL; +	}  	return ops->vidioc_s_frequency(file, fh, p);  } @@ -1383,6 +1438,10 @@ static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,  	struct v4l2_hw_freq_seek *p = arg;  	enum v4l2_tuner_type type; +	/* s_hw_freq_seek is not supported for SDR for now */ +	if (vfd->vfl_type == VFL_TYPE_SDR) +		return -EINVAL; +  	type = (vfd->vfl_type == VFL_TYPE_RADIO) ?  		V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;  	if (p->type != type) @@ -1882,11 +1941,16 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,  	enum v4l2_tuner_type type;  	int err; -	type = (vfd->vfl_type == VFL_TYPE_RADIO) ? -			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; - -	if (type != p->type) -		return -EINVAL; +	if (vfd->vfl_type == VFL_TYPE_SDR) { +		if (p->type != V4L2_TUNER_ADC && p->type != V4L2_TUNER_RF) +			return -EINVAL; +		type = p->type; +	} else { +		type = (vfd->vfl_type == VFL_TYPE_RADIO) ? 
+				V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; +		if (type != p->type) +			return -EINVAL; +	}  	if (ops->vidioc_enum_freq_bands)  		return ops->vidioc_enum_freq_bands(file, fh, p);  	if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) { @@ -2006,6 +2070,8 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {  	IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),  	IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),  	IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO), +	IOCTL_INFO_STD(VIDIOC_G_EDID, vidioc_g_edid, v4l_print_edid, INFO_FL_CLEAR(v4l2_edid, edid)), +	IOCTL_INFO_STD(VIDIOC_S_EDID, vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_edid, edid)),  	IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),  	IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),  	IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)), @@ -2194,7 +2260,7 @@ done:  }  static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, -			    void * __user *user_ptr, void ***kernel_ptr) +			    void __user **user_ptr, void ***kernel_ptr)  {  	int ret = 0; @@ -2211,16 +2277,16 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,  				break;  			}  			*user_ptr = (void __user *)buf->m.planes; -			*kernel_ptr = (void *)&buf->m.planes; +			*kernel_ptr = (void **)&buf->m.planes;  			*array_size = sizeof(struct v4l2_plane) * buf->length;  			ret = 1;  		}  		break;  	} -	case VIDIOC_SUBDEV_G_EDID: -	case VIDIOC_SUBDEV_S_EDID: { -		struct v4l2_subdev_edid *edid = parg; +	case VIDIOC_G_EDID: +	case VIDIOC_S_EDID: { +		struct v4l2_edid *edid = parg;  		if (edid->blocks) {  			if (edid->blocks > 256) { @@ -2228,7 +2294,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,  				break;  			}  			*user_ptr = (void __user *)edid->edid; 
-			*kernel_ptr = (void *)&edid->edid; +			*kernel_ptr = (void **)&edid->edid;  			*array_size = edid->blocks * 128;  			ret = 1;  		} @@ -2246,7 +2312,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,  				break;  			}  			*user_ptr = (void __user *)ctrls->controls; -			*kernel_ptr = (void *)&ctrls->controls; +			*kernel_ptr = (void **)&ctrls->controls;  			*array_size = sizeof(struct v4l2_ext_control)  				    * ctrls->count;  			ret = 1; @@ -2338,9 +2404,15 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,  	err = func(file, cmd, parg);  	if (err == -ENOIOCTLCMD)  		err = -ENOTTY; +	if (err == 0) { +		if (cmd == VIDIOC_DQBUF) +			trace_v4l2_dqbuf(video_devdata(file)->minor, parg); +		else if (cmd == VIDIOC_QBUF) +			trace_v4l2_qbuf(video_devdata(file)->minor, parg); +	}  	if (has_array_args) { -		*kernel_ptr = user_ptr; +		*kernel_ptr = (void __force *)user_ptr;  		if (copy_to_user(user_ptr, mbuf, array_size))  			err = -EFAULT;  		goto out_array_args; diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 7c437128821..178ce96556c 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -41,6 +41,8 @@ module_param(debug, bool, 0644);  #define TRANS_QUEUED		(1 << 0)  /* Instance is currently running in hardware */  #define TRANS_RUNNING		(1 << 1) +/* Instance is currently aborting */ +#define TRANS_ABORT		(1 << 2)  /* Offset base for buffers on the destination queue - used to distinguish @@ -221,6 +223,14 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)  	}  	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); + +	/* If the context is aborted then don't schedule it */ +	if (m2m_ctx->job_flags & TRANS_ABORT) { +		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); +		dprintk("Aborted context\n"); +		return; +	} +  	if (m2m_ctx->job_flags & TRANS_QUEUED) {  		
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);  		dprintk("On job queue already\n"); @@ -280,6 +290,8 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)  	m2m_dev = m2m_ctx->m2m_dev;  	spin_lock_irqsave(&m2m_dev->job_spinlock, flags); + +	m2m_ctx->job_flags |= TRANS_ABORT;  	if (m2m_ctx->job_flags & TRANS_RUNNING) {  		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);  		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); @@ -480,13 +492,15 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,  	m2m_dev = m2m_ctx->m2m_dev;  	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);  	/* We should not be scheduled anymore, since we're dropping a queue. */ -	INIT_LIST_HEAD(&m2m_ctx->queue); +	if (m2m_ctx->job_flags & TRANS_QUEUED) +		list_del(&m2m_ctx->queue);  	m2m_ctx->job_flags = 0;  	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);  	/* Drop queue, since streamoff returns device to the same state as after  	 * calling reqbufs. */  	INIT_LIST_HEAD(&q_ctx->rdy_queue); +	q_ctx->num_rdy = 0;  	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);  	if (m2m_dev->curr_ctx == m2m_ctx) { @@ -544,6 +558,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,  	if (m2m_ctx->m2m_dev->m2m_ops->unlock)  		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); +	else if (m2m_ctx->q_lock) +		mutex_unlock(m2m_ctx->q_lock);  	if (list_empty(&src_q->done_list))  		poll_wait(file, &src_q->done_wq, wait); @@ -552,6 +568,8 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,  	if (m2m_ctx->m2m_dev->m2m_ops->lock)  		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); +	else if (m2m_ctx->q_lock) +		mutex_lock(m2m_ctx->q_lock);  	spin_lock_irqsave(&src_q->done_lock, flags);  	if (!list_empty(&src_q->done_list)) @@ -679,6 +697,13 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,  	if (ret)  		goto err; +	/* +	 * If both queues use same mutex assign it as the common buffer +	 * queues 
lock to the m2m context. This lock is used in the +	 * v4l2_m2m_ioctl_* helpers. +	 */ +	if (out_q_ctx->q.lock == cap_q_ctx->q.lock) +		m2m_ctx->q_lock = out_q_ctx->q.lock;  	return m2m_ctx;  err: @@ -726,3 +751,118 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)  }  EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); +/* Videobuf2 ioctl helpers */ + +int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, +				struct v4l2_requestbuffers *rb) +{ +	struct v4l2_fh *fh = file->private_data; + +	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs); + +int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv, +				struct v4l2_create_buffers *create) +{ +	struct v4l2_fh *fh = file->private_data; + +	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs); + +int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv, +				struct v4l2_buffer *buf) +{ +	struct v4l2_fh *fh = file->private_data; + +	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf); + +int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv, +				struct v4l2_buffer *buf) +{ +	struct v4l2_fh *fh = file->private_data; + +	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf); + +int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv, +				struct v4l2_buffer *buf) +{ +	struct v4l2_fh *fh = file->private_data; + +	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf); + +int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, +				struct v4l2_exportbuffer *eb) +{ +	struct v4l2_fh *fh = file->private_data; + +	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf); + +int v4l2_m2m_ioctl_streamon(struct file *file, void *priv, +				enum v4l2_buf_type type) +{ +	struct v4l2_fh *fh = file->private_data; + +	return v4l2_m2m_streamon(file, fh->m2m_ctx, 
type); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon); + +int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv, +				enum v4l2_buf_type type) +{ +	struct v4l2_fh *fh = file->private_data; + +	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff); + +/* + * v4l2_file_operations helpers. It is assumed here same lock is used + * for the output and the capture buffer queue. + */ + +int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma) +{ +	struct v4l2_fh *fh = file->private_data; +	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; +	int ret; + +	if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock)) +		return -ERESTARTSYS; + +	ret = v4l2_m2m_mmap(file, m2m_ctx, vma); + +	if (m2m_ctx->q_lock) +		mutex_unlock(m2m_ctx->q_lock); + +	return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap); + +unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait) +{ +	struct v4l2_fh *fh = file->private_data; +	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; +	unsigned int ret; + +	if (m2m_ctx->q_lock) +		mutex_lock(m2m_ctx->q_lock); + +	ret = v4l2_m2m_poll(file, m2m_ctx, wait); + +	if (m2m_ctx->q_lock) +		mutex_unlock(m2m_ctx->q_lock); + +	return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll); + diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c index a6478dca0cd..b4ed9a955fb 100644 --- a/drivers/media/v4l2-core/v4l2-of.c +++ b/drivers/media/v4l2-core/v4l2-of.c @@ -121,21 +121,15 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,   * the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the   * V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.   * The caller should hold a reference to @node. + * + * Return: 0.   
*/ -void v4l2_of_parse_endpoint(const struct device_node *node, -			    struct v4l2_of_endpoint *endpoint) +int v4l2_of_parse_endpoint(const struct device_node *node, +			   struct v4l2_of_endpoint *endpoint)  { -	struct device_node *port_node = of_get_parent(node); - -	memset(endpoint, 0, offsetof(struct v4l2_of_endpoint, head)); - -	endpoint->local_node = node; -	/* -	 * It doesn't matter whether the two calls below succeed. -	 * If they don't then the default value 0 is used. -	 */ -	of_property_read_u32(port_node, "reg", &endpoint->port); -	of_property_read_u32(node, "reg", &endpoint->id); +	of_graph_parse_endpoint(node, &endpoint->base); +	endpoint->bus_type = 0; +	memset(&endpoint->bus, 0, sizeof(endpoint->bus));  	v4l2_of_parse_csi_bus(node, endpoint);  	/* @@ -145,123 +139,6 @@ void v4l2_of_parse_endpoint(const struct device_node *node,  	if (endpoint->bus.mipi_csi2.flags == 0)  		v4l2_of_parse_parallel_bus(node, endpoint); -	of_node_put(port_node); +	return 0;  }  EXPORT_SYMBOL(v4l2_of_parse_endpoint); - -/** - * v4l2_of_get_next_endpoint() - get next endpoint node - * @parent: pointer to the parent device node - * @prev: previous endpoint node, or NULL to get first - * - * Return: An 'endpoint' node pointer with refcount incremented. Refcount - * of the passed @prev node is not decremented, the caller have to use - * of_node_put() on it when done. - */ -struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent, -					struct device_node *prev) -{ -	struct device_node *endpoint; -	struct device_node *port = NULL; - -	if (!parent) -		return NULL; - -	if (!prev) { -		struct device_node *node; -		/* -		 * It's the first call, we have to find a port subnode -		 * within this node or within an optional 'ports' node. -		 */ -		node = of_get_child_by_name(parent, "ports"); -		if (node) -			parent = node; - -		port = of_get_child_by_name(parent, "port"); - -		if (port) { -			/* Found a port, get an endpoint. 
*/ -			endpoint = of_get_next_child(port, NULL); -			of_node_put(port); -		} else { -			endpoint = NULL; -		} - -		if (!endpoint) -			pr_err("%s(): no endpoint nodes specified for %s\n", -			       __func__, parent->full_name); -		of_node_put(node); -	} else { -		port = of_get_parent(prev); -		if (!port) -			/* Hm, has someone given us the root node ?... */ -			return NULL; - -		/* Avoid dropping prev node refcount to 0. */ -		of_node_get(prev); -		endpoint = of_get_next_child(port, prev); -		if (endpoint) { -			of_node_put(port); -			return endpoint; -		} - -		/* No more endpoints under this port, try the next one. */ -		do { -			port = of_get_next_child(parent, port); -			if (!port) -				return NULL; -		} while (of_node_cmp(port->name, "port")); - -		/* Pick up the first endpoint in this port. */ -		endpoint = of_get_next_child(port, NULL); -		of_node_put(port); -	} - -	return endpoint; -} -EXPORT_SYMBOL(v4l2_of_get_next_endpoint); - -/** - * v4l2_of_get_remote_port_parent() - get remote port's parent node - * @node: pointer to a local endpoint device_node - * - * Return: Remote device node associated with remote endpoint node linked - *	   to @node. Use of_node_put() on it when done. - */ -struct device_node *v4l2_of_get_remote_port_parent( -			       const struct device_node *node) -{ -	struct device_node *np; -	unsigned int depth; - -	/* Get remote endpoint node. */ -	np = of_parse_phandle(node, "remote-endpoint", 0); - -	/* Walk 3 levels up only if there is 'ports' node. */ -	for (depth = 3; depth && np; depth--) { -		np = of_get_next_parent(np); -		if (depth == 2 && of_node_cmp(np->name, "ports")) -			break; -	} -	return np; -} -EXPORT_SYMBOL(v4l2_of_get_remote_port_parent); - -/** - * v4l2_of_get_remote_port() - get remote port node - * @node: pointer to a local endpoint device_node - * - * Return: Remote port node associated with remote endpoint node linked - *	   to @node. Use of_node_put() on it when done. 
- */ -struct device_node *v4l2_of_get_remote_port(const struct device_node *node) -{ -	struct device_node *np; - -	/* Get remote endpoint node. */ -	np = of_parse_phandle(node, "remote-endpoint", 0); -	if (!np) -		return NULL; -	return of_get_parent(np); -} -EXPORT_SYMBOL(v4l2_of_get_remote_port); diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 996c248dea4..058c1a6e839 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -305,11 +305,23 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)  					fse);  	} -	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: +	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: { +		struct v4l2_subdev_frame_interval *fi = arg; + +		if (fi->pad >= sd->entity.num_pads) +			return -EINVAL; +  		return v4l2_subdev_call(sd, video, g_frame_interval, arg); +	} + +	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: { +		struct v4l2_subdev_frame_interval *fi = arg; + +		if (fi->pad >= sd->entity.num_pads) +			return -EINVAL; -	case VIDIOC_SUBDEV_S_FRAME_INTERVAL:  		return v4l2_subdev_call(sd, video, s_frame_interval, arg); +	}  	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {  		struct v4l2_subdev_frame_interval_enum *fie = arg; @@ -349,11 +361,54 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)  			sd, pad, set_selection, subdev_fh, sel);  	} -	case VIDIOC_SUBDEV_G_EDID: -		return v4l2_subdev_call(sd, pad, get_edid, arg); +	case VIDIOC_G_EDID: { +		struct v4l2_subdev_edid *edid = arg; + +		if (edid->pad >= sd->entity.num_pads) +			return -EINVAL; +		if (edid->blocks && edid->edid == NULL) +			return -EINVAL; + +		return v4l2_subdev_call(sd, pad, get_edid, edid); +	} + +	case VIDIOC_S_EDID: { +		struct v4l2_subdev_edid *edid = arg; + +		if (edid->pad >= sd->entity.num_pads) +			return -EINVAL; +		if (edid->blocks && edid->edid == NULL) +			return -EINVAL; + +		return v4l2_subdev_call(sd, pad, set_edid, edid); +	} + +	case 
VIDIOC_SUBDEV_DV_TIMINGS_CAP: { +		struct v4l2_dv_timings_cap *cap = arg; -	case VIDIOC_SUBDEV_S_EDID: -		return v4l2_subdev_call(sd, pad, set_edid, arg); +		if (cap->pad >= sd->entity.num_pads) +			return -EINVAL; + +		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap); +	} + +	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: { +		struct v4l2_enum_dv_timings *dvt = arg; + +		if (dvt->pad >= sd->entity.num_pads) +			return -EINVAL; + +		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt); +	} + +	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS: +		return v4l2_subdev_call(sd, video, query_dv_timings, arg); + +	case VIDIOC_SUBDEV_G_DV_TIMINGS: +		return v4l2_subdev_call(sd, video, g_dv_timings, arg); + +	case VIDIOC_SUBDEV_S_DV_TIMINGS: +		return v4l2_subdev_call(sd, video, s_dv_timings, arg);  #endif  	default:  		return v4l2_subdev_call(sd, core, ioctl, cmd, arg); @@ -368,6 +423,17 @@ static long subdev_ioctl(struct file *file, unsigned int cmd,  	return video_usercopy(file, cmd, arg, subdev_do_ioctl);  } +#ifdef CONFIG_COMPAT +static long subdev_compat_ioctl32(struct file *file, unsigned int cmd, +	unsigned long arg) +{ +	struct video_device *vdev = video_devdata(file); +	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + +	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg); +} +#endif +  static unsigned int subdev_poll(struct file *file, poll_table *wait)  {  	struct video_device *vdev = video_devdata(file); @@ -389,6 +455,9 @@ const struct v4l2_file_operations v4l2_subdev_fops = {  	.owner = THIS_MODULE,  	.open = subdev_open,  	.unlocked_ioctl = subdev_ioctl, +#ifdef CONFIG_COMPAT +	.compat_ioctl32 = subdev_compat_ioctl32, +#endif  	.release = subdev_close,  	.poll = subdev_poll,  }; diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c index 65411adcd0e..bf80f0f7dfb 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -66,14 +66,11 @@ static void 
__videobuf_dc_free(struct device *dev,  static void videobuf_vm_open(struct vm_area_struct *vma)  {  	struct videobuf_mapping *map = vma->vm_private_data; -	struct videobuf_queue *q = map->q; -	dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", +	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",  		map, map->count, vma->vm_start, vma->vm_end); -	videobuf_queue_lock(q);  	map->count++; -	videobuf_queue_unlock(q);  }  static void videobuf_vm_close(struct vm_area_struct *vma) @@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)  	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",  		map, map->count, vma->vm_start, vma->vm_end); -	videobuf_queue_lock(q); -	if (!--map->count) { +	map->count--; +	if (0 == map->count) {  		struct videobuf_dma_contig_memory *mem;  		dev_dbg(q->dev, "munmap %p q=%p\n", map, q); +		videobuf_queue_lock(q);  		/* We need first to cancel streams, before unmapping */  		if (q->streaming) @@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)  		kfree(map); +		videobuf_queue_unlock(q);  	} -	videobuf_queue_unlock(q);  }  static const struct vm_operations_struct videobuf_vm_ops = { @@ -307,7 +305,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,  	/* Try to remap memory */  	size = vma->vm_end - vma->vm_start;  	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -	retval = vm_iomap_memory(vma, vma->vm_start, size); +	retval = vm_iomap_memory(vma, mem->dma_handle, size);  	if (retval) {  		dev_err(q->dev, "mmap: remap failed with error %d. 
",  			retval); diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 9db674ccdc6..828e7c10bd7 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);  static void videobuf_vm_open(struct vm_area_struct *vma)  {  	struct videobuf_mapping *map = vma->vm_private_data; -	struct videobuf_queue *q = map->q;  	dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,  		map->count, vma->vm_start, vma->vm_end); -	videobuf_queue_lock(q);  	map->count++; -	videobuf_queue_unlock(q);  }  static void videobuf_vm_close(struct vm_area_struct *vma) @@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)  	dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,  		map->count, vma->vm_start, vma->vm_end); -	videobuf_queue_lock(q); -	if (!--map->count) { +	map->count--; +	if (0 == map->count) {  		dprintk(1, "munmap %p q=%p\n", map, q); +		videobuf_queue_lock(q);  		for (i = 0; i < VIDEO_MAX_FRAME; i++) {  			if (NULL == q->bufs[i])  				continue; @@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)  			q->bufs[i]->baddr = 0;  			q->ops->buf_release(q, q->bufs[i]);  		} +		videobuf_queue_unlock(q);  		kfree(map);  	} -	videobuf_queue_unlock(q);  	return;  } diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c index 1365c651c17..2ff7fcc77b1 100644 --- a/drivers/media/v4l2-core/videobuf-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf-vmalloc.c @@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");  static void videobuf_vm_open(struct vm_area_struct *vma)  {  	struct videobuf_mapping *map = vma->vm_private_data; -	struct videobuf_queue *q = map->q;  	dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,  		map->count, vma->vm_start, vma->vm_end); -	videobuf_queue_lock(q);  	map->count++; -	videobuf_queue_unlock(q);  }  static void 
videobuf_vm_close(struct vm_area_struct *vma) @@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)  	dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,  		map->count, vma->vm_start, vma->vm_end); -	videobuf_queue_lock(q); -	if (!--map->count) { +	map->count--; +	if (0 == map->count) {  		struct videobuf_vmalloc_memory *mem;  		dprintk(1, "munmap %p q=%p\n", map, q); +		videobuf_queue_lock(q);  		/* We need first to cancel streams, before unmapping */  		if (q->streaming) @@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)  		kfree(map); +		videobuf_queue_unlock(q);  	} -	videobuf_queue_unlock(q);  	return;  } diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 594c75eab5a..7c4489c4236 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -6,6 +6,9 @@   * Author: Pawel Osciak <pawel@osciak.com>   *	   Marek Szyprowski <m.szyprowski@samsung.com>   * + * The vb2_thread implementation was based on code from videobuf-dvb.c: + *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs] + *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation. @@ -18,32 +21,167 @@  #include <linux/poll.h>  #include <linux/slab.h>  #include <linux/sched.h> +#include <linux/freezer.h> +#include <linux/kthread.h>  #include <media/v4l2-dev.h>  #include <media/v4l2-fh.h>  #include <media/v4l2-event.h> +#include <media/v4l2-common.h>  #include <media/videobuf2-core.h>  static int debug;  module_param(debug, int, 0644); -#define dprintk(level, fmt, arg...)					\ -	do {								\ -		if (debug >= level)					\ -			printk(KERN_DEBUG "vb2: " fmt, ## arg);		\ +#define dprintk(level, fmt, arg...)					      
\ +	do {								      \ +		if (debug >= level)					      \ +			pr_debug("vb2: %s: " fmt, __func__, ## arg); \  	} while (0) -#define call_memop(q, op, args...)					\ -	(((q)->mem_ops->op) ?						\ -		((q)->mem_ops->op(args)) : 0) +#ifdef CONFIG_VIDEO_ADV_DEBUG + +/* + * If advanced debugging is on, then count how often each op is called + * successfully, which can either be per-buffer or per-queue. + * + * This makes it easy to check that the 'init' and 'cleanup' + * (and variations thereof) stay balanced. + */ + +#define log_memop(vb, op)						\ +	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\ +		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\ +		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)") + +#define call_memop(vb, op, args...)					\ +({									\ +	struct vb2_queue *_q = (vb)->vb2_queue;				\ +	int err;							\ +									\ +	log_memop(vb, op);						\ +	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\ +	if (!err)							\ +		(vb)->cnt_mem_ ## op++;					\ +	err;								\ +}) + +#define call_ptr_memop(vb, op, args...)					\ +({									\ +	struct vb2_queue *_q = (vb)->vb2_queue;				\ +	void *ptr;							\ +									\ +	log_memop(vb, op);						\ +	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\ +	if (!IS_ERR_OR_NULL(ptr))					\ +		(vb)->cnt_mem_ ## op++;					\ +	ptr;								\ +}) + +#define call_void_memop(vb, op, args...)				\ +({									\ +	struct vb2_queue *_q = (vb)->vb2_queue;				\ +									\ +	log_memop(vb, op);						\ +	if (_q->mem_ops->op)						\ +		_q->mem_ops->op(args);					\ +	(vb)->cnt_mem_ ## op++;						\ +}) + +#define log_qop(q, op)							\ +	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\ +		(q)->ops->op ? "" : " (nop)") + +#define call_qop(q, op, args...)					\ +({									\ +	int err;							\ +									\ +	log_qop(q, op);							\ +	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\ +	if (!err)							\ +		(q)->cnt_ ## op++;					\ +	err;								\ +}) + +#define call_void_qop(q, op, args...)					
\ +({									\ +	log_qop(q, op);							\ +	if ((q)->ops->op)						\ +		(q)->ops->op(args);					\ +	(q)->cnt_ ## op++;						\ +}) + +#define log_vb_qop(vb, op, args...)					\ +	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\ +		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\ +		(vb)->vb2_queue->ops->op ? "" : " (nop)") + +#define call_vb_qop(vb, op, args...)					\ +({									\ +	int err;							\ +									\ +	log_vb_qop(vb, op);						\ +	err = (vb)->vb2_queue->ops->op ?				\ +		(vb)->vb2_queue->ops->op(args) : 0;			\ +	if (!err)							\ +		(vb)->cnt_ ## op++;					\ +	err;								\ +}) + +#define call_void_vb_qop(vb, op, args...)				\ +({									\ +	log_vb_qop(vb, op);						\ +	if ((vb)->vb2_queue->ops->op)					\ +		(vb)->vb2_queue->ops->op(args);				\ +	(vb)->cnt_ ## op++;						\ +}) + +#else + +#define call_memop(vb, op, args...)					\ +	((vb)->vb2_queue->mem_ops->op ?					\ +		(vb)->vb2_queue->mem_ops->op(args) : 0) + +#define call_ptr_memop(vb, op, args...)					\ +	((vb)->vb2_queue->mem_ops->op ?					\ +		(vb)->vb2_queue->mem_ops->op(args) : NULL) + +#define call_void_memop(vb, op, args...)				\ +	do {								\ +		if ((vb)->vb2_queue->mem_ops->op)			\ +			(vb)->vb2_queue->mem_ops->op(args);		\ +	} while (0)  #define call_qop(q, op, args...)					\ -	(((q)->ops->op) ? ((q)->ops->op(args)) : 0) +	((q)->ops->op ? (q)->ops->op(args) : 0) + +#define call_void_qop(q, op, args...)					\ +	do {								\ +		if ((q)->ops->op)					\ +			(q)->ops->op(args);				\ +	} while (0) + +#define call_vb_qop(vb, op, args...)					\ +	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0) + +#define call_void_vb_qop(vb, op, args...)				
\ +	do {								\ +		if ((vb)->vb2_queue->ops->op)				\ +			(vb)->vb2_queue->ops->op(args);			\ +	} while (0) +#endif + +/* Flags that are set by the vb2 core */  #define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \  				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \  				 V4L2_BUF_FLAG_PREPARED | \  				 V4L2_BUF_FLAG_TIMESTAMP_MASK) +/* Output buffer flags that should be passed on to the driver */ +#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \ +				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE) + +static void __vb2_queue_cancel(struct vb2_queue *q);  /**   * __vb2_buf_mem_alloc() - allocate video memory for the given buffer @@ -61,7 +199,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)  	for (plane = 0; plane < vb->num_planes; ++plane) {  		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]); -		mem_priv = call_memop(q, alloc, q->alloc_ctx[plane], +		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],  				      size, q->gfp_flags);  		if (IS_ERR_OR_NULL(mem_priv))  			goto free; @@ -75,7 +213,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)  free:  	/* Free already allocated memory if one of the allocations failed */  	for (; plane > 0; --plane) { -		call_memop(q, put, vb->planes[plane - 1].mem_priv); +		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);  		vb->planes[plane - 1].mem_priv = NULL;  	} @@ -87,13 +225,12 @@ free:   */  static void __vb2_buf_mem_free(struct vb2_buffer *vb)  { -	struct vb2_queue *q = vb->vb2_queue;  	unsigned int plane;  	for (plane = 0; plane < vb->num_planes; ++plane) { -		call_memop(q, put, vb->planes[plane].mem_priv); +		call_void_memop(vb, put, vb->planes[plane].mem_priv);  		vb->planes[plane].mem_priv = NULL; -		dprintk(3, "Freed plane %d of buffer %d\n", plane, +		dprintk(3, "freed plane %d of buffer %d\n", plane,  			vb->v4l2_buf.index);  	}  } @@ -104,12 +241,11 @@ static void __vb2_buf_mem_free(struct vb2_buffer *vb)   */  
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)  { -	struct vb2_queue *q = vb->vb2_queue;  	unsigned int plane;  	for (plane = 0; plane < vb->num_planes; ++plane) {  		if (vb->planes[plane].mem_priv) -			call_memop(q, put_userptr, vb->planes[plane].mem_priv); +			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);  		vb->planes[plane].mem_priv = NULL;  	}  } @@ -118,15 +254,15 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)   * __vb2_plane_dmabuf_put() - release memory associated with   * a DMABUF shared plane   */ -static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p) +static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)  {  	if (!p->mem_priv)  		return;  	if (p->dbuf_mapped) -		call_memop(q, unmap_dmabuf, p->mem_priv); +		call_void_memop(vb, unmap_dmabuf, p->mem_priv); -	call_memop(q, detach_dmabuf, p->mem_priv); +	call_void_memop(vb, detach_dmabuf, p->mem_priv);  	dma_buf_put(p->dbuf);  	memset(p, 0, sizeof(*p));  } @@ -137,11 +273,29 @@ static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)   */  static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)  { -	struct vb2_queue *q = vb->vb2_queue;  	unsigned int plane;  	for (plane = 0; plane < vb->num_planes; ++plane) -		__vb2_plane_dmabuf_put(q, &vb->planes[plane]); +		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]); +} + +/** + * __setup_lengths() - setup initial lengths for every plane in + * every buffer on the queue + */ +static void __setup_lengths(struct vb2_queue *q, unsigned int n) +{ +	unsigned int buffer, plane; +	struct vb2_buffer *vb; + +	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) { +		vb = q->bufs[buffer]; +		if (!vb) +			continue; + +		for (plane = 0; plane < vb->num_planes; ++plane) +			vb->v4l2_planes[plane].length = q->plane_sizes[plane]; +	}  }  /** @@ -169,10 +323,9 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)  			continue;  		for (plane = 0; plane 
< vb->num_planes; ++plane) { -			vb->v4l2_planes[plane].length = q->plane_sizes[plane];  			vb->v4l2_planes[plane].m.mem_offset = off; -			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", +			dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",  					buffer, plane, off);  			off += vb->v4l2_planes[plane].length; @@ -199,7 +352,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,  		/* Allocate videobuf buffer structures */  		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);  		if (!vb) { -			dprintk(1, "Memory alloc for buffer struct failed\n"); +			dprintk(1, "memory alloc for buffer struct failed\n");  			break;  		} @@ -218,7 +371,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,  		if (memory == V4L2_MEMORY_MMAP) {  			ret = __vb2_buf_mem_alloc(vb);  			if (ret) { -				dprintk(1, "Failed allocating memory for " +				dprintk(1, "failed allocating memory for "  						"buffer %d\n", buffer);  				kfree(vb);  				break; @@ -228,9 +381,9 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,  			 * callback, if given. An error in initialization  			 * results in queue setup failure.  			 
*/ -			ret = call_qop(q, buf_init, vb); +			ret = call_vb_qop(vb, buf_init, vb);  			if (ret) { -				dprintk(1, "Buffer %d %p initialization" +				dprintk(1, "buffer %d %p initialization"  					" failed\n", buffer, vb);  				__vb2_buf_mem_free(vb);  				kfree(vb); @@ -241,9 +394,11 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,  		q->bufs[q->num_buffers + buffer] = vb;  	} -	__setup_offsets(q, buffer); +	__setup_lengths(q, buffer); +	if (memory == V4L2_MEMORY_MMAP) +		__setup_offsets(q, buffer); -	dprintk(1, "Allocated %d buffers, %d plane(s) each\n", +	dprintk(1, "allocated %d buffers, %d plane(s) each\n",  			buffer, num_planes);  	return buffer; @@ -278,23 +433,102 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)   * related information, if no buffers are left return the queue to an   * uninitialized state. Might be called even if the queue has already been freed.   */ -static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) +static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)  {  	unsigned int buffer; -	/* Call driver-provided cleanup function for each buffer, if provided */ -	if (q->ops->buf_cleanup) { -		for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; -		     ++buffer) { -			if (NULL == q->bufs[buffer]) -				continue; -			q->ops->buf_cleanup(q->bufs[buffer]); +	/* +	 * Sanity check: when preparing a buffer the queue lock is released for +	 * a short while (see __buf_prepare for the details), which would allow +	 * a race with a reqbufs which can call this function. Removing the +	 * buffers from underneath __buf_prepare is obviously a bad idea, so we +	 * check if any of the buffers is in the state PREPARING, and if so we +	 * just return -EAGAIN. 
+	 */ +	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; +	     ++buffer) { +		if (q->bufs[buffer] == NULL) +			continue; +		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) { +			dprintk(1, "preparing buffers, cannot free\n"); +			return -EAGAIN;  		}  	} +	/* Call driver-provided cleanup function for each buffer, if provided */ +	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; +	     ++buffer) { +		struct vb2_buffer *vb = q->bufs[buffer]; + +		if (vb && vb->planes[0].mem_priv) +			call_void_vb_qop(vb, buf_cleanup, vb); +	} +  	/* Release video buffer memory */  	__vb2_free_mem(q, buffers); +#ifdef CONFIG_VIDEO_ADV_DEBUG +	/* +	 * Check that all the calls were balances during the life-time of this +	 * queue. If not (or if the debug level is 1 or up), then dump the +	 * counters to the kernel log. +	 */ +	if (q->num_buffers) { +		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming || +				  q->cnt_wait_prepare != q->cnt_wait_finish; + +		if (unbalanced || debug) { +			pr_info("vb2: counters for queue %p:%s\n", q, +				unbalanced ? " UNBALANCED!" 
: ""); +			pr_info("vb2:     setup: %u start_streaming: %u stop_streaming: %u\n", +				q->cnt_queue_setup, q->cnt_start_streaming, +				q->cnt_stop_streaming); +			pr_info("vb2:     wait_prepare: %u wait_finish: %u\n", +				q->cnt_wait_prepare, q->cnt_wait_finish); +		} +		q->cnt_queue_setup = 0; +		q->cnt_wait_prepare = 0; +		q->cnt_wait_finish = 0; +		q->cnt_start_streaming = 0; +		q->cnt_stop_streaming = 0; +	} +	for (buffer = 0; buffer < q->num_buffers; ++buffer) { +		struct vb2_buffer *vb = q->bufs[buffer]; +		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put || +				  vb->cnt_mem_prepare != vb->cnt_mem_finish || +				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr || +				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf || +				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf || +				  vb->cnt_buf_queue != vb->cnt_buf_done || +				  vb->cnt_buf_prepare != vb->cnt_buf_finish || +				  vb->cnt_buf_init != vb->cnt_buf_cleanup; + +		if (unbalanced || debug) { +			pr_info("vb2:   counters for queue %p, buffer %d:%s\n", +				q, buffer, unbalanced ? " UNBALANCED!" 
: ""); +			pr_info("vb2:     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", +				vb->cnt_buf_init, vb->cnt_buf_cleanup, +				vb->cnt_buf_prepare, vb->cnt_buf_finish); +			pr_info("vb2:     buf_queue: %u buf_done: %u\n", +				vb->cnt_buf_queue, vb->cnt_buf_done); +			pr_info("vb2:     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n", +				vb->cnt_mem_alloc, vb->cnt_mem_put, +				vb->cnt_mem_prepare, vb->cnt_mem_finish, +				vb->cnt_mem_mmap); +			pr_info("vb2:     get_userptr: %u put_userptr: %u\n", +				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr); +			pr_info("vb2:     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n", +				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf, +				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf); +			pr_info("vb2:     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n", +				vb->cnt_mem_get_dmabuf, +				vb->cnt_mem_num_users, +				vb->cnt_mem_vaddr, +				vb->cnt_mem_cookie); +		} +	} +#endif +  	/* Free videobuf buffers */  	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;  	     ++buffer) { @@ -303,9 +537,11 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)  	}  	q->num_buffers -= buffers; -	if (!q->num_buffers) +	if (!q->num_buffers) {  		q->memory = 0; -	INIT_LIST_HEAD(&q->queued_list); +		INIT_LIST_HEAD(&q->queued_list); +	} +	return 0;  }  /** @@ -319,13 +555,13 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer  	/* Is memory for copying plane information present? 
*/  	if (NULL == b->m.planes) { -		dprintk(1, "Multi-planar buffer passed but " +		dprintk(1, "multi-planar buffer passed but "  			   "planes array not provided\n");  		return -EINVAL;  	}  	if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) { -		dprintk(1, "Incorrect planes array length, " +		dprintk(1, "incorrect planes array length, "  			   "expected %d, got %d\n", vb->num_planes, b->length);  		return -EINVAL;  	} @@ -353,7 +589,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)  			if (b->m.planes[plane].bytesused > length)  				return -EINVAL; -			if (b->m.planes[plane].data_offset >= + +			if (b->m.planes[plane].data_offset > 0 && +			    b->m.planes[plane].data_offset >=  			    b->m.planes[plane].bytesused)  				return -EINVAL;  		} @@ -383,7 +621,7 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)  		 * case anyway. If num_users() returns more than 1,  		 * we are not the only user of the plane's memory.  		 */ -		if (mem_priv && call_memop(q, num_users, mem_priv) > 1) +		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)  			return true;  	}  	return false; @@ -443,7 +681,16 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)  	 * Clear any buffer state related flags.  	 */  	b->flags &= ~V4L2_BUFFER_MASK_FLAGS; -	b->flags |= q->timestamp_type; +	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK; +	if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != +	    V4L2_BUF_FLAG_TIMESTAMP_COPY) { +		/* +		 * For non-COPY timestamps, drop timestamp source bits +		 * and obtain the timestamp source from the queue. 
+		 */ +		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; +		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; +	}  	switch (vb->state) {  	case VB2_BUF_STATE_QUEUED: @@ -459,6 +706,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)  	case VB2_BUF_STATE_PREPARED:  		b->flags |= V4L2_BUF_FLAG_PREPARED;  		break; +	case VB2_BUF_STATE_PREPARING:  	case VB2_BUF_STATE_DEQUEUED:  		/* nothing */  		break; @@ -487,12 +735,12 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)  	int ret;  	if (b->type != q->type) { -		dprintk(1, "querybuf: wrong buffer type\n"); +		dprintk(1, "wrong buffer type\n");  		return -EINVAL;  	}  	if (b->index >= q->num_buffers) { -		dprintk(1, "querybuf: buffer index out of range\n"); +		dprintk(1, "buffer index out of range\n");  		return -EINVAL;  	}  	vb = q->bufs[b->index]; @@ -552,12 +800,12 @@ static int __verify_memory_type(struct vb2_queue *q,  {  	if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&  	    memory != V4L2_MEMORY_DMABUF) { -		dprintk(1, "reqbufs: unsupported memory type\n"); +		dprintk(1, "unsupported memory type\n");  		return -EINVAL;  	}  	if (type != q->type) { -		dprintk(1, "reqbufs: requested type is incorrect\n"); +		dprintk(1, "requested type is incorrect\n");  		return -EINVAL;  	} @@ -566,17 +814,17 @@ static int __verify_memory_type(struct vb2_queue *q,  	 * are available.  	 
*/  	if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) { -		dprintk(1, "reqbufs: MMAP for current setup unsupported\n"); +		dprintk(1, "MMAP for current setup unsupported\n");  		return -EINVAL;  	}  	if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) { -		dprintk(1, "reqbufs: USERPTR for current setup unsupported\n"); +		dprintk(1, "USERPTR for current setup unsupported\n");  		return -EINVAL;  	}  	if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { -		dprintk(1, "reqbufs: DMABUF for current setup unsupported\n"); +		dprintk(1, "DMABUF for current setup unsupported\n");  		return -EINVAL;  	} @@ -585,8 +833,8 @@ static int __verify_memory_type(struct vb2_queue *q,  	 * create_bufs is called with count == 0, but count == 0 should still  	 * do the memory and type validation.  	 */ -	if (q->fileio) { -		dprintk(1, "reqbufs: file io in progress\n"); +	if (vb2_fileio_is_active(q)) { +		dprintk(1, "file io in progress\n");  		return -EBUSY;  	}  	return 0; @@ -621,7 +869,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)  	int ret;  	if (q->streaming) { -		dprintk(1, "reqbufs: streaming active\n"); +		dprintk(1, "streaming active\n");  		return -EBUSY;  	} @@ -631,11 +879,19 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)  		 * are not in use and can be freed.  		 */  		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) { -			dprintk(1, "reqbufs: memory in use, cannot free\n"); +			dprintk(1, "memory in use, cannot free\n");  			return -EBUSY;  		} -		__vb2_queue_free(q, q->num_buffers); +		/* +		 * Call queue_cancel to clean up any buffers in the PREPARED or +		 * QUEUED state which is possible if buffers were prepared or +		 * queued without ever calling STREAMON. 
+		 */ +		__vb2_queue_cancel(q); +		ret = __vb2_queue_free(q, q->num_buffers); +		if (ret) +			return ret;  		/*  		 * In case of REQBUFS(0) return immediately without calling @@ -649,6 +905,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)  	 * Make sure the requested values and current defaults are sane.  	 */  	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); +	num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);  	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));  	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));  	q->memory = req->memory; @@ -663,18 +920,23 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)  		return ret;  	/* Finally, allocate buffers and video memory */ -	ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes); -	if (ret == 0) { -		dprintk(1, "Memory allocation failed\n"); +	allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes); +	if (allocated_buffers == 0) { +		dprintk(1, "memory allocation failed\n");  		return -ENOMEM;  	} -	allocated_buffers = ret; +	/* +	 * There is no point in continuing if we can't allocate the minimum +	 * number of buffers needed by this vb2_queue. +	 */ +	if (allocated_buffers < q->min_buffers_needed) +		ret = -ENOMEM;  	/*  	 * Check if driver can handle the allocated number of buffers.  	 */ -	if (allocated_buffers < num_buffers) { +	if (!ret && allocated_buffers < num_buffers) {  		num_buffers = allocated_buffers;  		ret = call_qop(q, queue_setup, q, NULL, &num_buffers, @@ -692,6 +954,10 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)  	q->num_buffers = allocated_buffers;  	if (ret < 0) { +		/* +		 * Note: __vb2_queue_free() will subtract 'allocated_buffers' +		 * from q->num_buffers. 
+		 */  		__vb2_queue_free(q, allocated_buffers);  		return ret;  	} @@ -740,8 +1006,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create  	int ret;  	if (q->num_buffers == VIDEO_MAX_FRAME) { -		dprintk(1, "%s(): maximum number of buffers already allocated\n", -			__func__); +		dprintk(1, "maximum number of buffers already allocated\n");  		return -ENOBUFS;  	} @@ -763,20 +1028,18 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create  		return ret;  	/* Finally, allocate buffers and video memory */ -	ret = __vb2_queue_alloc(q, create->memory, num_buffers, +	allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,  				num_planes); -	if (ret == 0) { -		dprintk(1, "Memory allocation failed\n"); +	if (allocated_buffers == 0) { +		dprintk(1, "memory allocation failed\n");  		return -ENOMEM;  	} -	allocated_buffers = ret; -  	/*  	 * Check if driver can handle the so far allocated number of buffers.  	 */ -	if (ret < num_buffers) { -		num_buffers = ret; +	if (allocated_buffers < num_buffers) { +		num_buffers = allocated_buffers;  		/*  		 * q->num_buffers contains the total number of buffers, that the @@ -797,6 +1060,10 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create  	q->num_buffers += allocated_buffers;  	if (ret < 0) { +		/* +		 * Note: __vb2_queue_free() will subtract 'allocated_buffers' +		 * from q->num_buffers. 
+		 */  		__vb2_queue_free(q, allocated_buffers);  		return -ENOMEM;  	} @@ -838,12 +1105,10 @@ EXPORT_SYMBOL_GPL(vb2_create_bufs);   */  void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)  { -	struct vb2_queue *q = vb->vb2_queue; -  	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)  		return NULL; -	return call_memop(q, vaddr, vb->planes[plane_no].mem_priv); +	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);  }  EXPORT_SYMBOL_GPL(vb2_plane_vaddr); @@ -861,12 +1126,10 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr);   */  void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)  { -	struct vb2_queue *q = vb->vb2_queue; -  	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)  		return NULL; -	return call_memop(q, cookie, vb->planes[plane_no].mem_priv); +	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);  }  EXPORT_SYMBOL_GPL(vb2_plane_cookie); @@ -874,13 +1137,20 @@ EXPORT_SYMBOL_GPL(vb2_plane_cookie);   * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished   * @vb:		vb2_buffer returned from the driver   * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully - *		or VB2_BUF_STATE_ERROR if the operation finished with an error + *		or VB2_BUF_STATE_ERROR if the operation finished with an error. + *		If start_streaming fails then it should return buffers with state + *		VB2_BUF_STATE_QUEUED to put them back into the queue.   *   * This function should be called by the driver after a hardware operation on   * a buffer is finished and the buffer may be returned to userspace. The driver   * cannot use this buffer anymore until it is queued back to it by videobuf   * by the means of buf_queue callback. Only buffers previously queued to the   * driver by buf_queue can be passed to this function. + * + * While streaming a buffer can only be returned in state DONE or ERROR. 
+ * The start_streaming op can also return them in case the DMA engine cannot + * be started for some reason. In that case the buffers should be returned with + * state QUEUED.   */  void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)  { @@ -888,32 +1158,72 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)  	unsigned long flags;  	unsigned int plane; -	if (vb->state != VB2_BUF_STATE_ACTIVE) +	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))  		return; -	if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR) -		return; +	if (!q->start_streaming_called) { +		if (WARN_ON(state != VB2_BUF_STATE_QUEUED)) +			state = VB2_BUF_STATE_QUEUED; +	} else if (WARN_ON(state != VB2_BUF_STATE_DONE && +			   state != VB2_BUF_STATE_ERROR)) { +			state = VB2_BUF_STATE_ERROR; +	} -	dprintk(4, "Done processing on buffer %d, state: %d\n", +#ifdef CONFIG_VIDEO_ADV_DEBUG +	/* +	 * Although this is not a callback, it still does have to balance +	 * with the buf_queue op. So update this counter manually. 
+	 */ +	vb->cnt_buf_done++; +#endif +	dprintk(4, "done processing on buffer %d, state: %d\n",  			vb->v4l2_buf.index, state);  	/* sync buffers */  	for (plane = 0; plane < vb->num_planes; ++plane) -		call_memop(q, finish, vb->planes[plane].mem_priv); +		call_void_memop(vb, finish, vb->planes[plane].mem_priv);  	/* Add the buffer to the done buffers list */  	spin_lock_irqsave(&q->done_lock, flags);  	vb->state = state; -	list_add_tail(&vb->done_entry, &q->done_list); -	atomic_dec(&q->queued_count); +	if (state != VB2_BUF_STATE_QUEUED) +		list_add_tail(&vb->done_entry, &q->done_list); +	atomic_dec(&q->owned_by_drv_count);  	spin_unlock_irqrestore(&q->done_lock, flags); +	if (state == VB2_BUF_STATE_QUEUED) +		return; +  	/* Inform any processes that may be waiting for buffers */  	wake_up(&q->done_wq);  }  EXPORT_SYMBOL_GPL(vb2_buffer_done);  /** + * vb2_discard_done() - discard all buffers marked as DONE + * @q:		videobuf2 queue + * + * This function is intended to be used with suspend/resume operations. It + * discards all 'done' buffers as they would be too old to be requested after + * resume. + * + * Drivers must stop the hardware and synchronize with interrupt handlers and/or + * delayed works before calling this function to make sure no buffer will be + * touched by the driver and/or hardware. + */ +void vb2_discard_done(struct vb2_queue *q) +{ +	struct vb2_buffer *vb; +	unsigned long flags; + +	spin_lock_irqsave(&q->done_lock, flags); +	list_for_each_entry(vb, &q->done_list, done_entry) +		vb->state = VB2_BUF_STATE_ERROR; +	spin_unlock_irqrestore(&q->done_lock, flags); +} +EXPORT_SYMBOL_GPL(vb2_discard_done); + +/**   * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a   * v4l2_buffer by the userspace. The caller has already verified that struct   * v4l2_buffer has a valid number of planes. 
@@ -926,15 +1236,30 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b  	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {  		/* Fill in driver-provided information for OUTPUT types */  		if (V4L2_TYPE_IS_OUTPUT(b->type)) { +			bool bytesused_is_used; + +			/* Check if bytesused == 0 for all planes */ +			for (plane = 0; plane < vb->num_planes; ++plane) +				if (b->m.planes[plane].bytesused) +					break; +			bytesused_is_used = plane < vb->num_planes; +  			/*  			 * Will have to go up to b->length when API starts  			 * accepting variable number of planes. +			 * +			 * If bytesused_is_used is false, then fall back to the +			 * full buffer size. In that case userspace clearly +			 * never bothered to set it and it's a safe assumption +			 * that they really meant to use the full plane sizes.  			 */  			for (plane = 0; plane < vb->num_planes; ++plane) { -				v4l2_planes[plane].bytesused = -					b->m.planes[plane].bytesused; -				v4l2_planes[plane].data_offset = -					b->m.planes[plane].data_offset; +				struct v4l2_plane *pdst = &v4l2_planes[plane]; +				struct v4l2_plane *psrc = &b->m.planes[plane]; + +				pdst->bytesused = bytesused_is_used ? +					psrc->bytesused : psrc->length; +				pdst->data_offset = psrc->data_offset;  			}  		} @@ -952,8 +1277,6 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b  					b->m.planes[plane].m.fd;  				v4l2_planes[plane].length =  					b->m.planes[plane].length; -				v4l2_planes[plane].data_offset = -					b->m.planes[plane].data_offset;  			}  		}  	} else { @@ -962,11 +1285,15 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b  		 * so fill in relevant v4l2_buffer struct fields instead.  		 * In videobuf we use our internal V4l2_planes struct for  		 * single-planar buffers as well, for simplicity. +		 * +		 * If bytesused == 0, then fall back to the full buffer size +		 * as that's a sensible default.  		 
*/ -		if (V4L2_TYPE_IS_OUTPUT(b->type)) { -			v4l2_planes[0].bytesused = b->bytesused; -			v4l2_planes[0].data_offset = 0; -		} +		if (V4L2_TYPE_IS_OUTPUT(b->type)) +			v4l2_planes[0].bytesused = +				b->bytesused ? b->bytesused : b->length; +		else +			v4l2_planes[0].bytesused = 0;  		if (b->memory == V4L2_MEMORY_USERPTR) {  			v4l2_planes[0].m.userptr = b->m.userptr; @@ -976,14 +1303,43 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b  		if (b->memory == V4L2_MEMORY_DMABUF) {  			v4l2_planes[0].m.fd = b->m.fd;  			v4l2_planes[0].length = b->length; -			v4l2_planes[0].data_offset = 0;  		} -  	} -	vb->v4l2_buf.field = b->field; -	vb->v4l2_buf.timestamp = b->timestamp; +	/* Zero flags that the vb2 core handles */  	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; +	if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) != +	    V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) { +		/* +		 * Non-COPY timestamps and non-OUTPUT queues will get +		 * their timestamp and timestamp source flags from the +		 * queue. +		 */ +		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; +	} + +	if (V4L2_TYPE_IS_OUTPUT(b->type)) { +		/* +		 * For output buffers mask out the timecode flag: +		 * this will be handled later in vb2_internal_qbuf(). +		 * The 'field' is valid metadata for this output buffer +		 * and so that needs to be copied here. 
+		 */ +		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE; +		vb->v4l2_buf.field = b->field; +	} else { +		/* Zero any output buffer flags as this is a capture buffer */ +		vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS; +	} +} + +/** + * __qbuf_mmap() - handle qbuf of an MMAP buffer + */ +static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b) +{ +	__fill_vb2_buffer(vb, b, vb->v4l2_planes); +	return call_vb_qop(vb, buf_prepare, vb);  }  /** @@ -997,7 +1353,9 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)  	unsigned int plane;  	int ret;  	int write = !V4L2_TYPE_IS_OUTPUT(q->type); +	bool reacquired = vb->planes[0].mem_priv == NULL; +	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);  	/* Copy relevant information provided by the userspace */  	__fill_vb2_buffer(vb, b, planes); @@ -1008,29 +1366,37 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)  		    && vb->v4l2_planes[plane].length == planes[plane].length)  			continue; -		dprintk(3, "qbuf: userspace address for plane %d changed, " +		dprintk(3, "userspace address for plane %d changed, "  				"reacquiring memory\n", plane);  		/* Check if the provided plane buffer is large enough */  		if (planes[plane].length < q->plane_sizes[plane]) { +			dprintk(1, "provided buffer size %u is less than " +						"setup size %u for plane %d\n", +						planes[plane].length, +						q->plane_sizes[plane], plane);  			ret = -EINVAL;  			goto err;  		}  		/* Release previously acquired memory if present */ -		if (vb->planes[plane].mem_priv) -			call_memop(q, put_userptr, vb->planes[plane].mem_priv); +		if (vb->planes[plane].mem_priv) { +			if (!reacquired) { +				reacquired = true; +				call_void_vb_qop(vb, buf_cleanup, vb); +			} +			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); +		}  		vb->planes[plane].mem_priv = NULL; -		vb->v4l2_planes[plane].m.userptr = 0; -		vb->v4l2_planes[plane].length = 0; +		
memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));  		/* Acquire each plane's memory */ -		mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane], +		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],  				      planes[plane].m.userptr,  				      planes[plane].length, write);  		if (IS_ERR_OR_NULL(mem_priv)) { -			dprintk(1, "qbuf: failed acquiring userspace " +			dprintk(1, "failed acquiring userspace "  						"memory for plane %d\n", plane);  			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;  			goto err; @@ -1039,28 +1405,38 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)  	}  	/* -	 * Call driver-specific initialization on the newly acquired buffer, -	 * if provided. -	 */ -	ret = call_qop(q, buf_init, vb); -	if (ret) { -		dprintk(1, "qbuf: buffer initialization failed\n"); -		goto err; -	} - -	/*  	 * Now that everything is in order, copy relevant information  	 * provided by userspace.  	 */  	for (plane = 0; plane < vb->num_planes; ++plane)  		vb->v4l2_planes[plane] = planes[plane]; +	if (reacquired) { +		/* +		 * One or more planes changed, so we must call buf_init to do +		 * the driver-specific initialization on the newly acquired +		 * buffer, if provided. 
+		 */ +		ret = call_vb_qop(vb, buf_init, vb); +		if (ret) { +			dprintk(1, "buffer initialization failed\n"); +			goto err; +		} +	} + +	ret = call_vb_qop(vb, buf_prepare, vb); +	if (ret) { +		dprintk(1, "buffer preparation failed\n"); +		call_void_vb_qop(vb, buf_cleanup, vb); +		goto err; +	} +  	return 0;  err:  	/* In case of errors, release planes that were already acquired */  	for (plane = 0; plane < vb->num_planes; ++plane) {  		if (vb->planes[plane].mem_priv) -			call_memop(q, put_userptr, vb->planes[plane].mem_priv); +			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);  		vb->planes[plane].mem_priv = NULL;  		vb->v4l2_planes[plane].m.userptr = 0;  		vb->v4l2_planes[plane].length = 0; @@ -1070,15 +1446,6 @@ err:  }  /** - * __qbuf_mmap() - handle qbuf of an MMAP buffer - */ -static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b) -{ -	__fill_vb2_buffer(vb, b, vb->v4l2_planes); -	return 0; -} - -/**   * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer   */  static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) @@ -1089,15 +1456,17 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)  	unsigned int plane;  	int ret;  	int write = !V4L2_TYPE_IS_OUTPUT(q->type); +	bool reacquired = vb->planes[0].mem_priv == NULL; -	/* Verify and copy relevant information provided by the userspace */ +	memset(planes, 0, sizeof(planes[0]) * vb->num_planes); +	/* Copy relevant information provided by the userspace */  	__fill_vb2_buffer(vb, b, planes);  	for (plane = 0; plane < vb->num_planes; ++plane) {  		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);  		if (IS_ERR_OR_NULL(dbuf)) { -			dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n", +			dprintk(1, "invalid dmabuf fd for plane %d\n",  				plane);  			ret = -EINVAL;  			goto err; @@ -1107,8 +1476,9 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)  		if (planes[plane].length == 0)  			planes[plane].length 
= dbuf->size; -		if (planes[plane].length < planes[plane].data_offset + -		    q->plane_sizes[plane]) { +		if (planes[plane].length < q->plane_sizes[plane]) { +			dprintk(1, "invalid dmabuf length for plane %d\n", +				plane);  			ret = -EINVAL;  			goto err;  		} @@ -1120,17 +1490,22 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)  			continue;  		} -		dprintk(1, "qbuf: buffer for plane %d changed\n", plane); +		dprintk(1, "buffer for plane %d changed\n", plane); + +		if (!reacquired) { +			reacquired = true; +			call_void_vb_qop(vb, buf_cleanup, vb); +		}  		/* Release previously acquired memory if present */ -		__vb2_plane_dmabuf_put(q, &vb->planes[plane]); +		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);  		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));  		/* Acquire each plane's memory */ -		mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane], +		mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],  			dbuf, planes[plane].length, write);  		if (IS_ERR(mem_priv)) { -			dprintk(1, "qbuf: failed to attach dmabuf\n"); +			dprintk(1, "failed to attach dmabuf\n");  			ret = PTR_ERR(mem_priv);  			dma_buf_put(dbuf);  			goto err; @@ -1145,9 +1520,9 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)  	 * the buffer(s)..  	 */  	for (plane = 0; plane < vb->num_planes; ++plane) { -		ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv); +		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);  		if (ret) { -			dprintk(1, "qbuf: failed to map dmabuf for plane %d\n", +			dprintk(1, "failed to map dmabuf for plane %d\n",  				plane);  			goto err;  		} @@ -1155,22 +1530,31 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)  	}  	/* -	 * Call driver-specific initialization on the newly acquired buffer, -	 * if provided. 
-	 */ -	ret = call_qop(q, buf_init, vb); -	if (ret) { -		dprintk(1, "qbuf: buffer initialization failed\n"); -		goto err; -	} - -	/*  	 * Now that everything is in order, copy relevant information  	 * provided by userspace.  	 */  	for (plane = 0; plane < vb->num_planes; ++plane)  		vb->v4l2_planes[plane] = planes[plane]; +	if (reacquired) { +		/* +		 * Call driver-specific initialization on the newly acquired buffer, +		 * if provided. +		 */ +		ret = call_vb_qop(vb, buf_init, vb); +		if (ret) { +			dprintk(1, "buffer initialization failed\n"); +			goto err; +		} +	} + +	ret = call_vb_qop(vb, buf_prepare, vb); +	if (ret) { +		dprintk(1, "buffer preparation failed\n"); +		call_void_vb_qop(vb, buf_cleanup, vb); +		goto err; +	} +  	return 0;  err:  	/* In case of errors, release planes that were already acquired */ @@ -1188,30 +1572,70 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)  	unsigned int plane;  	vb->state = VB2_BUF_STATE_ACTIVE; -	atomic_inc(&q->queued_count); +	atomic_inc(&q->owned_by_drv_count);  	/* sync buffers */  	for (plane = 0; plane < vb->num_planes; ++plane) -		call_memop(q, prepare, vb->planes[plane].mem_priv); +		call_void_memop(vb, prepare, vb->planes[plane].mem_priv); -	q->ops->buf_queue(vb); +	call_void_vb_qop(vb, buf_queue, vb);  }  static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)  {  	struct vb2_queue *q = vb->vb2_queue; +	struct rw_semaphore *mmap_sem;  	int ret;  	ret = __verify_length(vb, b); -	if (ret < 0) +	if (ret < 0) { +		dprintk(1, "plane parameters verification failed: %d\n", ret);  		return ret; +	} +	if (b->field == V4L2_FIELD_ALTERNATE && V4L2_TYPE_IS_OUTPUT(q->type)) { +		/* +		 * If the format's field is ALTERNATE, then the buffer's field +		 * should be either TOP or BOTTOM, not ALTERNATE since that +		 * makes no sense. The driver has to know whether the +		 * buffer represents a top or a bottom field in order to +		 * program any DMA correctly. 
Using ALTERNATE is wrong, since +		 * that just says that it is either a top or a bottom field, +		 * but not which of the two it is. +		 */ +		dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n"); +		return -EINVAL; +	} + +	vb->state = VB2_BUF_STATE_PREPARING; +	vb->v4l2_buf.timestamp.tv_sec = 0; +	vb->v4l2_buf.timestamp.tv_usec = 0; +	vb->v4l2_buf.sequence = 0;  	switch (q->memory) {  	case V4L2_MEMORY_MMAP:  		ret = __qbuf_mmap(vb, b);  		break;  	case V4L2_MEMORY_USERPTR: +		/* +		 * In case of user pointer buffers vb2 allocators need to get +		 * direct access to userspace pages. This requires getting +		 * the mmap semaphore for read access in the current process +		 * structure. The same semaphore is taken before calling mmap +		 * operation, while both qbuf/prepare_buf and mmap are called +		 * by the driver or v4l2 core with the driver's lock held. +		 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in +		 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf), +		 * the videobuf2 core releases the driver's lock, takes +		 * mmap_sem and then takes the driver's lock again. +		 */ +		mmap_sem = &current->mm->mmap_sem; +		call_void_qop(q, wait_prepare, q); +		down_read(mmap_sem); +		call_void_qop(q, wait_finish, q); +  		ret = __qbuf_userptr(vb, b); + +		up_read(mmap_sem);  		break;  	case V4L2_MEMORY_DMABUF:  		ret = __qbuf_dmabuf(vb, b); @@ -1221,109 +1645,38 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)  		ret = -EINVAL;  	} -	if (!ret) -		ret = call_qop(q, buf_prepare, vb);  	if (ret) -		dprintk(1, "qbuf: buffer preparation failed: %d\n", ret); -	else -		vb->state = VB2_BUF_STATE_PREPARED; +		dprintk(1, "buffer preparation failed: %d\n", ret); +	vb->state = ret ? 
VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;  	return ret;  }  static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, -				    const char *opname, -				    int (*handler)(struct vb2_queue *, -						   struct v4l2_buffer *, -						   struct vb2_buffer *)) +				    const char *opname)  { -	struct rw_semaphore *mmap_sem = NULL; -	struct vb2_buffer *vb; -	int ret; - -	/* -	 * In case of user pointer buffers vb2 allocators need to get direct -	 * access to userspace pages. This requires getting the mmap semaphore -	 * for read access in the current process structure. The same semaphore -	 * is taken before calling mmap operation, while both qbuf/prepare_buf -	 * and mmap are called by the driver or v4l2 core with the driver's lock -	 * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in mmap -	 * and driver's lock then mmap_sem in qbuf/prepare_buf) the videobuf2 -	 * core releases the driver's lock, takes mmap_sem and then takes the -	 * driver's lock again. -	 * -	 * To avoid racing with other vb2 calls, which might be called after -	 * releasing the driver's lock, this operation is performed at the -	 * beginning of qbuf/prepare_buf processing. This way the queue status -	 * is consistent after getting the driver's lock back. 
-	 */ -	if (q->memory == V4L2_MEMORY_USERPTR) { -		mmap_sem = &current->mm->mmap_sem; -		call_qop(q, wait_prepare, q); -		down_read(mmap_sem); -		call_qop(q, wait_finish, q); -	} - -	if (q->fileio) { -		dprintk(1, "%s(): file io in progress\n", opname); -		ret = -EBUSY; -		goto unlock; -	} -  	if (b->type != q->type) { -		dprintk(1, "%s(): invalid buffer type\n", opname); -		ret = -EINVAL; -		goto unlock; +		dprintk(1, "%s: invalid buffer type\n", opname); +		return -EINVAL;  	}  	if (b->index >= q->num_buffers) { -		dprintk(1, "%s(): buffer index out of range\n", opname); -		ret = -EINVAL; -		goto unlock; +		dprintk(1, "%s: buffer index out of range\n", opname); +		return -EINVAL;  	} -	vb = q->bufs[b->index]; -	if (NULL == vb) { +	if (q->bufs[b->index] == NULL) {  		/* Should never happen */ -		dprintk(1, "%s(): buffer is NULL\n", opname); -		ret = -EINVAL; -		goto unlock; +		dprintk(1, "%s: buffer is NULL\n", opname); +		return -EINVAL;  	}  	if (b->memory != q->memory) { -		dprintk(1, "%s(): invalid memory type\n", opname); -		ret = -EINVAL; -		goto unlock; -	} - -	ret = __verify_planes_array(vb, b); -	if (ret) -		goto unlock; - -	ret = handler(q, b, vb); -	if (ret) -		goto unlock; - -	/* Fill buffer information for the userspace */ -	__fill_v4l2_buffer(vb, b); - -	dprintk(1, "%s() of buffer %d succeeded\n", opname, vb->v4l2_buf.index); unlock: -	if (mmap_sem) -		up_read(mmap_sem); -	return ret; -} - -static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, -			     struct vb2_buffer *vb) -{ -	if (vb->state != VB2_BUF_STATE_DEQUEUED) { -		dprintk(1, "%s(): invalid buffer state %d\n", __func__, -			vb->state); +		dprintk(1, "%s: invalid memory type\n", opname);  		return -EINVAL;  	} -	return __buf_prepare(vb, b); +	return __verify_planes_array(q->bufs[b->index], b);  }  /** @@ -1343,24 +1696,108 @@ static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,   */  int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)  { -	return 
vb2_queue_or_prepare_buf(q, b, "prepare_buf", __vb2_prepare_buf); +	struct vb2_buffer *vb; +	int ret; + +	if (vb2_fileio_is_active(q)) { +		dprintk(1, "file io in progress\n"); +		return -EBUSY; +	} + +	ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf"); +	if (ret) +		return ret; + +	vb = q->bufs[b->index]; +	if (vb->state != VB2_BUF_STATE_DEQUEUED) { +		dprintk(1, "invalid buffer state %d\n", +			vb->state); +		return -EINVAL; +	} + +	ret = __buf_prepare(vb, b); +	if (!ret) { +		/* Fill buffer information for the userspace */ +		__fill_v4l2_buffer(vb, b); + +		dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index); +	} +	return ret;  }  EXPORT_SYMBOL_GPL(vb2_prepare_buf); -static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b, -		      struct vb2_buffer *vb) +/** + * vb2_start_streaming() - Attempt to start streaming. + * @q:		videobuf2 queue + * + * Attempt to start streaming. When this function is called there must be + * at least q->min_buffers_needed buffers queued up (i.e. the minimum + * number of buffers required for the DMA engine to function). If the + * @start_streaming op fails it is supposed to return all the driver-owned + * buffers back to vb2 in state QUEUED. Check if that happened and if + * not warn and reclaim them forcefully. + */ +static int vb2_start_streaming(struct vb2_queue *q)  { +	struct vb2_buffer *vb;  	int ret; +	/* +	 * If any buffers were queued before streamon, +	 * we can now pass them to driver for processing. 
+	 */ +	list_for_each_entry(vb, &q->queued_list, queued_entry) +		__enqueue_in_driver(vb); + +	/* Tell the driver to start streaming */ +	ret = call_qop(q, start_streaming, q, +		       atomic_read(&q->owned_by_drv_count)); +	q->start_streaming_called = ret == 0; +	if (!ret) +		return 0; + +	dprintk(1, "driver refused to start streaming\n"); +	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { +		unsigned i; + +		/* +		 * Forcefully reclaim buffers if the driver did not +		 * correctly return them to vb2. +		 */ +		for (i = 0; i < q->num_buffers; ++i) { +			vb = q->bufs[i]; +			if (vb->state == VB2_BUF_STATE_ACTIVE) +				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED); +		} +		/* Must be zero now */ +		WARN_ON(atomic_read(&q->owned_by_drv_count)); +	} +	return ret; +} + +static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) +{ +	int ret = vb2_queue_or_prepare_buf(q, b, "qbuf"); +	struct vb2_buffer *vb; + +	if (ret) +		return ret; + +	vb = q->bufs[b->index]; +  	switch (vb->state) {  	case VB2_BUF_STATE_DEQUEUED:  		ret = __buf_prepare(vb, b);  		if (ret)  			return ret; +		break;  	case VB2_BUF_STATE_PREPARED:  		break; +	case VB2_BUF_STATE_PREPARING: +		dprintk(1, "buffer still being prepared\n"); +		return -EINVAL;  	default: -		dprintk(1, "qbuf: buffer already in use\n"); +		dprintk(1, "invalid buffer state %d\n", vb->state);  		return -EINVAL;  	} @@ -1369,15 +1806,45 @@ static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,  	 * dequeued in dqbuf.  	 */  	list_add_tail(&vb->queued_entry, &q->queued_list); +	q->queued_count++;  	vb->state = VB2_BUF_STATE_QUEUED; +	if (V4L2_TYPE_IS_OUTPUT(q->type)) { +		/* +		 * For output buffers copy the timestamp if needed, +		 * and the timecode field and flag if needed. 
+		 */ +		if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == +		    V4L2_BUF_FLAG_TIMESTAMP_COPY) +			vb->v4l2_buf.timestamp = b->timestamp; +		vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE; +		if (b->flags & V4L2_BUF_FLAG_TIMECODE) +			vb->v4l2_buf.timecode = b->timecode; +	}  	/*  	 * If already streaming, give the buffer to driver for processing.  	 * If not, the buffer will be given to driver on next streamon.  	 */ -	if (q->streaming) +	if (q->start_streaming_called)  		__enqueue_in_driver(vb); +	/* Fill buffer information for the userspace */ +	__fill_v4l2_buffer(vb, b); + +	/* +	 * If streamon has been called, and we haven't yet called +	 * start_streaming() since not enough buffers were queued, and +	 * we now have reached the minimum number of queued buffers, +	 * then we can finally call start_streaming(). +	 */ +	if (q->streaming && !q->start_streaming_called && +	    q->queued_count >= q->min_buffers_needed) { +		ret = vb2_start_streaming(q); +		if (ret) +			return ret; +	} + +	dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);  	return 0;  } @@ -1400,7 +1867,12 @@ static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,   */  int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)  { -	return vb2_queue_or_prepare_buf(q, b, "qbuf", __vb2_qbuf); +	if (vb2_fileio_is_active(q)) { +		dprintk(1, "file io in progress\n"); +		return -EBUSY; +	} + +	return vb2_internal_qbuf(q, b);  }  EXPORT_SYMBOL_GPL(vb2_qbuf); @@ -1425,7 +1897,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)  		int ret;  		if (!q->streaming) { -			dprintk(1, "Streaming off, will not wait for buffers\n"); +			dprintk(1, "streaming off, will not wait for buffers\n");  			return -EINVAL;  		} @@ -1437,7 +1909,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)  		}  		if (nonblocking) { -			dprintk(1, "Nonblocking and no buffers to dequeue, " +			dprintk(1, "nonblocking and no buffers to dequeue, "  		
						"will not wait\n");  			return -EAGAIN;  		} @@ -1447,12 +1919,12 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)  		 * become ready or for streamoff. Driver's lock is released to  		 * allow streamoff or qbuf to be called while waiting.  		 */ -		call_qop(q, wait_prepare, q); +		call_void_qop(q, wait_prepare, q);  		/*  		 * All locks have been released, it is safe to sleep now.  		 */ -		dprintk(3, "Will sleep waiting for buffers\n"); +		dprintk(3, "will sleep waiting for buffers\n");  		ret = wait_event_interruptible(q->done_wq,  				!list_empty(&q->done_list) || !q->streaming); @@ -1460,9 +1932,9 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)  		 * We need to reevaluate both conditions again after reacquiring  		 * the locks or return an error if one occurred.  		 */ -		call_qop(q, wait_finish, q); +		call_void_qop(q, wait_finish, q);  		if (ret) { -			dprintk(1, "Sleep was interrupted\n"); +			dprintk(1, "sleep was interrupted\n");  			return ret;  		}  	} @@ -1517,11 +1989,12 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,  int vb2_wait_for_all_buffers(struct vb2_queue *q)  {  	if (!q->streaming) { -		dprintk(1, "Streaming off, will not wait for buffers\n"); +		dprintk(1, "streaming off, will not wait for buffers\n");  		return -EINVAL;  	} -	wait_event(q->done_wq, !atomic_read(&q->queued_count)); +	if (q->start_streaming_called) +		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));  	return 0;  }  EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); @@ -1545,72 +2018,43 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)  		for (i = 0; i < vb->num_planes; ++i) {  			if (!vb->planes[i].dbuf_mapped)  				continue; -			call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv); +			call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);  			vb->planes[i].dbuf_mapped = 0;  		}  } -/** - * vb2_dqbuf() - Dequeue a buffer to the userspace - * @q:		videobuf2 queue - * @b:		buffer 
structure passed from userspace to vidioc_dqbuf handler - *		in driver - * @nonblocking: if true, this call will not sleep waiting for a buffer if no - *		 buffers ready for dequeuing are present. Normally the driver - *		 would be passing (file->f_flags & O_NONBLOCK) here - * - * Should be called from vidioc_dqbuf ioctl handler of a driver. - * This function: - * 1) verifies the passed buffer, - * 2) calls buf_finish callback in the driver (if provided), in which - *    driver can perform any additional operations that may be required before - *    returning the buffer to userspace, such as cache sync, - * 3) the buffer struct members are filled with relevant information for - *    the userspace. - * - * The return values from this function are intended to be directly returned - * from vidioc_dqbuf handler in driver. - */ -int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) +static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)  {  	struct vb2_buffer *vb = NULL;  	int ret; -	if (q->fileio) { -		dprintk(1, "dqbuf: file io in progress\n"); -		return -EBUSY; -	} -  	if (b->type != q->type) { -		dprintk(1, "dqbuf: invalid buffer type\n"); +		dprintk(1, "invalid buffer type\n");  		return -EINVAL;  	}  	ret = __vb2_get_done_vb(q, &vb, b, nonblocking);  	if (ret < 0)  		return ret; -	ret = call_qop(q, buf_finish, vb); -	if (ret) { -		dprintk(1, "dqbuf: buffer finish failed\n"); -		return ret; -	} -  	switch (vb->state) {  	case VB2_BUF_STATE_DONE: -		dprintk(3, "dqbuf: Returning done buffer\n"); +		dprintk(3, "returning done buffer\n");  		break;  	case VB2_BUF_STATE_ERROR: -		dprintk(3, "dqbuf: Returning done buffer with errors\n"); +		dprintk(3, "returning done buffer with errors\n");  		break;  	default: -		dprintk(1, "dqbuf: Invalid buffer state\n"); +		dprintk(1, "invalid buffer state\n");  		return -EINVAL;  	} +	call_void_vb_qop(vb, buf_finish, vb); +  	/* Fill buffer information for the userspace */  	
__fill_v4l2_buffer(vb, b);  	/* Remove from videobuf queue */  	list_del(&vb->queued_entry); +	q->queued_count--;  	/* go back to dequeued state */  	__vb2_dqbuf(vb); @@ -1619,6 +2063,36 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)  	return 0;  } + +/** + * vb2_dqbuf() - Dequeue a buffer to the userspace + * @q:		videobuf2 queue + * @b:		buffer structure passed from userspace to vidioc_dqbuf handler + *		in driver + * @nonblocking: if true, this call will not sleep waiting for a buffer if no + *		 buffers ready for dequeuing are present. Normally the driver + *		 would be passing (file->f_flags & O_NONBLOCK) here + * + * Should be called from vidioc_dqbuf ioctl handler of a driver. + * This function: + * 1) verifies the passed buffer, + * 2) calls buf_finish callback in the driver (if provided), in which + *    driver can perform any additional operations that may be required before + *    returning the buffer to userspace, such as cache sync, + * 3) the buffer struct members are filled with relevant information for + *    the userspace. + * + * The return values from this function are intended to be directly returned + * from vidioc_dqbuf handler in driver. + */ +int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) +{ +	if (vb2_fileio_is_active(q)) { +		dprintk(1, "file io in progress\n"); +		return -EBUSY; +	} +	return vb2_internal_dqbuf(q, b, nonblocking); +}  EXPORT_SYMBOL_GPL(vb2_dqbuf);  /** @@ -1635,9 +2109,20 @@ static void __vb2_queue_cancel(struct vb2_queue *q)  	 * Tell driver to stop all transactions and release all queued  	 * buffers.  	 
*/ -	if (q->streaming) -		call_qop(q, stop_streaming, q); +	if (q->start_streaming_called) +		call_void_qop(q, stop_streaming, q); + +	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { +		for (i = 0; i < q->num_buffers; ++i) +			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) +				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR); +		/* Must be zero now */ +		WARN_ON(atomic_read(&q->owned_by_drv_count)); +	} +  	q->streaming = 0; +	q->start_streaming_called = 0; +	q->queued_count = 0;  	/*  	 * Remove all buffers from videobuf's list... @@ -1648,14 +2133,70 @@ static void __vb2_queue_cancel(struct vb2_queue *q)  	 * has not already dequeued before initiating cancel.  	 */  	INIT_LIST_HEAD(&q->done_list); -	atomic_set(&q->queued_count, 0); +	atomic_set(&q->owned_by_drv_count, 0);  	wake_up_all(&q->done_wq);  	/*  	 * Reinitialize all buffers for next use. +	 * Make sure to call buf_finish for any queued buffers. Normally +	 * that's done in dqbuf, but that's not going to happen when we +	 * cancel the whole queue. Note: this code belongs here, not in +	 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical +	 * call to __fill_v4l2_buffer() after buf_finish(). That order can't +	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf(). 
+	 */ +	for (i = 0; i < q->num_buffers; ++i) { +		struct vb2_buffer *vb = q->bufs[i]; + +		if (vb->state != VB2_BUF_STATE_DEQUEUED) { +			vb->state = VB2_BUF_STATE_PREPARED; +			call_void_vb_qop(vb, buf_finish, vb); +		} +		__vb2_dqbuf(vb); +	} +} + +static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type) +{ +	int ret; + +	if (type != q->type) { +		dprintk(1, "invalid stream type\n"); +		return -EINVAL; +	} + +	if (q->streaming) { +		dprintk(3, "already streaming\n"); +		return 0; +	} + +	if (!q->num_buffers) { +		dprintk(1, "no buffers have been allocated\n"); +		return -EINVAL; +	} + +	if (q->num_buffers < q->min_buffers_needed) { +		dprintk(1, "need at least %u allocated buffers\n", +				q->min_buffers_needed); +		return -EINVAL; +	} + +	/* +	 * Tell driver to start streaming provided sufficient buffers +	 * are available.  	 */ -	for (i = 0; i < q->num_buffers; ++i) -		__vb2_dqbuf(q->bufs[i]); +	if (q->queued_count >= q->min_buffers_needed) { +		ret = vb2_start_streaming(q); +		if (ret) { +			__vb2_queue_cancel(q); +			return ret; +		} +	} + +	q->streaming = 1; + +	dprintk(3, "successful\n"); +	return 0;  }  /** @@ -1673,48 +2214,35 @@ static void __vb2_queue_cancel(struct vb2_queue *q)   */  int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)  { -	struct vb2_buffer *vb; -	int ret; - -	if (q->fileio) { -		dprintk(1, "streamon: file io in progress\n"); +	if (vb2_fileio_is_active(q)) { +		dprintk(1, "file io in progress\n");  		return -EBUSY;  	} +	return vb2_internal_streamon(q, type); +} +EXPORT_SYMBOL_GPL(vb2_streamon); +static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) +{  	if (type != q->type) { -		dprintk(1, "streamon: invalid stream type\n"); +		dprintk(1, "invalid stream type\n");  		return -EINVAL;  	} -	if (q->streaming) { -		dprintk(1, "streamon: already streaming\n"); -		return -EBUSY; -	} - -	/* -	 * If any buffers were queued before streamon, -	 * we can now pass them to driver for 
processing. -	 */ -	list_for_each_entry(vb, &q->queued_list, queued_entry) -		__enqueue_in_driver(vb); -  	/* -	 * Let driver notice that streaming state has been enabled. +	 * Cancel will pause streaming and remove all buffers from the driver +	 * and videobuf, effectively returning control over them to userspace. +	 * +	 * Note that we do this even if q->streaming == 0: if you prepare or +	 * queue buffers, and then call streamoff without ever having called +	 * streamon, you would still expect those buffers to be returned to +	 * their normal dequeued state.  	 */ -	ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count)); -	if (ret) { -		dprintk(1, "streamon: driver refused to start streaming\n"); -		__vb2_queue_cancel(q); -		return ret; -	} - -	q->streaming = 1; +	__vb2_queue_cancel(q); -	dprintk(3, "Streamon successful\n"); +	dprintk(3, "successful\n");  	return 0;  } -EXPORT_SYMBOL_GPL(vb2_streamon); -  /**   * vb2_streamoff - stop streaming @@ -1733,29 +2261,11 @@ EXPORT_SYMBOL_GPL(vb2_streamon);   */  int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)  { -	if (q->fileio) { -		dprintk(1, "streamoff: file io in progress\n"); +	if (vb2_fileio_is_active(q)) { +		dprintk(1, "file io in progress\n");  		return -EBUSY;  	} - -	if (type != q->type) { -		dprintk(1, "streamoff: invalid stream type\n"); -		return -EINVAL; -	} - -	if (!q->streaming) { -		dprintk(1, "streamoff: not streaming\n"); -		return -EINVAL; -	} - -	/* -	 * Cancel will pause streaming and remove all buffers from the driver -	 * and videobuf, effectively returning control over them to userspace. 
-	 */ -	__vb2_queue_cancel(q); - -	dprintk(3, "Streamoff successful\n"); -	return 0; +	return vb2_internal_streamoff(q, type);  }  EXPORT_SYMBOL_GPL(vb2_streamoff); @@ -1805,22 +2315,22 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)  	struct dma_buf *dbuf;  	if (q->memory != V4L2_MEMORY_MMAP) { -		dprintk(1, "Queue is not currently set up for mmap\n"); +		dprintk(1, "queue is not currently set up for mmap\n");  		return -EINVAL;  	}  	if (!q->mem_ops->get_dmabuf) { -		dprintk(1, "Queue does not support DMA buffer exporting\n"); +		dprintk(1, "queue does not support DMA buffer exporting\n");  		return -EINVAL;  	} -	if (eb->flags & ~O_CLOEXEC) { -		dprintk(1, "Queue does support only O_CLOEXEC flag\n"); +	if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) { +		dprintk(1, "queue does support only O_CLOEXEC and access mode flags\n");  		return -EINVAL;  	}  	if (eb->type != q->type) { -		dprintk(1, "qbuf: invalid buffer type\n"); +		dprintk(1, "invalid buffer type\n");  		return -EINVAL;  	} @@ -1836,16 +2346,21 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)  		return -EINVAL;  	} +	if (vb2_fileio_is_active(q)) { +		dprintk(1, "expbuf: file io in progress\n"); +		return -EBUSY; +	} +  	vb_plane = &vb->planes[eb->plane]; -	dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv); +	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);  	if (IS_ERR_OR_NULL(dbuf)) { -		dprintk(1, "Failed to export buffer %d, plane %d\n", +		dprintk(1, "failed to export buffer %d, plane %d\n",  			eb->index, eb->plane);  		return -EINVAL;  	} -	ret = dma_buf_fd(dbuf, eb->flags); +	ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);  	if (ret < 0) {  		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",  			eb->index, eb->plane, ret); @@ -1884,12 +2399,12 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)  {  	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;  	struct vb2_buffer *vb; -	unsigned int buffer, plane; +	
unsigned int buffer = 0, plane = 0;  	int ret;  	unsigned long length;  	if (q->memory != V4L2_MEMORY_MMAP) { -		dprintk(1, "Queue is not currently set up for mmap\n"); +		dprintk(1, "queue is not currently set up for mmap\n");  		return -EINVAL;  	} @@ -1897,20 +2412,24 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)  	 * Check memory area access mode.  	 */  	if (!(vma->vm_flags & VM_SHARED)) { -		dprintk(1, "Invalid vma flags, VM_SHARED needed\n"); +		dprintk(1, "invalid vma flags, VM_SHARED needed\n");  		return -EINVAL;  	}  	if (V4L2_TYPE_IS_OUTPUT(q->type)) {  		if (!(vma->vm_flags & VM_WRITE)) { -			dprintk(1, "Invalid vma flags, VM_WRITE needed\n"); +			dprintk(1, "invalid vma flags, VM_WRITE needed\n");  			return -EINVAL;  		}  	} else {  		if (!(vma->vm_flags & VM_READ)) { -			dprintk(1, "Invalid vma flags, VM_READ needed\n"); +			dprintk(1, "invalid vma flags, VM_READ needed\n");  			return -EINVAL;  		}  	} +	if (vb2_fileio_is_active(q)) { +		dprintk(1, "mmap: file io in progress\n"); +		return -EBUSY; +	}  	/*  	 * Find the plane corresponding to the offset passed by userspace. 
@@ -1933,11 +2452,11 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)  		return -EINVAL;  	} -	ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma); +	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);  	if (ret)  		return ret; -	dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane); +	dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);  	return 0;  }  EXPORT_SYMBOL_GPL(vb2_mmap); @@ -1955,7 +2474,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,  	int ret;  	if (q->memory != V4L2_MEMORY_MMAP) { -		dprintk(1, "Queue is not currently set up for mmap\n"); +		dprintk(1, "queue is not currently set up for mmap\n");  		return -EINVAL;  	} @@ -2020,7 +2539,7 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)  	/*  	 * Start file I/O emulator only if streaming API has not been used yet.  	 */ -	if (q->num_buffers == 0 && q->fileio == NULL) { +	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {  		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&  				(req_events & (POLLIN | POLLRDNORM))) {  			if (__vb2_init_fileio(q, 1)) @@ -2088,11 +2607,14 @@ int vb2_queue_init(struct vb2_queue *q)  	    WARN_ON(!q->io_modes)	  ||  	    WARN_ON(!q->ops->queue_setup) ||  	    WARN_ON(!q->ops->buf_queue)   || -	    WARN_ON(q->timestamp_type & ~V4L2_BUF_FLAG_TIMESTAMP_MASK)) +	    WARN_ON(q->timestamp_flags & +		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK | +		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))  		return -EINVAL;  	/* Warn that the driver should choose an appropriate timestamp type */ -	WARN_ON(q->timestamp_type == V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN); +	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == +		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);  	INIT_LIST_HEAD(&q->queued_list);  	INIT_LIST_HEAD(&q->done_list); @@ -2139,6 +2661,22 @@ struct vb2_fileio_buf {  /**   * struct vb2_fileio_data - queue context used by file io emulator   * + * @cur_index:	the index 
of the buffer currently being read from or + *		written to. If equal to q->num_buffers then a new buffer + *		must be dequeued. + * @initial_index: in the read() case all buffers are queued up immediately + *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles + *		buffers. However, in the write() case no buffers are initially + *		queued, instead whenever a buffer is full it is queued up by + *		__vb2_perform_fileio(). Only once all available buffers have + *		been queued up will __vb2_perform_fileio() start to dequeue + *		buffers. This means that initially __vb2_perform_fileio() + *		needs to know what buffer index to use when it is queuing up + *		the buffers for the first time. That initial index is stored + *		in this field. Once it is equal to q->num_buffers all + *		available buffers have been queued and __vb2_perform_fileio() + *		should start the normal dequeue/queue cycle. + *   * vb2 provides a compatibility layer and emulator of file io (read and   * write) calls on top of streaming API. For proper operation it required   * this structure to save the driver state between each call of the read @@ -2146,9 +2684,11 @@ struct vb2_fileio_buf {   */  struct vb2_fileio_data {  	struct v4l2_requestbuffers req; +	struct v4l2_plane p;  	struct v4l2_buffer b;  	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME]; -	unsigned int index; +	unsigned int cur_index; +	unsigned int initial_index;  	unsigned int q_count;  	unsigned int dq_count;  	unsigned int flags; @@ -2168,9 +2708,9 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)  	/*  	 * Sanity check  	 */ -	if ((read && !(q->io_modes & VB2_READ)) || -	   (!read && !(q->io_modes & VB2_WRITE))) -		BUG(); +	if (WARN_ON((read && !(q->io_modes & VB2_READ)) || +		    (!read && !(q->io_modes & VB2_WRITE)))) +		return -EINVAL;  	/*  	 * Check if device supports mapping buffers to kernel virtual space. 
@@ -2205,7 +2745,8 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)  	fileio->req.count = count;  	fileio->req.memory = V4L2_MEMORY_MMAP;  	fileio->req.type = q->type; -	ret = vb2_reqbufs(q, &fileio->req); +	q->fileio = fileio; +	ret = __reqbufs(q, &fileio->req);  	if (ret)  		goto err_kfree; @@ -2234,38 +2775,51 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)  	 * Read mode requires pre queuing of all buffers.  	 */  	if (read) { +		bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type); +  		/*  		 * Queue all buffers.  		 */  		for (i = 0; i < q->num_buffers; i++) {  			struct v4l2_buffer *b = &fileio->b; +  			memset(b, 0, sizeof(*b));  			b->type = q->type; +			if (is_multiplanar) { +				memset(&fileio->p, 0, sizeof(fileio->p)); +				b->m.planes = &fileio->p; +				b->length = 1; +			}  			b->memory = q->memory;  			b->index = i; -			ret = vb2_qbuf(q, b); +			ret = vb2_internal_qbuf(q, b);  			if (ret)  				goto err_reqbufs;  			fileio->bufs[i].queued = 1;  		} -  		/* -		 * Start streaming. +		 * All buffers have been queued, so mark that by setting +		 * initial_index to q->num_buffers  		 */ -		ret = vb2_streamon(q, q->type); -		if (ret) -			goto err_reqbufs; +		fileio->initial_index = q->num_buffers; +		fileio->cur_index = q->num_buffers;  	} -	q->fileio = fileio; +	/* +	 * Start streaming. +	 */ +	ret = vb2_internal_streamon(q, q->type); +	if (ret) +		goto err_reqbufs;  	return ret;  err_reqbufs:  	fileio->req.count = 0; -	vb2_reqbufs(q, &fileio->req); +	__reqbufs(q, &fileio->req);  err_kfree: +	q->fileio = NULL;  	kfree(fileio);  	return ret;  } @@ -2279,13 +2833,8 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)  	struct vb2_fileio_data *fileio = q->fileio;  	if (fileio) { -		/* -		 * Hack fileio context to enable direct calls to vb2 ioctl -		 * interface. 
-		 */ +		vb2_internal_streamoff(q, q->type);  		q->fileio = NULL; - -		vb2_streamoff(q, q->type);  		fileio->req.count = 0;  		vb2_reqbufs(q, &fileio->req);  		kfree(fileio); @@ -2308,9 +2857,18 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_  {  	struct vb2_fileio_data *fileio;  	struct vb2_fileio_buf *buf; +	bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type); +	/* +	 * When using write() to write data to an output video node the vb2 core +	 * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody +	 * else is able to provide this information with the write() operation. +	 */ +	bool set_timestamp = !read && +		(q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == +		V4L2_BUF_FLAG_TIMESTAMP_COPY;  	int ret, index; -	dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n", +	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",  		read ? "read" : "write", (long)*ppos, count,  		nonblock ? "non" : ""); @@ -2320,48 +2878,48 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_  	/*  	 * Initialize emulator on first call.  	 */ -	if (!q->fileio) { +	if (!vb2_fileio_is_active(q)) {  		ret = __vb2_init_fileio(q, read); -		dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); +		dprintk(3, "vb2_init_fileio result: %d\n", ret);  		if (ret)  			return ret;  	}  	fileio = q->fileio;  	/* -	 * Hack fileio context to enable direct calls to vb2 ioctl interface. -	 * The pointer will be restored before returning from this function. -	 */ -	q->fileio = NULL; - -	index = fileio->index; -	buf = &fileio->bufs[index]; - -	/*  	 * Check if we need to dequeue the buffer.  	 */ -	if (buf->queued) { -		struct vb2_buffer *vb; - +	index = fileio->cur_index; +	if (index >= q->num_buffers) {  		/*  		 * Call vb2_dqbuf to get buffer back.  		 
*/  		memset(&fileio->b, 0, sizeof(fileio->b));  		fileio->b.type = q->type;  		fileio->b.memory = q->memory; -		fileio->b.index = index; -		ret = vb2_dqbuf(q, &fileio->b, nonblock); -		dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); +		if (is_multiplanar) { +			memset(&fileio->p, 0, sizeof(fileio->p)); +			fileio->b.m.planes = &fileio->p; +			fileio->b.length = 1; +		} +		ret = vb2_internal_dqbuf(q, &fileio->b, nonblock); +		dprintk(5, "vb2_dqbuf result: %d\n", ret);  		if (ret) -			goto end; +			return ret;  		fileio->dq_count += 1; +		fileio->cur_index = index = fileio->b.index; +		buf = &fileio->bufs[index]; +  		/*  		 * Get number of bytes filled by the driver  		 */ -		vb = q->bufs[index]; -		buf->size = vb2_get_plane_payload(vb, 0); +		buf->pos = 0;  		buf->queued = 0; +		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0) +				 : vb2_plane_size(q->bufs[index], 0); +	} else { +		buf = &fileio->bufs[index];  	}  	/* @@ -2375,16 +2933,15 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_  	/*  	 * Transfer data to userspace.  	 */ -	dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n", +	dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",  		count, index, buf->pos);  	if (read)  		ret = copy_to_user(data, buf->vaddr + buf->pos, count);  	else  		ret = copy_from_user(buf->vaddr + buf->pos, data, count);  	if (ret) { -		dprintk(3, "file io: error copying data\n"); -		ret = -EFAULT; -		goto end; +		dprintk(3, "error copying data\n"); +		return -EFAULT;  	}  	/* @@ -2403,11 +2960,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_  		 */  		if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&  		    fileio->dq_count == 1) { -			dprintk(3, "file io: read limit reached\n"); -			/* -			 * Restore fileio pointer and release the context. 
-			 */ -			q->fileio = fileio; +			dprintk(3, "read limit reached\n");  			return __vb2_cleanup_fileio(q);  		} @@ -2419,32 +2972,40 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_  		fileio->b.memory = q->memory;  		fileio->b.index = index;  		fileio->b.bytesused = buf->pos; -		ret = vb2_qbuf(q, &fileio->b); -		dprintk(5, "file io: vb2_dbuf result: %d\n", ret); +		if (is_multiplanar) { +			memset(&fileio->p, 0, sizeof(fileio->p)); +			fileio->p.bytesused = buf->pos; +			fileio->b.m.planes = &fileio->p; +			fileio->b.length = 1; +		} +		if (set_timestamp) +			v4l2_get_timestamp(&fileio->b.timestamp); +		ret = vb2_internal_qbuf(q, &fileio->b); +		dprintk(5, "vb2_dbuf result: %d\n", ret);  		if (ret) -			goto end; +			return ret;  		/*  		 * Buffer has been queued, update the status  		 */  		buf->pos = 0;  		buf->queued = 1; -		buf->size = q->bufs[0]->v4l2_planes[0].length; +		buf->size = vb2_plane_size(q->bufs[index], 0);  		fileio->q_count += 1; -  		/* -		 * Switch to the next buffer +		 * If we are queuing up buffers for the first time, then +		 * increase initial_index by one.  		 */ -		fileio->index = (index + 1) % q->num_buffers; - +		if (fileio->initial_index < q->num_buffers) +			fileio->initial_index++;  		/* -		 * Start streaming if required. +		 * The next buffer to use is either a buffer that's going to be +		 * queued for the first time (initial_index < q->num_buffers) +		 * or it is equal to q->num_buffers, meaning that the next +		 * time we need to dequeue a buffer since we've now queued up +		 * all the 'first time' buffers.  		 */ -		if (!read && !q->streaming) { -			ret = vb2_streamon(q, q->type); -			if (ret) -				goto end; -		} +		fileio->cur_index = fileio->initial_index;  	}  	/* @@ -2452,11 +3013,6 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_  	 */  	if (ret == 0)  		ret = count; -end: -	/* -	 * Restore the fileio context and block vb2 ioctl interface. 
-	 */ -	q->fileio = fileio;  	return ret;  } @@ -2467,13 +3023,155 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,  }  EXPORT_SYMBOL_GPL(vb2_read); -size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count, +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,  		loff_t *ppos, int nonblocking)  { -	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0); +	return __vb2_perform_fileio(q, (char __user *) data, count, +							ppos, nonblocking, 0);  }  EXPORT_SYMBOL_GPL(vb2_write); +struct vb2_threadio_data { +	struct task_struct *thread; +	vb2_thread_fnc fnc; +	void *priv; +	bool stop; +}; + +static int vb2_thread(void *data) +{ +	struct vb2_queue *q = data; +	struct vb2_threadio_data *threadio = q->threadio; +	struct vb2_fileio_data *fileio = q->fileio; +	bool set_timestamp = false; +	int prequeue = 0; +	int index = 0; +	int ret = 0; + +	if (V4L2_TYPE_IS_OUTPUT(q->type)) { +		prequeue = q->num_buffers; +		set_timestamp = +			(q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == +			V4L2_BUF_FLAG_TIMESTAMP_COPY; +	} + +	set_freezable(); + +	for (;;) { +		struct vb2_buffer *vb; + +		/* +		 * Call vb2_dqbuf to get buffer back. 
+		 */ +		memset(&fileio->b, 0, sizeof(fileio->b)); +		fileio->b.type = q->type; +		fileio->b.memory = q->memory; +		if (prequeue) { +			fileio->b.index = index++; +			prequeue--; +		} else { +			call_void_qop(q, wait_finish, q); +			ret = vb2_internal_dqbuf(q, &fileio->b, 0); +			call_void_qop(q, wait_prepare, q); +			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); +		} +		if (threadio->stop) +			break; +		if (ret) +			break; +		try_to_freeze(); + +		vb = q->bufs[fileio->b.index]; +		if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR)) +			ret = threadio->fnc(vb, threadio->priv); +		if (ret) +			break; +		call_void_qop(q, wait_finish, q); +		if (set_timestamp) +			v4l2_get_timestamp(&fileio->b.timestamp); +		ret = vb2_internal_qbuf(q, &fileio->b); +		call_void_qop(q, wait_prepare, q); +		if (ret) +			break; +	} + +	/* Hmm, linux becomes *very* unhappy without this ... */ +	while (!kthread_should_stop()) { +		set_current_state(TASK_INTERRUPTIBLE); +		schedule(); +	} +	return 0; +} + +/* + * This function should not be used for anything else but the videobuf2-dvb + * support. If you think you have another good use-case for this, then please + * contact the linux-media mailinglist first. 
+ */ +int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, +		     const char *thread_name) +{ +	struct vb2_threadio_data *threadio; +	int ret = 0; + +	if (q->threadio) +		return -EBUSY; +	if (vb2_is_busy(q)) +		return -EBUSY; +	if (WARN_ON(q->fileio)) +		return -EBUSY; + +	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); +	if (threadio == NULL) +		return -ENOMEM; +	threadio->fnc = fnc; +	threadio->priv = priv; + +	ret = __vb2_init_fileio(q, !V4L2_TYPE_IS_OUTPUT(q->type)); +	dprintk(3, "file io: vb2_init_fileio result: %d\n", ret); +	if (ret) +		goto nomem; +	q->threadio = threadio; +	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name); +	if (IS_ERR(threadio->thread)) { +		ret = PTR_ERR(threadio->thread); +		threadio->thread = NULL; +		goto nothread; +	} +	return 0; + +nothread: +	__vb2_cleanup_fileio(q); +nomem: +	kfree(threadio); +	return ret; +} +EXPORT_SYMBOL_GPL(vb2_thread_start); + +int vb2_thread_stop(struct vb2_queue *q) +{ +	struct vb2_threadio_data *threadio = q->threadio; +	struct vb2_fileio_data *fileio = q->fileio; +	int err; + +	if (threadio == NULL) +		return 0; +	call_void_qop(q, wait_finish, q); +	threadio->stop = true; +	vb2_internal_streamoff(q, q->type); +	call_void_qop(q, wait_prepare, q); +	q->fileio = NULL; +	fileio->req.count = 0; +	vb2_reqbufs(q, &fileio->req); +	kfree(fileio); +	err = kthread_stop(threadio->thread); +	threadio->thread = NULL; +	kfree(threadio); +	q->fileio = NULL; +	q->threadio = NULL; +	return err; +} +EXPORT_SYMBOL_GPL(vb2_thread_stop);  /*   * The following functions are not part of the vb2 core API, but are helper @@ -2619,19 +3317,32 @@ int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)  }  EXPORT_SYMBOL_GPL(vb2_fop_mmap); -int vb2_fop_release(struct file *file) +int _vb2_fop_release(struct file *file, struct mutex *lock)  {  	struct video_device *vdev = video_devdata(file);  	if (file->private_data == vdev->queue->owner) { +		if (lock) +			mutex_lock(lock);  		
vb2_queue_release(vdev->queue);  		vdev->queue->owner = NULL; +		if (lock) +			mutex_unlock(lock);  	}  	return v4l2_fh_release(file);  } +EXPORT_SYMBOL_GPL(_vb2_fop_release); + +int vb2_fop_release(struct file *file) +{ +	struct video_device *vdev = video_devdata(file); +	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; + +	return _vb2_fop_release(file, lock); +}  EXPORT_SYMBOL_GPL(vb2_fop_release); -ssize_t vb2_fop_write(struct file *file, char __user *buf, +ssize_t vb2_fop_write(struct file *file, const char __user *buf,  		size_t count, loff_t *ppos)  {  	struct video_device *vdev = video_devdata(file); @@ -2687,7 +3398,7 @@ unsigned int vb2_fop_poll(struct file *file, poll_table *wait)  	/* Try to be smart: only lock if polling might start fileio,  	   otherwise locking will only introduce unwanted delays. */ -	if (q->num_buffers == 0 && q->fileio == NULL) { +	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {  		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&  				(req_events & (POLLIN | POLLRDNORM)))  			must_lock = true; diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index fd56f256320..880be0782dd 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -393,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)  	return sgt;  } -static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv) +static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)  {  	struct vb2_dc_buf *buf = buf_priv;  	struct dma_buf *dbuf; @@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)  	if (WARN_ON(!buf->sgt_base))  		return NULL; -	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0); +	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);  	if (IS_ERR(dbuf))  		return NULL; @@ -423,6 +423,39 @@ static inline int vma_is_io(struct 
vm_area_struct *vma)  	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));  } +static int vb2_dc_get_user_pfn(unsigned long start, int n_pages, +	struct vm_area_struct *vma, unsigned long *res) +{ +	unsigned long pfn, start_pfn, prev_pfn; +	unsigned int i; +	int ret; + +	if (!vma_is_io(vma)) +		return -EFAULT; + +	ret = follow_pfn(vma, start, &pfn); +	if (ret) +		return ret; + +	start_pfn = pfn; +	start += PAGE_SIZE; + +	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) { +		prev_pfn = pfn; +		ret = follow_pfn(vma, start, &pfn); + +		if (ret) { +			pr_err("no page for address %lu\n", start); +			return ret; +		} +		if (pfn != prev_pfn + 1) +			return -EINVAL; +	} + +	*res = start_pfn; +	return 0; +} +  static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,  	int n_pages, struct vm_area_struct *vma, int write)  { @@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,  			unsigned long pfn;  			int ret = follow_pfn(vma, start, &pfn); +			if (!pfn_valid(pfn)) +				return -EINVAL; +  			if (ret) {  				pr_err("no page for address %lu\n", start);  				return ret; @@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)  	struct vb2_dc_buf *buf = buf_priv;  	struct sg_table *sgt = buf->dma_sgt; -	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); -	if (!vma_is_io(buf->vma)) -		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); +	if (sgt) { +		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); +		if (!vma_is_io(buf->vma)) +			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); -	sg_free_table(sgt); -	kfree(sgt); +		sg_free_table(sgt); +		kfree(sgt); +	}  	vb2_put_vma(buf->vma);  	kfree(buf);  } +/* + * For some kind of reserved memory there might be no struct page available, + * so all that can be done to support such 'pages' is to try to convert + * pfn to dma address or at the last resort just assume that + * dma address == physical address (like it has been assumed in 
earlier version + * of videobuf2-dma-contig + */ + +#ifdef __arch_pfn_to_dma +static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) +{ +	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn); +} +#elif defined(__pfn_to_bus) +static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) +{ +	return (dma_addr_t)__pfn_to_bus(pfn); +} +#elif defined(__pfn_to_phys) +static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) +{ +	return (dma_addr_t)__pfn_to_phys(pfn); +} +#else +static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) +{ +	/* really, we cannot do anything better at this point */ +	return (dma_addr_t)(pfn) << PAGE_SHIFT; +} +#endif +  static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,  	unsigned long size, int write)  { @@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,  	/* extract page list from userspace mapping */  	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);  	if (ret) { +		unsigned long pfn; +		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { +			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn); +			buf->size = size; +			kfree(pages); +			return buf; +		} +  		pr_err("failed to get user pages\n");  		goto fail_vma;  	} @@ -642,7 +719,7 @@ static int vb2_dc_map_dmabuf(void *mem_priv)  	/* get the associated scatterlist for this buffer */  	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir); -	if (IS_ERR_OR_NULL(sgt)) { +	if (IS_ERR(sgt)) {  		pr_err("Error getting dmabuf scatterlist\n");  		return -EINVAL;  	} diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index 16ae3dcc7e2..adefc31bb85 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -35,17 +35,62 @@ struct vb2_dma_sg_buf {  	struct page			**pages;  	int				write;  	int				offset; -	struct vb2_dma_sg_desc		
sg_desc; +	struct sg_table			sg_table; +	size_t				size; +	unsigned int			num_pages;  	atomic_t			refcount;  	struct vb2_vmarea_handler	handler; +	struct vm_area_struct		*vma;  };  static void vb2_dma_sg_put(void *buf_priv); +static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, +		gfp_t gfp_flags) +{ +	unsigned int last_page = 0; +	int size = buf->size; + +	while (size > 0) { +		struct page *pages; +		int order; +		int i; + +		order = get_order(size); +		/* Dont over allocate*/ +		if ((PAGE_SIZE << order) > size) +			order--; + +		pages = NULL; +		while (!pages) { +			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | +					__GFP_NOWARN | gfp_flags, order); +			if (pages) +				break; + +			if (order == 0) { +				while (last_page--) +					__free_page(buf->pages[last_page]); +				return -ENOMEM; +			} +			order--; +		} + +		split_page(pages, order); +		for (i = 0; i < (1 << order); i++) +			buf->pages[last_page++] = &pages[i]; + +		size -= PAGE_SIZE << order; +	} + +	return 0; +} +  static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)  {  	struct vb2_dma_sg_buf *buf; -	int i; +	int ret; +	int num_pages;  	buf = kzalloc(sizeof *buf, GFP_KERNEL);  	if (!buf) @@ -54,29 +99,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla  	buf->vaddr = NULL;  	buf->write = 0;  	buf->offset = 0; -	buf->sg_desc.size = size; +	buf->size = size;  	/* size is already page aligned */ -	buf->sg_desc.num_pages = size >> PAGE_SHIFT; - -	buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages * -				      sizeof(*buf->sg_desc.sglist)); -	if (!buf->sg_desc.sglist) -		goto fail_sglist_alloc; -	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); +	buf->num_pages = size >> PAGE_SHIFT; -	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), +	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),  			     GFP_KERNEL);  	if (!buf->pages)  		goto fail_pages_array_alloc; -	for (i = 0; i < 
buf->sg_desc.num_pages; ++i) { -		buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | -					   __GFP_NOWARN | gfp_flags); -		if (NULL == buf->pages[i]) -			goto fail_pages_alloc; -		sg_set_page(&buf->sg_desc.sglist[i], -			    buf->pages[i], PAGE_SIZE, 0); -	} +	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags); +	if (ret) +		goto fail_pages_alloc; + +	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, +			buf->num_pages, 0, size, gfp_flags); +	if (ret) +		goto fail_table_alloc;  	buf->handler.refcount = &buf->refcount;  	buf->handler.put = vb2_dma_sg_put; @@ -85,18 +124,16 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla  	atomic_inc(&buf->refcount);  	dprintk(1, "%s: Allocated buffer of %d pages\n", -		__func__, buf->sg_desc.num_pages); +		__func__, buf->num_pages);  	return buf; +fail_table_alloc: +	num_pages = buf->num_pages; +	while (num_pages--) +		__free_page(buf->pages[num_pages]);  fail_pages_alloc: -	while (--i >= 0) -		__free_page(buf->pages[i]);  	kfree(buf->pages); -  fail_pages_array_alloc: -	vfree(buf->sg_desc.sglist); - -fail_sglist_alloc:  	kfree(buf);  	return NULL;  } @@ -104,14 +141,14 @@ fail_sglist_alloc:  static void vb2_dma_sg_put(void *buf_priv)  {  	struct vb2_dma_sg_buf *buf = buf_priv; -	int i = buf->sg_desc.num_pages; +	int i = buf->num_pages;  	if (atomic_dec_and_test(&buf->refcount)) {  		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, -			buf->sg_desc.num_pages); +			buf->num_pages);  		if (buf->vaddr) -			vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages); -		vfree(buf->sg_desc.sglist); +			vm_unmap_ram(buf->vaddr, buf->num_pages); +		sg_free_table(&buf->sg_table);  		while (--i >= 0)  			__free_page(buf->pages[i]);  		kfree(buf->pages); @@ -119,12 +156,18 @@ static void vb2_dma_sg_put(void *buf_priv)  	}  } +static inline int vma_is_io(struct vm_area_struct *vma) +{ +	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); +} +  static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned 
long vaddr,  				    unsigned long size, int write)  {  	struct vb2_dma_sg_buf *buf;  	unsigned long first, last; -	int num_pages_from_user, i; +	int num_pages_from_user; +	struct vm_area_struct *vma;  	buf = kzalloc(sizeof *buf, GFP_KERNEL);  	if (!buf) @@ -133,56 +176,76 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,  	buf->vaddr = NULL;  	buf->write = write;  	buf->offset = vaddr & ~PAGE_MASK; -	buf->sg_desc.size = size; +	buf->size = size;  	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;  	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT; -	buf->sg_desc.num_pages = last - first + 1; - -	buf->sg_desc.sglist = vzalloc( -		buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist)); -	if (!buf->sg_desc.sglist) -		goto userptr_fail_sglist_alloc; - -	sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages); +	buf->num_pages = last - first + 1; -	buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *), +	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),  			     GFP_KERNEL);  	if (!buf->pages) -		goto userptr_fail_pages_array_alloc; +		goto userptr_fail_alloc_pages; + +	vma = find_vma(current->mm, vaddr); +	if (!vma) { +		dprintk(1, "no vma for address %lu\n", vaddr); +		goto userptr_fail_find_vma; +	} + +	if (vma->vm_end < vaddr + size) { +		dprintk(1, "vma at %lu is too small for %lu bytes\n", +			vaddr, size); +		goto userptr_fail_find_vma; +	} -	num_pages_from_user = get_user_pages(current, current->mm, +	buf->vma = vb2_get_vma(vma); +	if (!buf->vma) { +		dprintk(1, "failed to copy vma\n"); +		goto userptr_fail_find_vma; +	} + +	if (vma_is_io(buf->vma)) { +		for (num_pages_from_user = 0; +		     num_pages_from_user < buf->num_pages; +		     ++num_pages_from_user, vaddr += PAGE_SIZE) { +			unsigned long pfn; + +			if (follow_pfn(vma, vaddr, &pfn)) { +				dprintk(1, "no page for address %lu\n", vaddr); +				break; +			} +			buf->pages[num_pages_from_user] = pfn_to_page(pfn); +		} +	} else +		
num_pages_from_user = get_user_pages(current, current->mm,  					     vaddr & PAGE_MASK, -					     buf->sg_desc.num_pages, +					     buf->num_pages,  					     write,  					     1, /* force */  					     buf->pages,  					     NULL); -	if (num_pages_from_user != buf->sg_desc.num_pages) +	if (num_pages_from_user != buf->num_pages)  		goto userptr_fail_get_user_pages; -	sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0], -		    PAGE_SIZE - buf->offset, buf->offset); -	size -= PAGE_SIZE - buf->offset; -	for (i = 1; i < buf->sg_desc.num_pages; ++i) { -		sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i], -			    min_t(size_t, PAGE_SIZE, size), 0); -		size -= min_t(size_t, PAGE_SIZE, size); -	} +	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages, +			buf->num_pages, buf->offset, size, 0)) +		goto userptr_fail_alloc_table_from_pages; +  	return buf; +userptr_fail_alloc_table_from_pages:  userptr_fail_get_user_pages:  	dprintk(1, "get_user_pages requested/got: %d/%d]\n", -	       num_pages_from_user, buf->sg_desc.num_pages); -	while (--num_pages_from_user >= 0) -		put_page(buf->pages[num_pages_from_user]); +		buf->num_pages, num_pages_from_user); +	if (!vma_is_io(buf->vma)) +		while (--num_pages_from_user >= 0) +			put_page(buf->pages[num_pages_from_user]); +	vb2_put_vma(buf->vma); +userptr_fail_find_vma:  	kfree(buf->pages); - -userptr_fail_pages_array_alloc: -	vfree(buf->sg_desc.sglist); - -userptr_fail_sglist_alloc: +userptr_fail_alloc_pages:  	kfree(buf);  	return NULL;  } @@ -194,19 +257,21 @@ userptr_fail_sglist_alloc:  static void vb2_dma_sg_put_userptr(void *buf_priv)  {  	struct vb2_dma_sg_buf *buf = buf_priv; -	int i = buf->sg_desc.num_pages; +	int i = buf->num_pages;  	dprintk(1, "%s: Releasing userspace buffer of %d pages\n", -	       __func__, buf->sg_desc.num_pages); +	       __func__, buf->num_pages);  	if (buf->vaddr) -		vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages); +		vm_unmap_ram(buf->vaddr, buf->num_pages); +	sg_free_table(&buf->sg_table);  
	while (--i >= 0) {  		if (buf->write)  			set_page_dirty_lock(buf->pages[i]); -		put_page(buf->pages[i]); +		if (!vma_is_io(buf->vma)) +			put_page(buf->pages[i]);  	} -	vfree(buf->sg_desc.sglist);  	kfree(buf->pages); +	vb2_put_vma(buf->vma);  	kfree(buf);  } @@ -218,7 +283,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)  	if (!buf->vaddr)  		buf->vaddr = vm_map_ram(buf->pages, -					buf->sg_desc.num_pages, +					buf->num_pages,  					-1,  					PAGE_KERNEL); @@ -274,7 +339,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv)  {  	struct vb2_dma_sg_buf *buf = buf_priv; -	return &buf->sg_desc; +	return &buf->sg_table;  }  const struct vb2_mem_ops vb2_dma_sg_memops = { diff --git a/drivers/media/v4l2-core/videobuf2-dvb.c b/drivers/media/v4l2-core/videobuf2-dvb.c new file mode 100644 index 00000000000..d09269846b7 --- /dev/null +++ b/drivers/media/v4l2-core/videobuf2-dvb.c @@ -0,0 +1,336 @@ +/* + * + * some helper function for simple DVB cards which simply DMA the + * complete transport stream and let the computer sort everything else + * (i.e. we are using the software demux, ...).  Also uses the + * video-buf to manage DMA buffers. + * + * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs] + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/slab.h> + +#include <media/videobuf2-dvb.h> + +/* ------------------------------------------------------------------ */ + +MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); +MODULE_LICENSE("GPL"); + +/* ------------------------------------------------------------------ */ + +static int dvb_fnc(struct vb2_buffer *vb, void *priv) +{ +	struct vb2_dvb *dvb = priv; + +	dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0), +				      vb2_get_plane_payload(vb, 0)); +	return 0; +} + +static int vb2_dvb_start_feed(struct dvb_demux_feed *feed) +{ +	struct dvb_demux *demux = feed->demux; +	struct vb2_dvb *dvb = demux->priv; +	int rc = 0; + +	if (!demux->dmx.frontend) +		return -EINVAL; + +	mutex_lock(&dvb->lock); +	dvb->nfeeds++; + +	if (!dvb->dvbq.threadio) { +		rc = vb2_thread_start(&dvb->dvbq, dvb_fnc, dvb, dvb->name); +		if (rc) +			dvb->nfeeds--; +	} +	if (!rc) +		rc = dvb->nfeeds; +	mutex_unlock(&dvb->lock); +	return rc; +} + +static int vb2_dvb_stop_feed(struct dvb_demux_feed *feed) +{ +	struct dvb_demux *demux = feed->demux; +	struct vb2_dvb *dvb = demux->priv; +	int err = 0; + +	mutex_lock(&dvb->lock); +	dvb->nfeeds--; +	if (0 == dvb->nfeeds) +		err = vb2_thread_stop(&dvb->dvbq); +	mutex_unlock(&dvb->lock); +	return err; +} + +static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe, +			  struct module *module, +			  void *adapter_priv, +			  struct device *device, +			  char *adapter_name, +			  short *adapter_nr, +			  int mfe_shared) +{ +	int result; + +	mutex_init(&fe->lock); + +	/* register adapter */ +	result = dvb_register_adapter(&fe->adapter, adapter_name, module, +		device, adapter_nr); +	if (result < 0) { +		pr_warn("%s: dvb_register_adapter failed (errno = %d)\n", +		       adapter_name, result); +	} +	fe->adapter.priv = adapter_priv; +	fe->adapter.mfe_shared = mfe_shared; + +	return result; +} + +static int 
vb2_dvb_register_frontend(struct dvb_adapter *adapter, +	struct vb2_dvb *dvb) +{ +	int result; + +	/* register frontend */ +	result = dvb_register_frontend(adapter, dvb->frontend); +	if (result < 0) { +		pr_warn("%s: dvb_register_frontend failed (errno = %d)\n", +		       dvb->name, result); +		goto fail_frontend; +	} + +	/* register demux stuff */ +	dvb->demux.dmx.capabilities = +		DMX_TS_FILTERING | DMX_SECTION_FILTERING | +		DMX_MEMORY_BASED_FILTERING; +	dvb->demux.priv       = dvb; +	dvb->demux.filternum  = 256; +	dvb->demux.feednum    = 256; +	dvb->demux.start_feed = vb2_dvb_start_feed; +	dvb->demux.stop_feed  = vb2_dvb_stop_feed; +	result = dvb_dmx_init(&dvb->demux); +	if (result < 0) { +		pr_warn("%s: dvb_dmx_init failed (errno = %d)\n", +		       dvb->name, result); +		goto fail_dmx; +	} + +	dvb->dmxdev.filternum    = 256; +	dvb->dmxdev.demux        = &dvb->demux.dmx; +	dvb->dmxdev.capabilities = 0; +	result = dvb_dmxdev_init(&dvb->dmxdev, adapter); + +	if (result < 0) { +		pr_warn("%s: dvb_dmxdev_init failed (errno = %d)\n", +		       dvb->name, result); +		goto fail_dmxdev; +	} + +	dvb->fe_hw.source = DMX_FRONTEND_0; +	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw); +	if (result < 0) { +		pr_warn("%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n", +		       dvb->name, result); +		goto fail_fe_hw; +	} + +	dvb->fe_mem.source = DMX_MEMORY_FE; +	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem); +	if (result < 0) { +		pr_warn("%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n", +		       dvb->name, result); +		goto fail_fe_mem; +	} + +	result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw); +	if (result < 0) { +		pr_warn("%s: connect_frontend failed (errno = %d)\n", +		       dvb->name, result); +		goto fail_fe_conn; +	} + +	/* register network adapter */ +	result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx); +	if (result < 0) { +		pr_warn("%s: dvb_net_init failed (errno = %d)\n", +		      
 dvb->name, result); +		goto fail_fe_conn; +	} +	return 0; + +fail_fe_conn: +	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); +fail_fe_mem: +	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); +fail_fe_hw: +	dvb_dmxdev_release(&dvb->dmxdev); +fail_dmxdev: +	dvb_dmx_release(&dvb->demux); +fail_dmx: +	dvb_unregister_frontend(dvb->frontend); +fail_frontend: +	dvb_frontend_detach(dvb->frontend); +	dvb->frontend = NULL; + +	return result; +} + +/* ------------------------------------------------------------------ */ +/* Register a single adapter and one or more frontends */ +int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, +			 struct module *module, +			 void *adapter_priv, +			 struct device *device, +			 short *adapter_nr, +			 int mfe_shared) +{ +	struct list_head *list, *q; +	struct vb2_dvb_frontend *fe; +	int res; + +	fe = vb2_dvb_get_frontend(f, 1); +	if (!fe) { +		pr_warn("Unable to register the adapter which has no frontends\n"); +		return -EINVAL; +	} + +	/* Bring up the adapter */ +	res = vb2_dvb_register_adapter(f, module, adapter_priv, device, +		fe->dvb.name, adapter_nr, mfe_shared); +	if (res < 0) { +		pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res); +		return res; +	} + +	/* Attach all of the frontends to the adapter */ +	mutex_lock(&f->lock); +	list_for_each_safe(list, q, &f->felist) { +		fe = list_entry(list, struct vb2_dvb_frontend, felist); +		res = vb2_dvb_register_frontend(&f->adapter, &fe->dvb); +		if (res < 0) { +			pr_warn("%s: vb2_dvb_register_frontend failed (errno = %d)\n", +				fe->dvb.name, res); +			goto err; +		} +	} +	mutex_unlock(&f->lock); +	return 0; + +err: +	mutex_unlock(&f->lock); +	vb2_dvb_unregister_bus(f); +	return res; +} +EXPORT_SYMBOL(vb2_dvb_register_bus); + +void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f) +{ +	vb2_dvb_dealloc_frontends(f); + +	dvb_unregister_adapter(&f->adapter); +} +EXPORT_SYMBOL(vb2_dvb_unregister_bus); + +struct vb2_dvb_frontend 
*vb2_dvb_get_frontend( +	struct vb2_dvb_frontends *f, int id) +{ +	struct list_head *list, *q; +	struct vb2_dvb_frontend *fe, *ret = NULL; + +	mutex_lock(&f->lock); + +	list_for_each_safe(list, q, &f->felist) { +		fe = list_entry(list, struct vb2_dvb_frontend, felist); +		if (fe->id == id) { +			ret = fe; +			break; +		} +	} + +	mutex_unlock(&f->lock); + +	return ret; +} +EXPORT_SYMBOL(vb2_dvb_get_frontend); + +int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f, +	struct dvb_frontend *p) +{ +	struct list_head *list, *q; +	struct vb2_dvb_frontend *fe = NULL; +	int ret = 0; + +	mutex_lock(&f->lock); + +	list_for_each_safe(list, q, &f->felist) { +		fe = list_entry(list, struct vb2_dvb_frontend, felist); +		if (fe->dvb.frontend == p) { +			ret = fe->id; +			break; +		} +	} + +	mutex_unlock(&f->lock); + +	return ret; +} +EXPORT_SYMBOL(vb2_dvb_find_frontend); + +struct vb2_dvb_frontend *vb2_dvb_alloc_frontend( +	struct vb2_dvb_frontends *f, int id) +{ +	struct vb2_dvb_frontend *fe; + +	fe = kzalloc(sizeof(struct vb2_dvb_frontend), GFP_KERNEL); +	if (fe == NULL) +		return NULL; + +	fe->id = id; +	mutex_init(&fe->dvb.lock); + +	mutex_lock(&f->lock); +	list_add_tail(&fe->felist, &f->felist); +	mutex_unlock(&f->lock); +	return fe; +} +EXPORT_SYMBOL(vb2_dvb_alloc_frontend); + +void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f) +{ +	struct list_head *list, *q; +	struct vb2_dvb_frontend *fe; + +	mutex_lock(&f->lock); +	list_for_each_safe(list, q, &f->felist) { +		fe = list_entry(list, struct vb2_dvb_frontend, felist); +		if (fe->dvb.net.dvbdev) { +			dvb_net_release(&fe->dvb.net); +			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx, +				&fe->dvb.fe_mem); +			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx, +				&fe->dvb.fe_hw); +			dvb_dmxdev_release(&fe->dvb.dmxdev); +			dvb_dmx_release(&fe->dvb.demux); +			dvb_unregister_frontend(fe->dvb.frontend); +		} +		if (fe->dvb.frontend) +			/* always allocated, may have been reset */ +			
dvb_frontend_detach(fe->dvb.frontend); +		list_del(list); /* remove list entry */ +		kfree(fe);	/* free frontend allocation */ +	} +	mutex_unlock(&f->lock); +} +EXPORT_SYMBOL(vb2_dvb_dealloc_frontends);  | 
