author     Mauro Carvalho Chehab <m.chehab@samsung.com>      2013-11-02 06:20:16 -0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-12-04 11:06:33 -0800
commit     e715159bf75d8fcc847d9cb5ca0060c926eb0499 (patch)
tree       c61510be4d4d635cec1725826116b1325425afc3 /drivers
parent     282e059380646cb413fb3917ba9035032cd6d986 (diff)
media: v4l2-async: Don't use dynamic static allocation
commit 24e9a47e14f0a97ee97abc3dd86b2ef254448a17 upstream.

Dynamic static allocation is evil, as the kernel stack is too small, and compilation complains about it on some archs:

	drivers/media/v4l2-core/v4l2-async.c:238:1: warning: 'v4l2_async_notifier_unregister' uses dynamic stack allocation [enabled by default]

Instead, let's enforce a limit for the buffer. In this specific case, there's a hard limit imposed by V4L2_MAX_SUBDEVS, which is currently 128. That means the buffer size can be up to 128 x 8 = 1024 bytes (on a 64-bit kernel), which is too big for the stack. Worse than that, someone could increase the limit and cause real trouble.

So, let's use dynamically allocated data instead.

Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
Reviewed-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
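For illustration only, here is a minimal userspace sketch of the same pattern the patch applies: a variable-length array on the stack is replaced by a heap-allocated pointer cache, and the code degrades gracefully when that allocation fails. The resource type and the detach/reattach/put helpers are hypothetical stand-ins for the kernel's struct device handling, not part of the patch itself.

	#include <stdio.h>
	#include <stdlib.h>

	struct resource { int id; };

	/* Hypothetical stand-ins for device_release_driver(), device_attach(), put_device(). */
	static void detach(struct resource *r)   { printf("detach %d\n", r->id); }
	static void reattach(struct resource *r) { printf("reattach %d\n", r->id); }
	static void put(struct resource *r)      { printf("put %d\n", r->id); }

	static void unregister_all(struct resource **items, unsigned int n_items)
	{
		/* Heap allocation replaces "struct resource *cache[n_items];" on the stack. */
		struct resource **cache = malloc(n_items * sizeof(*cache));
		unsigned int i = 0;

		for (unsigned int j = 0; j < n_items; j++) {
			struct resource *r = items[j];

			detach(r);              /* always done in the first pass */

			if (cache)
				cache[i++] = r; /* remember it for the second pass */
			else
				put(r);         /* no cache: drop the reference now */
		}

		/* If the allocation failed, i is still 0 and this loop never runs. */
		while (i--) {
			reattach(cache[i]);
			put(cache[i]);
		}

		free(cache);
	}

	int main(void)
	{
		struct resource a = { 1 }, b = { 2 };
		struct resource *items[] = { &a, &b };

		unregister_all(items, 2);
		return 0;
	}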
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/media/v4l2-core/v4l2-async.c | 30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index c85d69da35b..85a6a34128a 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -189,30 +189,53 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
struct v4l2_subdev *sd, *tmp;
unsigned int notif_n_subdev = notifier->num_subdevs;
unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
- struct device *dev[n_subdev];
+ struct device **dev;
int i = 0;
if (!notifier->v4l2_dev)
return;
+ dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(notifier->v4l2_dev->dev,
+ "Failed to allocate device cache!\n");
+ }
+
mutex_lock(&list_lock);
list_del(&notifier->list);
list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
- dev[i] = get_device(sd->dev);
+ struct device *d;
+
+ d = get_device(sd->dev);
v4l2_async_cleanup(sd);
/* If we handled USB devices, we'd have to lock the parent too */
- device_release_driver(dev[i++]);
+ device_release_driver(d);
if (notifier->unbind)
notifier->unbind(notifier, sd, sd->asd);
+
+ /*
+ * Store device at the device cache, in order to call
+ * put_device() on the final step
+ */
+ if (dev)
+ dev[i++] = d;
+ else
+ put_device(d);
}
mutex_unlock(&list_lock);
+ /*
+ * Call device_attach() to reprobe devices
+ *
+ * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
+ * executed.
+ */
while (i--) {
struct device *d = dev[i];
@@ -228,6 +251,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
}
put_device(d);
}
+ kfree(dev);
notifier->v4l2_dev = NULL;