Diffstat (limited to 'drivers/dma/dmatest.c')
 drivers/dma/dmatest.c | 704
 1 file changed, 531 insertions(+), 173 deletions(-)
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358b684..e27cec25c59 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -2,13 +2,18 @@
  * DMA Engine test module
  *
  * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2013 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
+#include <linux/freezer.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -18,42 +23,114 @@
 #include <linux/wait.h>
 
 static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO);
+module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
 
 static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+module_param_string(channel, test_channel, sizeof(test_channel),
+		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
-static char test_device[20];
-module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+static char test_device[32];
+module_param_string(device, test_device, sizeof(test_device),
+		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
 
 static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO);
+module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(threads_per_chan,
 		"Number of threads to start per channel (default: 1)");
 
 static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO);
+module_param(max_channels, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_channels,
 		"Maximum number of channels to use (default: all)");
 
 static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO);
+module_param(iterations, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(iterations,
 		"Iterations before stopping test (default: infinite)");
 
 static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO);
+module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(xor_sources,
 		"Number of xor source buffers (default: 3)");
 
 static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO);
+module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(pq_sources,
 		"Number of p+q source buffers (default: 3)");
 
+static int timeout = 3000;
+module_param(timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
+		 "Pass -1 for infinite timeout");
+
+static bool noverify;
+module_param(noverify, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(noverify, "Disable random data setup and verification");
+
+static bool verbose;
+module_param(verbose, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
+
+/**
+ * struct dmatest_params - test parameters.
+ * @buf_size:		size of the memcpy test buffer
+ * @channel:		bus ID of the channel to test
+ * @device:		bus ID of the DMA Engine to test
+ * @threads_per_chan:	number of threads to start per channel
+ * @max_channels:	maximum number of channels to use
+ * @iterations:		iterations before stopping test
+ * @xor_sources:	number of xor source buffers
+ * @pq_sources:		number of p+q source buffers
+ * @timeout:		transfer timeout in msec, -1 for infinite timeout
+ */
+struct dmatest_params {
+	unsigned int	buf_size;
+	char		channel[20];
+	char		device[32];
+	unsigned int	threads_per_chan;
+	unsigned int	max_channels;
+	unsigned int	iterations;
+	unsigned int	xor_sources;
+	unsigned int	pq_sources;
+	int		timeout;
+	bool		noverify;
+};
+
+/**
+ * struct dmatest_info - test information.
+ * @params:		test parameters
+ * @lock:		access protection to the fields of this structure
+ */
+static struct dmatest_info {
+	/* Test parameters */
+	struct dmatest_params	params;
+
+	/* Internal state */
+	struct list_head	channels;
+	unsigned int		nr_channels;
+	struct mutex		lock;
+	bool			did_init;
+} test_info = {
+	.channels = LIST_HEAD_INIT(test_info.channels),
+	.lock = __MUTEX_INITIALIZER(test_info.lock),
+};
+
+static int dmatest_run_set(const char *val, const struct kernel_param *kp);
+static int dmatest_run_get(char *val, const struct kernel_param *kp);
+static struct kernel_param_ops run_ops = {
+	.set = dmatest_run_set,
+	.get = dmatest_run_get,
+};
+static bool dmatest_run;
+module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(run, "Run the test (default: false)");
+
+/* Maximum amount of mismatched bytes in buffer to print */
+#define MAX_ERROR_COUNT		32
+
 /*
  * Initialization patterns. All bytes in the source buffer has bit 7
  * set, all bytes in the destination buffer has bit 7 cleared.
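The hunks above make every parameter writable at runtime (S_IRUGO | S_IWUSR) and add a "run" control wired through module_param_cb(), so a test can be started from sysfs after the module is loaded. The following is a minimal, self-contained sketch of that get/set callback pattern, assuming nothing about dmatest itself; the demo_* names and the printout are illustrative only.

/*
 * Minimal sketch of the module_param_cb() pattern used for "run" above:
 * a plain bool backed by custom get/set callbacks so that writing the
 * parameter through sysfs can trigger work.  The demo_* identifiers are
 * illustrative and are not part of dmatest.c.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

static bool demo_enabled;

static int demo_set(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_bool(val, kp);	/* parses "0"/"1"/"y"/"n" */

	if (ret)
		return ret;

	/* side effect performed on every write of the parameter */
	pr_info("demo: enabled=%d\n", demo_enabled);
	return 0;
}

static int demo_get(char *buffer, const struct kernel_param *kp)
{
	/* report the current value; dmatest uses this hook to poll thread state */
	return param_get_bool(buffer, kp);
}

static const struct kernel_param_ops demo_ops = {
	.set = demo_set,
	.get = demo_get,
};
module_param_cb(enabled, &demo_ops, &demo_enabled, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enabled, "demo flag with get/set callbacks");

MODULE_LICENSE("GPL");

Writing such a parameter (for instance via /sys/module/<module>/parameters/enabled) calls the .set hook; dmatest_run_set() further down uses the same hook to refuse writes while test threads are still running and to restart the test otherwise.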
@@ -73,11 +150,13 @@ MODULE_PARM_DESC(pq_sources,  struct dmatest_thread {  	struct list_head	node; +	struct dmatest_info	*info;  	struct task_struct	*task;  	struct dma_chan		*chan;  	u8			**srcs;  	u8			**dsts;  	enum dma_transaction_type type; +	bool			done;  };  struct dmatest_chan { @@ -86,36 +165,69 @@ struct dmatest_chan {  	struct list_head	threads;  }; -/* - * These are protected by dma_list_mutex since they're only used by - * the DMA filter function callback - */ -static LIST_HEAD(dmatest_channels); -static unsigned int nr_channels; +static DECLARE_WAIT_QUEUE_HEAD(thread_wait); +static bool wait; -static bool dmatest_match_channel(struct dma_chan *chan) +static bool is_threaded_test_run(struct dmatest_info *info)  { -	if (test_channel[0] == '\0') +	struct dmatest_chan *dtc; + +	list_for_each_entry(dtc, &info->channels, node) { +		struct dmatest_thread *thread; + +		list_for_each_entry(thread, &dtc->threads, node) { +			if (!thread->done) +				return true; +		} +	} + +	return false; +} + +static int dmatest_wait_get(char *val, const struct kernel_param *kp) +{ +	struct dmatest_info *info = &test_info; +	struct dmatest_params *params = &info->params; + +	if (params->iterations) +		wait_event(thread_wait, !is_threaded_test_run(info)); +	wait = true; +	return param_get_bool(val, kp); +} + +static struct kernel_param_ops wait_ops = { +	.get = dmatest_wait_get, +	.set = param_set_bool, +}; +module_param_cb(wait, &wait_ops, &wait, S_IRUGO); +MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)"); + +static bool dmatest_match_channel(struct dmatest_params *params, +		struct dma_chan *chan) +{ +	if (params->channel[0] == '\0')  		return true; -	return strcmp(dma_chan_name(chan), test_channel) == 0; +	return strcmp(dma_chan_name(chan), params->channel) == 0;  } -static bool dmatest_match_device(struct dma_device *device) +static bool dmatest_match_device(struct dmatest_params *params, +		struct dma_device *device)  { -	if (test_device[0] == '\0') +	if (params->device[0] == '\0')  		return true; -	return strcmp(dev_name(device->dev), test_device) == 0; +	return strcmp(dev_name(device->dev), params->device) == 0;  }  static unsigned long dmatest_random(void)  {  	unsigned long buf; -	get_random_bytes(&buf, sizeof(buf)); +	prandom_bytes(&buf, sizeof(buf));  	return buf;  } -static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) +static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, +		unsigned int buf_size)  {  	unsigned int i;  	u8 *buf; @@ -126,13 +238,14 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)  		for ( ; i < start + len; i++)  			buf[i] = PATTERN_SRC | PATTERN_COPY  				| (~i & PATTERN_COUNT_MASK); -		for ( ; i < test_buf_size; i++) +		for ( ; i < buf_size; i++)  			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);  		buf++;  	}  } -static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) +static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, +		unsigned int buf_size)  {  	unsigned int i;  	u8 *buf; @@ -143,7 +256,7 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)  		for ( ; i < start + len; i++)  			buf[i] = PATTERN_DST | PATTERN_OVERWRITE  				| (~i & PATTERN_COUNT_MASK); -		for ( ; i < test_buf_size; i++) +		for ( ; i < buf_size; i++)  			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);  	}  } @@ -156,22 +269,18 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,  	const char	
*thread_name = current->comm;  	if (is_srcbuf) -		pr_warning("%s: srcbuf[0x%x] overwritten!" -				" Expected %02x, got %02x\n", -				thread_name, index, expected, actual); +		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n", +			thread_name, index, expected, actual);  	else if ((pattern & PATTERN_COPY)  			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) -		pr_warning("%s: dstbuf[0x%x] not copied!" -				" Expected %02x, got %02x\n", -				thread_name, index, expected, actual); +		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n", +			thread_name, index, expected, actual);  	else if (diff & PATTERN_SRC) -		pr_warning("%s: dstbuf[0x%x] was copied!" -				" Expected %02x, got %02x\n", -				thread_name, index, expected, actual); +		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n", +			thread_name, index, expected, actual);  	else -		pr_warning("%s: dstbuf[0x%x] mismatch!" -				" Expected %02x, got %02x\n", -				thread_name, index, expected, actual); +		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n", +			thread_name, index, expected, actual);  }  static unsigned int dmatest_verify(u8 **bufs, unsigned int start, @@ -191,7 +300,7 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,  			actual = buf[i];  			expected = pattern | (~counter & PATTERN_COUNT_MASK);  			if (actual != expected) { -				if (error_count < 32) +				if (error_count < MAX_ERROR_COUNT)  					dmatest_mismatch(actual, pattern, i,  							 counter, is_srcbuf);  				error_count++; @@ -200,16 +309,77 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,  		}  	} -	if (error_count > 32) -		pr_warning("%s: %u errors suppressed\n", -			current->comm, error_count - 32); +	if (error_count > MAX_ERROR_COUNT) +		pr_warn("%s: %u errors suppressed\n", +			current->comm, error_count - MAX_ERROR_COUNT);  	return error_count;  } -static void dmatest_callback(void *completion) +/* poor man's completion - we want to use wait_event_freezable() on it */ +struct dmatest_done { +	bool			done; +	wait_queue_head_t	*wait; +}; + +static void dmatest_callback(void *arg)  { -	complete(completion); +	struct dmatest_done *done = arg; + +	done->done = true; +	wake_up_all(done->wait); +} + +static unsigned int min_odd(unsigned int x, unsigned int y) +{ +	unsigned int val = min(x, y); + +	return val % 2 ? 
val : val - 1; +} + +static void result(const char *err, unsigned int n, unsigned int src_off, +		   unsigned int dst_off, unsigned int len, unsigned long data) +{ +	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n", +		current->comm, n, err, src_off, dst_off, len, data); +} + +static void dbg_result(const char *err, unsigned int n, unsigned int src_off, +		       unsigned int dst_off, unsigned int len, +		       unsigned long data) +{ +	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n", +		   current->comm, n, err, src_off, dst_off, len, data); +} + +#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ +	if (verbose) \ +		result(err, n, src_off, dst_off, len, data); \ +	else \ +		dbg_result(err, n, src_off, dst_off, len, data); \ +}) + +static unsigned long long dmatest_persec(s64 runtime, unsigned int val) +{ +	unsigned long long per_sec = 1000000; + +	if (runtime <= 0) +		return 0; + +	/* drop precision until runtime is 32-bits */ +	while (runtime > UINT_MAX) { +		runtime >>= 1; +		per_sec <<= 1; +	} + +	per_sec *= val; +	do_div(per_sec, runtime); +	return per_sec; +} + +static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) +{ +	return dmatest_persec(runtime, len >> 10);  }  /* @@ -228,9 +398,13 @@ static void dmatest_callback(void *completion)   */  static int dmatest_func(void *data)  { +	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);  	struct dmatest_thread	*thread = data; +	struct dmatest_done	done = { .wait = &done_wait }; +	struct dmatest_info	*info; +	struct dmatest_params	*params;  	struct dma_chan		*chan; -	const char		*thread_name; +	struct dma_device	*dev;  	unsigned int		src_off, dst_off, len;  	unsigned int		error_count;  	unsigned int		failed_tests = 0; @@ -238,36 +412,49 @@ static int dmatest_func(void *data)  	dma_cookie_t		cookie;  	enum dma_status		status;  	enum dma_ctrl_flags 	flags; -	u8			pq_coefs[pq_sources + 1]; +	u8			*pq_coefs = NULL;  	int			ret;  	int			src_cnt;  	int			dst_cnt;  	int			i; +	ktime_t			ktime; +	s64			runtime = 0; +	unsigned long long	total_len = 0; -	thread_name = current->comm; +	set_freezable();  	ret = -ENOMEM;  	smp_rmb(); +	info = thread->info; +	params = &info->params;  	chan = thread->chan; +	dev = chan->device;  	if (thread->type == DMA_MEMCPY)  		src_cnt = dst_cnt = 1;  	else if (thread->type == DMA_XOR) { -		src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ +		/* force odd to ensure dst = src */ +		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);  		dst_cnt = 1;  	} else if (thread->type == DMA_PQ) { -		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ +		/* force odd to ensure dst = src */ +		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));  		dst_cnt = 2; + +		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); +		if (!pq_coefs) +			goto err_thread_type; +  		for (i = 0; i < src_cnt; i++)  			pq_coefs[i] = 1;  	} else -		goto err_srcs; +		goto err_thread_type;  	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);  	if (!thread->srcs)  		goto err_srcs;  	for (i = 0; i < src_cnt; i++) { -		thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); +		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);  		if (!thread->srcs[i])  			goto err_srcbuf;  	} @@ -277,7 +464,7 @@ static int dmatest_func(void *data)  	if (!thread->dsts)  		goto err_dsts;  	for (i = 0; i < dst_cnt; i++) { -		thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); +		thread->dsts[i] = kmalloc(params->buf_size, 
GFP_KERNEL);  		if (!thread->dsts[i])  			goto err_dstbuf;  	} @@ -285,16 +472,18 @@ static int dmatest_func(void *data)  	set_user_nice(current, 10); -	flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; +	/* +	 * src and dst buffers are freed by ourselves below +	 */ +	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; +	ktime = ktime_get();  	while (!kthread_should_stop() -	       && !(iterations && total_tests >= iterations)) { -		struct dma_device *dev = chan->device; +	       && !(params->iterations && total_tests >= params->iterations)) {  		struct dma_async_tx_descriptor *tx = NULL; -		dma_addr_t dma_srcs[src_cnt]; -		dma_addr_t dma_dsts[dst_cnt]; -		struct completion cmp; -		unsigned long tmo = msecs_to_jiffies(3000); +		struct dmaengine_unmap_data *um; +		dma_addr_t srcs[src_cnt]; +		dma_addr_t *dsts;  		u8 align = 0;  		total_tests++; @@ -307,150 +496,196 @@ static int dmatest_func(void *data)  		else if (thread->type == DMA_PQ)  			align = dev->pq_align; -		if (1 << align > test_buf_size) { +		if (1 << align > params->buf_size) {  			pr_err("%u-byte buffer too small for %d-byte alignment\n", -			       test_buf_size, 1 << align); +			       params->buf_size, 1 << align);  			break;  		} -		len = dmatest_random() % test_buf_size + 1; +		if (params->noverify) { +			len = params->buf_size; +			src_off = 0; +			dst_off = 0; +		} else { +			len = dmatest_random() % params->buf_size + 1; +			len = (len >> align) << align; +			if (!len) +				len = 1 << align; +			src_off = dmatest_random() % (params->buf_size - len + 1); +			dst_off = dmatest_random() % (params->buf_size - len + 1); + +			src_off = (src_off >> align) << align; +			dst_off = (dst_off >> align) << align; + +			dmatest_init_srcs(thread->srcs, src_off, len, +					  params->buf_size); +			dmatest_init_dsts(thread->dsts, dst_off, len, +					  params->buf_size); +		} +  		len = (len >> align) << align;  		if (!len)  			len = 1 << align; -		src_off = dmatest_random() % (test_buf_size - len + 1); -		dst_off = dmatest_random() % (test_buf_size - len + 1); +		total_len += len; -		src_off = (src_off >> align) << align; -		dst_off = (dst_off >> align) << align; - -		dmatest_init_srcs(thread->srcs, src_off, len); -		dmatest_init_dsts(thread->dsts, dst_off, len); +		um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, +					      GFP_KERNEL); +		if (!um) { +			failed_tests++; +			result("unmap data NULL", total_tests, +			       src_off, dst_off, len, ret); +			continue; +		} +		um->len = params->buf_size;  		for (i = 0; i < src_cnt; i++) { -			u8 *buf = thread->srcs[i] + src_off; - -			dma_srcs[i] = dma_map_single(dev->dev, buf, len, -						     DMA_TO_DEVICE); +			void *buf = thread->srcs[i]; +			struct page *pg = virt_to_page(buf); +			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; + +			um->addr[i] = dma_map_page(dev->dev, pg, pg_off, +						   um->len, DMA_TO_DEVICE); +			srcs[i] = um->addr[i] + src_off; +			ret = dma_mapping_error(dev->dev, um->addr[i]); +			if (ret) { +				dmaengine_unmap_put(um); +				result("src mapping error", total_tests, +				       src_off, dst_off, len, ret); +				failed_tests++; +				continue; +			} +			um->to_cnt++;  		}  		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ +		dsts = &um->addr[src_cnt];  		for (i = 0; i < dst_cnt; i++) { -			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], -						     test_buf_size, -						     DMA_BIDIRECTIONAL); +			void *buf = thread->dsts[i]; +			struct page *pg = virt_to_page(buf); +			unsigned pg_off = (unsigned long) buf 
& ~PAGE_MASK; + +			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, +					       DMA_BIDIRECTIONAL); +			ret = dma_mapping_error(dev->dev, dsts[i]); +			if (ret) { +				dmaengine_unmap_put(um); +				result("dst mapping error", total_tests, +				       src_off, dst_off, len, ret); +				failed_tests++; +				continue; +			} +			um->bidi_cnt++;  		} -  		if (thread->type == DMA_MEMCPY)  			tx = dev->device_prep_dma_memcpy(chan, -							 dma_dsts[0] + dst_off, -							 dma_srcs[0], len, -							 flags); +							 dsts[0] + dst_off, +							 srcs[0], len, flags);  		else if (thread->type == DMA_XOR)  			tx = dev->device_prep_dma_xor(chan, -						      dma_dsts[0] + dst_off, -						      dma_srcs, src_cnt, +						      dsts[0] + dst_off, +						      srcs, src_cnt,  						      len, flags);  		else if (thread->type == DMA_PQ) {  			dma_addr_t dma_pq[dst_cnt];  			for (i = 0; i < dst_cnt; i++) -				dma_pq[i] = dma_dsts[i] + dst_off; -			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, +				dma_pq[i] = dsts[i] + dst_off; +			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,  						     src_cnt, pq_coefs,  						     len, flags);  		}  		if (!tx) { -			for (i = 0; i < src_cnt; i++) -				dma_unmap_single(dev->dev, dma_srcs[i], len, -						 DMA_TO_DEVICE); -			for (i = 0; i < dst_cnt; i++) -				dma_unmap_single(dev->dev, dma_dsts[i], -						 test_buf_size, -						 DMA_BIDIRECTIONAL); -			pr_warning("%s: #%u: prep error with src_off=0x%x " -					"dst_off=0x%x len=0x%x\n", -					thread_name, total_tests - 1, -					src_off, dst_off, len); +			dmaengine_unmap_put(um); +			result("prep error", total_tests, src_off, +			       dst_off, len, ret);  			msleep(100);  			failed_tests++;  			continue;  		} -		init_completion(&cmp); +		done.done = false;  		tx->callback = dmatest_callback; -		tx->callback_param = &cmp; +		tx->callback_param = &done;  		cookie = tx->tx_submit(tx);  		if (dma_submit_error(cookie)) { -			pr_warning("%s: #%u: submit error %d with src_off=0x%x " -					"dst_off=0x%x len=0x%x\n", -					thread_name, total_tests - 1, cookie, -					src_off, dst_off, len); +			dmaengine_unmap_put(um); +			result("submit error", total_tests, src_off, +			       dst_off, len, ret);  			msleep(100);  			failed_tests++;  			continue;  		}  		dma_async_issue_pending(chan); -		tmo = wait_for_completion_timeout(&cmp, tmo); +		wait_event_freezable_timeout(done_wait, done.done, +					     msecs_to_jiffies(params->timeout)); +  		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); -		if (tmo == 0) { -			pr_warning("%s: #%u: test timed out\n", -				   thread_name, total_tests - 1); +		if (!done.done) { +			/* +			 * We're leaving the timed out dma operation with +			 * dangling pointer to done_wait.  To make this +			 * correct, we'll need to allocate wait_done for +			 * each test iteration and perform "who's gonna +			 * free it this time?" dancing.  For now, just +			 * leave it dangling. +			 */ +			dmaengine_unmap_put(um); +			result("test timed out", total_tests, src_off, dst_off, +			       len, 0);  			failed_tests++;  			continue; -		} else if (status != DMA_SUCCESS) { -			pr_warning("%s: #%u: got completion callback," -				   " but status is \'%s\'\n", -				   thread_name, total_tests - 1, -				   status == DMA_ERROR ? "error" : "in progress"); +		} else if (status != DMA_COMPLETE) { +			dmaengine_unmap_put(um); +			result(status == DMA_ERROR ? 
+			       "completion error status" : +			       "completion busy status", total_tests, src_off, +			       dst_off, len, ret);  			failed_tests++;  			continue;  		} -		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ -		for (i = 0; i < dst_cnt; i++) -			dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, -					 DMA_BIDIRECTIONAL); +		dmaengine_unmap_put(um); -		error_count = 0; +		if (params->noverify) { +			verbose_result("test passed", total_tests, src_off, +				       dst_off, len, 0); +			continue; +		} -		pr_debug("%s: verifying source buffer...\n", thread_name); -		error_count += dmatest_verify(thread->srcs, 0, src_off, +		pr_debug("%s: verifying source buffer...\n", current->comm); +		error_count = dmatest_verify(thread->srcs, 0, src_off,  				0, PATTERN_SRC, true);  		error_count += dmatest_verify(thread->srcs, src_off,  				src_off + len, src_off,  				PATTERN_SRC | PATTERN_COPY, true);  		error_count += dmatest_verify(thread->srcs, src_off + len, -				test_buf_size, src_off + len, +				params->buf_size, src_off + len,  				PATTERN_SRC, true); -		pr_debug("%s: verifying dest buffer...\n", -				thread->task->comm); +		pr_debug("%s: verifying dest buffer...\n", current->comm);  		error_count += dmatest_verify(thread->dsts, 0, dst_off,  				0, PATTERN_DST, false);  		error_count += dmatest_verify(thread->dsts, dst_off,  				dst_off + len, src_off,  				PATTERN_SRC | PATTERN_COPY, false);  		error_count += dmatest_verify(thread->dsts, dst_off + len, -				test_buf_size, dst_off + len, +				params->buf_size, dst_off + len,  				PATTERN_DST, false);  		if (error_count) { -			pr_warning("%s: #%u: %u errors with " -				"src_off=0x%x dst_off=0x%x len=0x%x\n", -				thread_name, total_tests - 1, error_count, -				src_off, dst_off, len); +			result("data error", total_tests, src_off, dst_off, +			       len, error_count);  			failed_tests++;  		} else { -			pr_debug("%s: #%u: No errors with " -				"src_off=0x%x dst_off=0x%x len=0x%x\n", -				thread_name, total_tests - 1, -				src_off, dst_off, len); +			verbose_result("test passed", total_tests, src_off, +				       dst_off, len, 0);  		}  	} +	runtime = ktime_us_delta(ktime_get(), ktime);  	ret = 0;  	for (i = 0; thread->dsts[i]; i++) @@ -463,14 +698,19 @@ err_dsts:  err_srcbuf:  	kfree(thread->srcs);  err_srcs: -	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", -			thread_name, total_tests, failed_tests, ret); +	kfree(pq_coefs); +err_thread_type: +	pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", +		current->comm, total_tests, failed_tests, +		dmatest_persec(runtime, total_tests), +		dmatest_KBs(runtime, total_len), ret); -	if (iterations > 0) -		while (!kthread_should_stop()) { -			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); -			interruptible_sleep_on(&wait_dmatest_exit); -		} +	/* terminate all transfers on specified channels */ +	if (ret) +		dmaengine_terminate_all(chan); + +	thread->done = true; +	wake_up(&thread_wait);  	return ret;  } @@ -483,16 +723,23 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)  	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {  		ret = kthread_stop(thread->task); -		pr_debug("dmatest: thread %s exited with status %d\n", -				thread->task->comm, ret); +		pr_debug("thread %s exited with status %d\n", +			 thread->task->comm, ret);  		list_del(&thread->node); +		put_task_struct(thread->task);  		kfree(thread);  	} + +	/* terminate all transfers on specified channels */ +	dmaengine_terminate_all(dtc->chan); +  	
kfree(dtc);  } -static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) +static int dmatest_add_threads(struct dmatest_info *info, +		struct dmatest_chan *dtc, enum dma_transaction_type type)  { +	struct dmatest_params *params = &info->params;  	struct dmatest_thread *thread;  	struct dma_chan *chan = dtc->chan;  	char *op; @@ -507,35 +754,37 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty  	else  		return -EINVAL; -	for (i = 0; i < threads_per_chan; i++) { +	for (i = 0; i < params->threads_per_chan; i++) {  		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);  		if (!thread) { -			pr_warning("dmatest: No memory for %s-%s%u\n", -				   dma_chan_name(chan), op, i); - +			pr_warn("No memory for %s-%s%u\n", +				dma_chan_name(chan), op, i);  			break;  		} +		thread->info = info;  		thread->chan = dtc->chan;  		thread->type = type;  		smp_wmb(); -		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", +		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",  				dma_chan_name(chan), op, i);  		if (IS_ERR(thread->task)) { -			pr_warning("dmatest: Failed to run thread %s-%s%u\n", -					dma_chan_name(chan), op, i); +			pr_warn("Failed to create thread %s-%s%u\n", +				dma_chan_name(chan), op, i);  			kfree(thread);  			break;  		}  		/* srcbuf and dstbuf are allocated by the thread itself */ - +		get_task_struct(thread->task);  		list_add_tail(&thread->node, &dtc->threads); +		wake_up_process(thread->task);  	}  	return i;  } -static int dmatest_add_channel(struct dma_chan *chan) +static int dmatest_add_channel(struct dmatest_info *info, +		struct dma_chan *chan)  {  	struct dmatest_chan	*dtc;  	struct dma_device	*dma_dev = chan->device; @@ -544,7 +793,7 @@ static int dmatest_add_channel(struct dma_chan *chan)  	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);  	if (!dtc) { -		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); +		pr_warn("No memory for %s\n", dma_chan_name(chan));  		return -ENOMEM;  	} @@ -552,77 +801,186 @@ static int dmatest_add_channel(struct dma_chan *chan)  	INIT_LIST_HEAD(&dtc->threads);  	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { -		cnt = dmatest_add_threads(dtc, DMA_MEMCPY); +		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);  		thread_count += cnt > 0 ? cnt : 0;  	}  	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { -		cnt = dmatest_add_threads(dtc, DMA_XOR); +		cnt = dmatest_add_threads(info, dtc, DMA_XOR);  		thread_count += cnt > 0 ? cnt : 0;  	}  	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { -		cnt = dmatest_add_threads(dtc, DMA_PQ); -		thread_count += cnt > 0 ?: 0; +		cnt = dmatest_add_threads(info, dtc, DMA_PQ); +		thread_count += cnt > 0 ? 
cnt : 0;  	} -	pr_info("dmatest: Started %u threads using %s\n", +	pr_info("Started %u threads using %s\n",  		thread_count, dma_chan_name(chan)); -	list_add_tail(&dtc->node, &dmatest_channels); -	nr_channels++; +	list_add_tail(&dtc->node, &info->channels); +	info->nr_channels++;  	return 0;  }  static bool filter(struct dma_chan *chan, void *param)  { -	if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) +	struct dmatest_params *params = param; + +	if (!dmatest_match_channel(params, chan) || +	    !dmatest_match_device(params, chan->device))  		return false;  	else  		return true;  } -static int __init dmatest_init(void) +static void request_channels(struct dmatest_info *info, +			     enum dma_transaction_type type)  {  	dma_cap_mask_t mask; -	struct dma_chan *chan; -	int err = 0;  	dma_cap_zero(mask); -	dma_cap_set(DMA_MEMCPY, mask); +	dma_cap_set(type, mask);  	for (;;) { -		chan = dma_request_channel(mask, filter, NULL); +		struct dmatest_params *params = &info->params; +		struct dma_chan *chan; + +		chan = dma_request_channel(mask, filter, params);  		if (chan) { -			err = dmatest_add_channel(chan); -			if (err) { +			if (dmatest_add_channel(info, chan)) {  				dma_release_channel(chan);  				break; /* add_channel failed, punt */  			}  		} else  			break; /* no more channels available */ -		if (max_channels && nr_channels >= max_channels) +		if (params->max_channels && +		    info->nr_channels >= params->max_channels)  			break; /* we have all we need */  	} +} -	return err; +static void run_threaded_test(struct dmatest_info *info) +{ +	struct dmatest_params *params = &info->params; + +	/* Copy test parameters */ +	params->buf_size = test_buf_size; +	strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); +	strlcpy(params->device, strim(test_device), sizeof(params->device)); +	params->threads_per_chan = threads_per_chan; +	params->max_channels = max_channels; +	params->iterations = iterations; +	params->xor_sources = xor_sources; +	params->pq_sources = pq_sources; +	params->timeout = timeout; +	params->noverify = noverify; + +	request_channels(info, DMA_MEMCPY); +	request_channels(info, DMA_XOR); +	request_channels(info, DMA_PQ);  } -/* when compiled-in wait for drivers to load first */ -late_initcall(dmatest_init); -static void __exit dmatest_exit(void) +static void stop_threaded_test(struct dmatest_info *info)  {  	struct dmatest_chan *dtc, *_dtc;  	struct dma_chan *chan; -	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { +	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {  		list_del(&dtc->node);  		chan = dtc->chan;  		dmatest_cleanup_channel(dtc); -		pr_debug("dmatest: dropped channel %s\n", -			 dma_chan_name(chan)); +		pr_debug("dropped channel %s\n", dma_chan_name(chan));  		dma_release_channel(chan);  	} + +	info->nr_channels = 0; +} + +static void restart_threaded_test(struct dmatest_info *info, bool run) +{ +	/* we might be called early to set run=, defer running until all +	 * parameters have been evaluated +	 */ +	if (!info->did_init) +		return; + +	/* Stop any running test first */ +	stop_threaded_test(info); + +	/* Run test with new parameters */ +	run_threaded_test(info); +} + +static int dmatest_run_get(char *val, const struct kernel_param *kp) +{ +	struct dmatest_info *info = &test_info; + +	mutex_lock(&info->lock); +	if (is_threaded_test_run(info)) { +		dmatest_run = true; +	} else { +		stop_threaded_test(info); +		dmatest_run = false; +	} +	mutex_unlock(&info->lock); + +	return param_get_bool(val, kp); 
+}
+
+static int dmatest_run_set(const char *val, const struct kernel_param *kp)
+{
+	struct dmatest_info *info = &test_info;
+	int ret;
+
+	mutex_lock(&info->lock);
+	ret = param_set_bool(val, kp);
+	if (ret) {
+		mutex_unlock(&info->lock);
+		return ret;
+	}
+
+	if (is_threaded_test_run(info))
+		ret = -EBUSY;
+	else if (dmatest_run)
+		restart_threaded_test(info, dmatest_run);
+
+	mutex_unlock(&info->lock);
+
+	return ret;
+}
+
+static int __init dmatest_init(void)
+{
+	struct dmatest_info *info = &test_info;
+	struct dmatest_params *params = &info->params;
+
+	if (dmatest_run) {
+		mutex_lock(&info->lock);
+		run_threaded_test(info);
+		mutex_unlock(&info->lock);
+	}
+
+	if (params->iterations && wait)
+		wait_event(thread_wait, !is_threaded_test_run(info));
+
+	/* module parameters are stable, inittime tests are started,
+	 * let userspace take over 'run' control
+	 */
+	info->did_init = true;
+
+	return 0;
+}
+/* when compiled-in wait for drivers to load first */
+late_initcall(dmatest_init);
+
+static void __exit dmatest_exit(void)
+{
+	struct dmatest_info *info = &test_info;
+
+	mutex_lock(&info->lock);
+	stop_threaded_test(info);
+	mutex_unlock(&info->lock);
 }
 
 module_exit(dmatest_exit);
-MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
 MODULE_LICENSE("GPL v2");
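A second pattern introduced above, in dmatest_func(), is the "poor man's completion": rather than a struct completion, a done flag is paired with an on-stack wait queue so the thread can sleep in wait_event_freezable_timeout() and stay freezable across suspend. Below is a minimal sketch of that pattern under the assumption that some asynchronous work (for dmatest, the submitted DMA descriptor) invokes the callback; the demo_* names are illustrative and error handling is trimmed.

/*
 * Sketch of the "poor man's completion" used by dmatest_func(): a done
 * flag plus an on-stack wait queue, waited on with
 * wait_event_freezable_timeout().  The waiting thread is assumed to be
 * freezable (dmatest calls set_freezable()); the work that eventually
 * calls demo_callback() is assumed and not shown here.
 */
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct demo_done {
	bool			done;
	wait_queue_head_t	*wait;
};

/* completion handler, e.g. installed as a dmaengine tx->callback */
static void demo_callback(void *arg)
{
	struct demo_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

/* wait for demo_callback(), giving up after timeout_ms milliseconds */
static int demo_wait(struct demo_done *done, unsigned int timeout_ms)
{
	wait_event_freezable_timeout(*done->wait, done->done,
				     msecs_to_jiffies(timeout_ms));
	return done->done ? 0 : -ETIMEDOUT;
}

static int demo_submit_and_wait(void)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct demo_done done = { .wait = &done_wait };

	/* ... submit work that will eventually call demo_callback(&done) ... */

	return demo_wait(&done, 3000);
}

Because the wait queue lives on the waiter's stack, a completion that fires after the timeout would touch freed memory; the patch accepts that trade-off and documents it in the "leave it dangling" comment above rather than allocating the wait state per iteration.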
