ntb_perf.c (Linux v4.6)
  1/*
  2 * This file is provided under a dual BSD/GPLv2 license.  When using or
  3 *   redistributing this file, you may do so under either license.
  4 *
  5 *   GPL LICENSE SUMMARY
  6 *
  7 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 
  8 *
  9 *   This program is free software; you can redistribute it and/or modify
 10 *   it under the terms of version 2 of the GNU General Public License as
 11 *   published by the Free Software Foundation.
 12 *
 13 *   BSD LICENSE
 14 *
 15 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 
 16 *
 17 *   Redistribution and use in source and binary forms, with or without
 18 *   modification, are permitted provided that the following conditions
 19 *   are met:
 20 *
 21 *     * Redistributions of source code must retain the above copyright
 22 *       notice, this list of conditions and the following disclaimer.
 23 *     * Redistributions in binary form must reproduce the above copyright
 24 *       notice, this list of conditions and the following disclaimer in
 25 *       the documentation and/or other materials provided with the
 26 *       distribution.
 27 *     * Neither the name of Intel Corporation nor the names of its
 28 *       contributors may be used to endorse or promote products derived
 29 *       from this software without specific prior written permission.
 30 *
 31 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 32 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 33 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 34 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 35 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 36 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 37 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 38 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 39 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 40 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 41 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 42 *
 43 *   PCIe NTB Perf Linux driver
 44 */
 45
 46#include <linux/init.h>
 47#include <linux/kernel.h>
 48#include <linux/module.h>
 49#include <linux/kthread.h>
 50#include <linux/time.h>
 51#include <linux/timer.h>
 52#include <linux/dma-mapping.h>
 
 53#include <linux/pci.h>
 
 54#include <linux/slab.h>
 55#include <linux/spinlock.h>
 56#include <linux/debugfs.h>
 57#include <linux/dmaengine.h>
 58#include <linux/delay.h>
 59#include <linux/sizes.h>
 60#include <linux/ntb.h>
 61
 62#define DRIVER_NAME		"ntb_perf"
 63#define DRIVER_DESCRIPTION	"PCIe NTB Performance Measurement Tool"
 64
 65#define DRIVER_LICENSE		"Dual BSD/GPL"
 66#define DRIVER_VERSION		"1.0"
 67#define DRIVER_AUTHOR		"Dave Jiang <dave.jiang@intel.com>"
 68
 69#define PERF_LINK_DOWN_TIMEOUT	10
 70#define PERF_VERSION		0xffff0001
 71#define MAX_THREADS		32
 72#define MAX_TEST_SIZE		SZ_1M
 73#define MAX_SRCS		32
 74#define DMA_OUT_RESOURCE_TO	50
 75#define DMA_RETRIES		20
 76#define SZ_4G			(1ULL << 32)
 77#define MAX_SEG_ORDER		20 /* no larger than 1M for kmalloc buffer */
 78
 79MODULE_LICENSE(DRIVER_LICENSE);
 80MODULE_VERSION(DRIVER_VERSION);
 81MODULE_AUTHOR(DRIVER_AUTHOR);
 82MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
 83
 84static struct dentry *perf_debugfs_dir;
 85
 86static unsigned int seg_order = 19; /* 512K */
 87module_param(seg_order, uint, 0644);
 88MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");
 89
 90static unsigned int run_order = 32; /* 4G */
 91module_param(run_order, uint, 0644);
 92MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");
 93
 94static bool use_dma; /* default to 0 */
 95module_param(use_dma, bool, 0644);
 96MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");
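   /*
    * A minimal usage sketch, assuming debugfs is mounted at /sys/kernel/debug
    * and the NTB device shows up as <pci-id> (the pci_name() directory that
    * perf_debugfs_setup() below creates):
    *
    *   root@self# insmod ntb_perf.ko seg_order=19 run_order=32 use_dma=1
    *   root@self# echo 8 > /sys/kernel/debug/ntb_perf/<pci-id>/threads
    *   root@self# echo 1 > /sys/kernel/debug/ntb_perf/<pci-id>/run
    *   root@self# dmesg | tail  # per-thread bytes copied, usecs, MBytes/s
    *
    * With these orders each thread copies 2^32 bytes in 2^19-byte chunks.
    * Writing to "run" toggles the test: it launches perf_threads kthreads
    * when idle and stops them when a test is already running.
    */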
 97
 98struct perf_mw {
 99	phys_addr_t	phys_addr;
100	resource_size_t	phys_size;
101	resource_size_t	xlat_align;
102	resource_size_t	xlat_align_size;
103	void __iomem	*vbase;
104	size_t		xlat_size;
105	size_t		buf_size;
106	void		*virt_addr;
107	dma_addr_t	dma_addr;
108};
109
110struct perf_ctx;
111
112struct pthr_ctx {
113	struct task_struct	*thread;
114	struct perf_ctx		*perf;
115	atomic_t		dma_sync;
116	struct dma_chan		*dma_chan;
117	int			dma_prep_err;
118	int			src_idx;
119	void			*srcs[MAX_SRCS];
120};
 
 
121
122struct perf_ctx {
123	struct ntb_dev		*ntb;
124	spinlock_t		db_lock;
125	struct perf_mw		mw;
126	bool			link_is_up;
127	struct work_struct	link_cleanup;
128	struct delayed_work	link_work;
129	struct dentry		*debugfs_node_dir;
130	struct dentry		*debugfs_run;
131	struct dentry		*debugfs_threads;
132	u8			perf_threads;
133	bool			run;
134	struct pthr_ctx		pthr_ctx[MAX_THREADS];
135	atomic_t		tsync;
 
136};
137
138enum {
139	VERSION = 0,
140	MW_SZ_HIGH,
141	MW_SZ_LOW,
142	SPAD_MSG,
143	SPAD_ACK,
144	MAX_SPAD
145};
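   /*
    * Descriptive note on the scratchpad layout above: during link bring-up
    * each side writes the upper and lower halves of its memory window size
    * into the peer's MW_SZ_HIGH/MW_SZ_LOW scratchpads and PERF_VERSION into
    * VERSION, then reads back what the peer wrote and sizes its own
    * translation from it (see perf_link_work() below).  SPAD_MSG and
    * SPAD_ACK are reserved but not referenced by the code shown here.
    */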
146
147static void perf_link_event(void *ctx)
148{
149	struct perf_ctx *perf = ctx;
150
151	if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1)
152		schedule_delayed_work(&perf->link_work, 2*HZ);
153	else
154		schedule_work(&perf->link_cleanup);
155}
156
157static void perf_db_event(void *ctx, int vec)
158{
159	struct perf_ctx *perf = ctx;
160	u64 db_bits, db_mask;
161
162	db_mask = ntb_db_vector_mask(perf->ntb, vec);
163	db_bits = ntb_db_read(perf->ntb);
164
165	dev_dbg(&perf->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
166		vec, db_mask, db_bits);
167}
168
169static const struct ntb_ctx_ops perf_ops = {
170	.link_event = perf_link_event,
171	.db_event = perf_db_event,
 
172};
173
174static void perf_copy_callback(void *data)
175{
176	struct pthr_ctx *pctx = data;
177
178	atomic_dec(&pctx->dma_sync);
179}
180
181static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
182			 char *src, size_t size)
183{
184	struct perf_ctx *perf = pctx->perf;
185	struct dma_async_tx_descriptor *txd;
186	struct dma_chan *chan = pctx->dma_chan;
187	struct dma_device *device;
188	struct dmaengine_unmap_data *unmap;
189	dma_cookie_t cookie;
190	size_t src_off, dst_off;
191	struct perf_mw *mw = &perf->mw;
192	void __iomem *vbase;
193	void __iomem *dst_vaddr;
194	dma_addr_t dst_phys;
195	int retries = 0;
196
197	if (!use_dma) {
198		memcpy_toio(dst, src, size);
199		return size;
200	}
201
202	if (!chan) {
203		dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
 
204		return -EINVAL;
205	}
206
207	device = chan->device;
208	src_off = (uintptr_t)src & ~PAGE_MASK;
209	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
210
211	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
212		return -ENODEV;
213
214	vbase = mw->vbase;
215	dst_vaddr = dst;
216	dst_phys = mw->phys_addr + (dst_vaddr - vbase);
217
218	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
219	if (!unmap)
220		return -ENOMEM;
221
222	unmap->len = size;
223	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
224				      src_off, size, DMA_TO_DEVICE);
225	if (dma_mapping_error(device->dev, unmap->addr[0]))
226		goto err_get_unmap;
 
227
228	unmap->to_cnt = 1;
229
230	do {
231		txd = device->device_prep_dma_memcpy(chan, dst_phys,
232						     unmap->addr[0],
233						     size, DMA_PREP_INTERRUPT);
234		if (!txd) {
235			set_current_state(TASK_INTERRUPTIBLE);
236			schedule_timeout(DMA_OUT_RESOURCE_TO);
237		}
238	} while (!txd && (++retries < DMA_RETRIES));
239
240	if (!txd) {
241		pctx->dma_prep_err++;
242		goto err_get_unmap;
243	}
244
245	txd->callback = perf_copy_callback;
246	txd->callback_param = pctx;
247	dma_set_unmap(txd, unmap);
248
249	cookie = dmaengine_submit(txd);
250	if (dma_submit_error(cookie))
251		goto err_set_unmap;
252
253	atomic_inc(&pctx->dma_sync);
254	dma_async_issue_pending(chan);
255
256	return size;
 
257
258err_set_unmap:
259	dmaengine_unmap_put(unmap);
260err_get_unmap:
261	dmaengine_unmap_put(unmap);
262	return 0;
263}
264
265static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
266			  u64 buf_size, u64 win_size, u64 total)
267{
268	int chunks, total_chunks, i;
269	int copied_chunks = 0;
270	u64 copied = 0, result;
271	char __iomem *tmp = dst;
272	u64 perf, diff_us;
273	ktime_t kstart, kstop, kdiff;
274
275	chunks = div64_u64(win_size, buf_size);
276	total_chunks = div64_u64(total, buf_size);
277	kstart = ktime_get();
278
279	for (i = 0; i < total_chunks; i++) {
280		result = perf_copy(pctx, tmp, src, buf_size);
281		copied += result;
282		copied_chunks++;
283		if (copied_chunks == chunks) {
284			tmp = dst;
285			copied_chunks = 0;
286		} else
287			tmp += buf_size;
288
289		/* Probably should schedule every 4GB to prevent soft hang. */
290		if (((copied % SZ_4G) == 0) && !use_dma) {
291			set_current_state(TASK_INTERRUPTIBLE);
292			schedule_timeout(1);
293		}
294	}
295
296	if (use_dma) {
297		pr_info("%s: All DMA descriptors submitted\n", current->comm);
298		while (atomic_read(&pctx->dma_sync) != 0)
299			msleep(20);
300	}
301
302	kstop = ktime_get();
303	kdiff = ktime_sub(kstop, kstart);
304	diff_us = ktime_to_us(kdiff);
305
306	pr_info("%s: copied %llu bytes\n", current->comm, copied);
307
308	pr_info("%s: lasted %llu usecs\n", current->comm, diff_us);
309
310	perf = div64_u64(copied, diff_us);
 
311
312	pr_info("%s: MBytes/s: %llu\n", current->comm, perf);
313
314	return 0;
315}
316
317static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
318{
319	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
320}
321
322static int ntb_perf_thread(void *data)
323{
324	struct pthr_ctx *pctx = data;
325	struct perf_ctx *perf = pctx->perf;
326	struct pci_dev *pdev = perf->ntb->pdev;
327	struct perf_mw *mw = &perf->mw;
328	char __iomem *dst;
329	u64 win_size, buf_size, total;
330	void *src;
331	int rc, node, i;
332	struct dma_chan *dma_chan = NULL;
333
334	pr_info("kthread %s starting...\n", current->comm);
 
335
336	node = dev_to_node(&pdev->dev);
337
338	if (use_dma && !pctx->dma_chan) {
339		dma_cap_mask_t dma_mask;
340
341		dma_cap_zero(dma_mask);
342		dma_cap_set(DMA_MEMCPY, dma_mask);
343		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
344					       (void *)(unsigned long)node);
345		if (!dma_chan) {
346			pr_warn("%s: cannot acquire DMA channel, quitting\n",
347				current->comm);
348			return -ENODEV;
349		}
350		pctx->dma_chan = dma_chan;
351	}
352
353	for (i = 0; i < MAX_SRCS; i++) {
354		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
355		if (!pctx->srcs[i]) {
356			rc = -ENOMEM;
357			goto err;
358		}
359	}
360
361	win_size = mw->phys_size;
362	buf_size = 1ULL << seg_order;
363	total = 1ULL << run_order;
364
365	if (buf_size > MAX_TEST_SIZE)
366		buf_size = MAX_TEST_SIZE;
367
368	dst = (char __iomem *)mw->vbase;
 
369
370	atomic_inc(&perf->tsync);
371	while (atomic_read(&perf->tsync) != perf->perf_threads)
372		schedule();
373
374	src = pctx->srcs[pctx->src_idx];
375	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);
376
377	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);
378
379	atomic_dec(&perf->tsync);
380
381	if (rc < 0) {
382		pr_err("%s: failed\n", current->comm);
383		rc = -ENXIO;
384		goto err;
385	}
386
387	for (i = 0; i < MAX_SRCS; i++) {
388		kfree(pctx->srcs[i]);
389		pctx->srcs[i] = NULL;
390	}
 
391
392	return 0;
393
394err:
395	for (i = 0; i < MAX_SRCS; i++) {
396		kfree(pctx->srcs[i]);
397		pctx->srcs[i] = NULL;
398	}
399
400	if (dma_chan) {
401		dma_release_channel(dma_chan);
402		pctx->dma_chan = NULL;
403	}
404
405	return rc;
406}
407
408static void perf_free_mw(struct perf_ctx *perf)
409{
410	struct perf_mw *mw = &perf->mw;
411	struct pci_dev *pdev = perf->ntb->pdev;
412
413	if (!mw->virt_addr)
414		return;
415
416	ntb_mw_clear_trans(perf->ntb, 0);
417	dma_free_coherent(&pdev->dev, mw->buf_size,
418			  mw->virt_addr, mw->dma_addr);
419	mw->xlat_size = 0;
420	mw->buf_size = 0;
421	mw->virt_addr = NULL;
422}
423
424static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
425{
426	struct perf_mw *mw = &perf->mw;
427	size_t xlat_size, buf_size;
428	int rc;
429
430	if (!size)
431		return -EINVAL;
 
 
432
433	xlat_size = round_up(size, mw->xlat_align_size);
434	buf_size = round_up(size, mw->xlat_align);
435
436	if (mw->xlat_size == xlat_size)
437		return 0;
438
439	if (mw->buf_size)
440		perf_free_mw(perf);
441
442	mw->xlat_size = xlat_size;
443	mw->buf_size = buf_size;
444
445	mw->virt_addr = dma_alloc_coherent(&perf->ntb->pdev->dev, buf_size,
446					   &mw->dma_addr, GFP_KERNEL);
447	if (!mw->virt_addr) {
448		mw->xlat_size = 0;
449		mw->buf_size = 0;
   		return -ENOMEM;
450	}
451
452	rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size);
453	if (rc) {
454		dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
455		perf_free_mw(perf);
456		return -EIO;
457	}
458
459	return 0;
460}
461
462static void perf_link_work(struct work_struct *work)
463{
464	struct perf_ctx *perf =
465		container_of(work, struct perf_ctx, link_work.work);
466	struct ntb_dev *ndev = perf->ntb;
467	struct pci_dev *pdev = ndev->pdev;
468	u32 val;
469	u64 size;
470	int rc;
471
472	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);
 
473
474	size = perf->mw.phys_size;
475	ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
476	ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
477	ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);
478
479	/* now read what peer wrote */
480	val = ntb_spad_read(ndev, VERSION);
481	if (val != PERF_VERSION) {
482		dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
483		goto out;
484	}
485
486	val = ntb_spad_read(ndev, MW_SZ_HIGH);
487	size = (u64)val << 32;
488
489	val = ntb_spad_read(ndev, MW_SZ_LOW);
490	size |= val;
491
492	dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);
 
493
494	rc = perf_set_mw(perf, size);
495	if (rc)
496		goto out1;
497
498	perf->link_is_up = true;
 
499
500	return;
501
502out1:
503	perf_free_mw(perf);
504
505out:
506	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
507		schedule_delayed_work(&perf->link_work,
508				      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
509}
510
511static void perf_link_cleanup(struct work_struct *work)
512{
513	struct perf_ctx *perf = container_of(work,
514					     struct perf_ctx,
515					     link_cleanup);
516
517	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);
518
519	if (!perf->link_is_up)
520		cancel_delayed_work_sync(&perf->link_work);
521}
522
523static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
524{
525	struct perf_mw *mw;
526	int rc;
527
528	mw = &perf->mw;
 
529
530	rc = ntb_mw_get_range(ntb, 0, &mw->phys_addr, &mw->phys_size,
531			      &mw->xlat_align, &mw->xlat_align_size);
532	if (rc)
533		return rc;
534
535	perf->mw.vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
536	if (!mw->vbase)
537		return -ENOMEM;
538
539	return 0;
540}
541
542static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
543				size_t count, loff_t *offp)
544{
545	struct perf_ctx *perf = filp->private_data;
546	char *buf;
547	ssize_t ret, out_offset;
548
549	if (!perf)
550		return 0;
551
552	buf = kmalloc(64, GFP_KERNEL);
553	if (!buf)
554		return -ENOMEM;
555	out_offset = snprintf(buf, 64, "%d\n", perf->run);
556	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
557	kfree(buf);
558
559	return ret;
560}
561
562static void threads_cleanup(struct perf_ctx *perf)
 
563{
564	struct pthr_ctx *pctx;
565	int i;
566
567	perf->run = false;
568	for (i = 0; i < MAX_THREADS; i++) {
569		pctx = &perf->pthr_ctx[i];
570		if (pctx->thread) {
571			kthread_stop(pctx->thread);
572			pctx->thread = NULL;
573		}
574	}
575}
576
577static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
578				 size_t count, loff_t *offp)
579{
580	struct perf_ctx *perf = filp->private_data;
581	int node, i;
582
583	if (!perf->link_is_up)
584		return 0;
585
586	if (perf->perf_threads == 0)
587		return 0;
 
 
588
589	if (atomic_read(&perf->tsync) == 0)
590		perf->run = false;
 
 
591
592	if (perf->run)
593		threads_cleanup(perf);
594	else {
595		perf->run = true;
596
597		if (perf->perf_threads > MAX_THREADS) {
598			perf->perf_threads = MAX_THREADS;
599			pr_info("Reset total threads to: %u\n", MAX_THREADS);
600		}
601
602		/* no greater than 1M */
603		if (seg_order > MAX_SEG_ORDER) {
604			seg_order = MAX_SEG_ORDER;
605			pr_info("Fix seg_order to %u\n", seg_order);
606		}
607
608		if (run_order < seg_order) {
609			run_order = seg_order;
610			pr_info("Fix run_order to %u\n", run_order);
611		}
612
613		node = dev_to_node(&perf->ntb->pdev->dev);
614		/* launch kernel thread */
615		for (i = 0; i < perf->perf_threads; i++) {
616			struct pthr_ctx *pctx;
617
618			pctx = &perf->pthr_ctx[i];
619			atomic_set(&pctx->dma_sync, 0);
620			pctx->perf = perf;
621			pctx->thread =
622				kthread_create_on_node(ntb_perf_thread,
623						       (void *)pctx,
624						       node, "ntb_perf %d", i);
625			if (IS_ERR(pctx->thread)) {
626				pctx->thread = NULL;
627				goto err;
628			} else
629				wake_up_process(pctx->thread);
630
631			if (perf->run == false)
632				return -ENXIO;
633		}
634
635	}
636
637	return count;
 
638
639err:
640	threads_cleanup(perf);
641	return -ENXIO;
642}
643
644static const struct file_operations ntb_perf_debugfs_run = {
645	.owner = THIS_MODULE,
646	.open = simple_open,
647	.read = debugfs_run_read,
648	.write = debugfs_run_write,
649};
650
651static int perf_debugfs_setup(struct perf_ctx *perf)
 
652{
653	struct pci_dev *pdev = perf->ntb->pdev;
 
 
654
655	if (!debugfs_initialized())
656		return -ENODEV;
657
658	if (!perf_debugfs_dir) {
659		perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
660		if (!perf_debugfs_dir)
661			return -ENODEV;
662	}
663
664	perf->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
665						    perf_debugfs_dir);
666	if (!perf->debugfs_node_dir)
667		return -ENODEV;
668
669	perf->debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
670						perf->debugfs_node_dir, perf,
671						&ntb_perf_debugfs_run);
672	if (!perf->debugfs_run)
673		return -ENODEV;
674
675	perf->debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
676						  perf->debugfs_node_dir,
677						  &perf->perf_threads);
678	if (!perf->debugfs_threads)
679		return -ENODEV;
680
681	return 0;
682}
683
684static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
685{
686	struct pci_dev *pdev = ntb->pdev;
687	struct perf_ctx *perf;
688	int node;
689	int rc = 0;
690
691	node = dev_to_node(&pdev->dev);
 
 
692
693	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
694	if (!perf) {
695		rc = -ENOMEM;
696		goto err_perf;
697	}
698
699	perf->ntb = ntb;
700	perf->perf_threads = 1;
701	atomic_set(&perf->tsync, 0);
702	perf->run = false;
703	spin_lock_init(&perf->db_lock);
704	perf_setup_mw(ntb, perf);
705	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
706	INIT_WORK(&perf->link_cleanup, perf_link_cleanup);
707
708	rc = ntb_set_ctx(ntb, perf, &perf_ops);
709	if (rc)
710		goto err_ctx;
711
712	perf->link_is_up = false;
713	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
714	ntb_link_event(ntb);
715
716	rc = perf_debugfs_setup(perf);
717	if (rc)
718		goto err_ctx;
719
720	return 0;
721
722err_ctx:
723	cancel_delayed_work_sync(&perf->link_work);
724	cancel_work_sync(&perf->link_cleanup);
725	kfree(perf);
726err_perf:
727	return rc;
728}
729
730static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
731{
732	struct perf_ctx *perf = ntb->ctx;
733	int i;
734
735	dev_dbg(&perf->ntb->dev, "%s called\n", __func__);
 
 
736
737	cancel_delayed_work_sync(&perf->link_work);
738	cancel_work_sync(&perf->link_cleanup);
 
739
740	ntb_clear_ctx(ntb);
741	ntb_link_disable(ntb);
742
743	debugfs_remove_recursive(perf_debugfs_dir);
744	perf_debugfs_dir = NULL;
 
745
746	if (use_dma) {
747		for (i = 0; i < MAX_THREADS; i++) {
748			struct pthr_ctx *pctx = &perf->pthr_ctx[i];
749
750			if (pctx->dma_chan)
751				dma_release_channel(pctx->dma_chan);
752		}
753	}
754
755	kfree(perf);
756}
757
758static struct ntb_client perf_client = {
759	.ops = {
760		.probe = perf_probe,
761		.remove = perf_remove,
762	},
763};
764module_ntb_client(perf_client);
ntb_perf.c (Linux v5.4)
   1/*
   2 * This file is provided under a dual BSD/GPLv2 license.  When using or
   3 *   redistributing this file, you may do so under either license.
   4 *
   5 *   GPL LICENSE SUMMARY
   6 *
   7 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
   8 *   Copyright(c) 2017 T-Platforms. All Rights Reserved.
   9 *
  10 *   This program is free software; you can redistribute it and/or modify
  11 *   it under the terms of version 2 of the GNU General Public License as
  12 *   published by the Free Software Foundation.
  13 *
  14 *   BSD LICENSE
  15 *
  16 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
  17 *   Copyright(c) 2017 T-Platforms. All Rights Reserved.
  18 *
  19 *   Redistribution and use in source and binary forms, with or without
  20 *   modification, are permitted provided that the following conditions
  21 *   are met:
  22 *
  23 *     * Redistributions of source code must retain the above copyright
  24 *       notice, this list of conditions and the following disclaimer.
  25 *     * Redistributions in binary form must reproduce the above copyright
  26 *       notice, this list of conditions and the following disclaimer in
  27 *       the documentation and/or other materials provided with the
  28 *       distribution.
  29 *     * Neither the name of Intel Corporation nor the names of its
  30 *       contributors may be used to endorse or promote products derived
  31 *       from this software without specific prior written permission.
  32 *
  33 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  34 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  35 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  36 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  37 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  38 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  39 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  40 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  41 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  42 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  43 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  44 *
  45 * PCIe NTB Perf Linux driver
  46 */
  47
  48/*
  49 * How to use this tool, by example.
  50 *
  51 * Assuming $DBG_DIR is something like:
  52 * '/sys/kernel/debug/ntb_perf/0000:00:03.0'
  53 * Suppose aside from local device there is at least one remote device
  54 * connected to NTB with index 0.
  55 *-----------------------------------------------------------------------------
  56 * Eg: install driver with specified chunk/total orders and dma-enabled flag
  57 *
  58 * root@self# insmod ntb_perf.ko chunk_order=19 total_order=28 use_dma
  59 *-----------------------------------------------------------------------------
  60 * Eg: check NTB ports (index) and MW mapping information
  61 *
  62 * root@self# cat $DBG_DIR/info
  63 *-----------------------------------------------------------------------------
  64 * Eg: start performance test with peer (index 0) and get the test metrics
  65 *
  66 * root@self# echo 0 > $DBG_DIR/run
  67 * root@self# cat $DBG_DIR/run
  68 */
  69
  70#include <linux/init.h>
  71#include <linux/kernel.h>
  72#include <linux/module.h>
  73#include <linux/sched.h>
  74#include <linux/wait.h>
 
  75#include <linux/dma-mapping.h>
  76#include <linux/dmaengine.h>
  77#include <linux/pci.h>
  78#include <linux/ktime.h>
  79#include <linux/slab.h>
  80#include <linux/delay.h>
  81#include <linux/sizes.h>
  82#include <linux/workqueue.h>
  83#include <linux/debugfs.h>
  84#include <linux/random.h>
  85#include <linux/ntb.h>
  86
  87#define DRIVER_NAME		"ntb_perf"
  88#define DRIVER_VERSION		"2.0"
  89
  90MODULE_LICENSE("Dual BSD/GPL");
  91MODULE_VERSION(DRIVER_VERSION);
  92MODULE_AUTHOR("Dave Jiang <dave.jiang@intel.com>");
  93MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");
 
 
  94
  95#define MAX_THREADS_CNT		32
  96#define DEF_THREADS_CNT		1
  97#define MAX_CHUNK_SIZE		SZ_1M
  98#define MAX_CHUNK_ORDER		20 /* no larger than 1M */
  99
 100#define DMA_TRIES		100
 101#define DMA_MDELAY		10
 102
 103#define MSG_TRIES		1000
 104#define MSG_UDELAY_LOW		1000
 105#define MSG_UDELAY_HIGH		2000
 106
 107#define PERF_BUF_LEN 1024
 108
 109static unsigned long max_mw_size;
 110module_param(max_mw_size, ulong, 0644);
 111MODULE_PARM_DESC(max_mw_size, "Upper limit of memory window size");
 112
 113static unsigned char chunk_order = 19; /* 512K */
 114module_param(chunk_order, byte, 0644);
 115MODULE_PARM_DESC(chunk_order, "Data chunk order [2^n] to transfer");
 116
 117static unsigned char total_order = 30; /* 1G */
 118module_param(total_order, byte, 0644);
 119MODULE_PARM_DESC(total_order, "Total data order [2^n] to transfer");
 120
 121static bool use_dma; /* default to 0 */
 122module_param(use_dma, bool, 0644);
 123MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");
 124
 125/*==============================================================================
 126 *                         Perf driver data definition
 127 *==============================================================================
 128 */
 129
 130enum perf_cmd {
 131	PERF_CMD_INVAL = -1,/* invalid spad command */
 132	PERF_CMD_SSIZE = 0, /* send out buffer size */
 133	PERF_CMD_RSIZE = 1, /* recv in  buffer size */
 134	PERF_CMD_SXLAT = 2, /* send in  buffer xlat */
 135	PERF_CMD_RXLAT = 3, /* recv out buffer xlat */
 136	PERF_CMD_CLEAR = 4, /* clear allocated memory */
 137	PERF_STS_DONE  = 5, /* init is done */
 138	PERF_STS_LNKUP = 6, /* link up state flag */
 139};
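    /*
     * Descriptive note on how these commands chain together (see
     * perf_cmd_recv() and perf_service_work() below): on link-up a node
     * sends PERF_CMD_SSIZE carrying its outbuf size; the peer handles it as
     * PERF_CMD_RSIZE, allocates a matching inbuf and replies with
     * PERF_CMD_SXLAT carrying the inbuf translation address; that reply is
     * handled as PERF_CMD_RXLAT, which programs the outbound window and
     * sets PERF_STS_DONE.  PERF_CMD_CLEAR tears the buffers down again when
     * the link goes down.
     */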
 140
 141struct perf_ctx;
 142
 143struct perf_peer {
 144	struct perf_ctx	*perf;
 145	int pidx;
 146	int gidx;
 147
 148	/* Outbound MW params */
 149	u64 outbuf_xlat;
 150	resource_size_t outbuf_size;
 151	void __iomem *outbuf;
 152
 153	/* Inbound MW params */
 154	dma_addr_t inbuf_xlat;
 155	resource_size_t inbuf_size;
 156	void		*inbuf;
 157
 158	/* NTB connection setup service */
 159	struct work_struct	service;
 160	unsigned long		sts;
 161};
 162#define to_peer_service(__work) \
 163	container_of(__work, struct perf_peer, service)
 164
 165struct perf_thread {
 166	struct perf_ctx *perf;
 167	int tidx;
 168
 169	/* DMA-based test sync parameters */
 170	atomic_t dma_sync;
 171	wait_queue_head_t dma_wait;
 172	struct dma_chan *dma_chan;
 173
 174	/* Data source and measured statistics */
 175	void *src;
 176	u64 copied;
 177	ktime_t duration;
 178	int status;
 179	struct work_struct work;
 180};
 181#define to_thread_work(__work) \
 182	container_of(__work, struct perf_thread, work)
 183
 184struct perf_ctx {
 185	struct ntb_dev *ntb;
 186
 187	/* Global device index and peers descriptors */
 188	int gidx;
 189	int pcnt;
 190	struct perf_peer *peers;
 191
 192	/* Performance measuring work-threads interface */
 193	unsigned long busy_flag;
 194	wait_queue_head_t twait;
 195	atomic_t tsync;
 196	u8 tcnt;
 197	struct perf_peer *test_peer;
 198	struct perf_thread threads[MAX_THREADS_CNT];
 199
 200	/* Scratchpad/Message IO operations */
 201	int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
 202	int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
 203			u64 *data);
 204
 205	struct dentry *dbgfs_dir;
 206};
 207
 208/*
 209 * Scratchpad-based commands interface
 210 */
 211#define PERF_SPAD_CNT(_pcnt) \
 212	(3*((_pcnt) + 1))
 213#define PERF_SPAD_CMD(_gidx) \
 214	(3*(_gidx))
 215#define PERF_SPAD_LDATA(_gidx) \
 216	(3*(_gidx) + 1)
 217#define PERF_SPAD_HDATA(_gidx) \
 218	(3*(_gidx) + 2)
 219#define PERF_SPAD_NOTIFY(_gidx) \
 220	(BIT_ULL(_gidx))
 221
 222/*
 223 * Message-based commands interface
 224 */
 225#define PERF_MSG_CNT		3
 226#define PERF_MSG_CMD		0
 227#define PERF_MSG_LDATA		1
 228#define PERF_MSG_HDATA		2
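    /*
     * Worked example for the register layout above: with a single peer
     * (pcnt == 1) PERF_SPAD_CNT(1) == 6 scratchpads are required.  A sender
     * writes a command into the peer's scratchpad triplet selected by the
     * sender's own global index -- CMD at 3*gidx, the data halves at
     * 3*gidx + 1 and 3*gidx + 2 -- and then rings the peer's
     * PERF_SPAD_NOTIFY() doorbell bit to announce it.  The message-based
     * variant always uses just three registers: 0 for the command, 1 and 2
     * for the low and high 32 bits of the data.
     */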
 229
 230/*==============================================================================
 231 *                           Static data declarations
 232 *==============================================================================
 233 */
 234
 235static struct dentry *perf_dbgfs_topdir;
 236
 237static struct workqueue_struct *perf_wq __read_mostly;
 238
 239/*==============================================================================
 240 *                  NTB cross-link commands execution service
 241 *==============================================================================
 242 */
 243
 244static void perf_terminate_test(struct perf_ctx *perf);
 245
 246static inline bool perf_link_is_up(struct perf_peer *peer)
 247{
 248	u64 link;
 249
 250	link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
 251	return !!(link & BIT_ULL_MASK(peer->pidx));
 252}
 253
 254static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
 255			      u64 data)
 256{
 257	struct perf_ctx *perf = peer->perf;
 258	int try;
 259	u32 sts;
 260
 261	dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);
 262
 263	/*
 264	 * Perform a predefined number of attempts before giving up. The
 265	 * data is written to the port-specific scratchpad to prevent a
 266	 * multi-port access race condition. Additionally, there is no
 267	 * need for local locking, since only the thread-safe service
 268	 * work uses this method.
 269	 */
 270	for (try = 0; try < MSG_TRIES; try++) {
 271		if (!perf_link_is_up(peer))
 272			return -ENOLINK;
 273
 274		sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
 275					 PERF_SPAD_CMD(perf->gidx));
 276		if (sts != PERF_CMD_INVAL) {
 277			usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
 278			continue;
 279		}
 280
 281		ntb_peer_spad_write(perf->ntb, peer->pidx,
 282				    PERF_SPAD_LDATA(perf->gidx),
 283				    lower_32_bits(data));
 284		ntb_peer_spad_write(perf->ntb, peer->pidx,
 285				    PERF_SPAD_HDATA(perf->gidx),
 286				    upper_32_bits(data));
 287		ntb_peer_spad_write(perf->ntb, peer->pidx,
 288				    PERF_SPAD_CMD(perf->gidx),
 289				    cmd);
 290		ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
 291
 292		dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
 293			PERF_SPAD_NOTIFY(peer->gidx));
 294
 295		break;
 296	}
 297
 298	return try < MSG_TRIES ? 0 : -EAGAIN;
 299}
 300
 301static int perf_spad_cmd_recv(struct perf_ctx *perf, int *pidx,
 302			      enum perf_cmd *cmd, u64 *data)
 303{
 304	struct perf_peer *peer;
 305	u32 val;
 306
 307	ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
 308
 309	/*
 310	 * We start scanning from the beginning, since the cleared DB may
 311	 * have been set by any peer. Yes, this gives peers with a smaller
 312	 * index a higher service priority, but it keeps the spad and
 313	 * message code unified and simple.
 314	 */
 315	for (*pidx = 0; *pidx < perf->pcnt; (*pidx)++) {
 316		peer = &perf->peers[*pidx];
 317
 318		if (!perf_link_is_up(peer))
 319			continue;
 320
 321		val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
 322		if (val == PERF_CMD_INVAL)
 323			continue;
 324
 325		*cmd = val;
 326
 327		val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
 328		*data = val;
 329
 330		val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx));
 331		*data |= (u64)val << 32;
 332
 333		/* Next command can be retrieved from now */
 334		ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx),
 335			       PERF_CMD_INVAL);
 336
 337		dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);
 338
 339		return 0;
 340	}
 341
 342	return -ENODATA;
 343}
 344
 345static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
 346			     u64 data)
 347{
 348	struct perf_ctx *perf = peer->perf;
 349	int try, ret;
 350	u64 outbits;
 351
 352	dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);
 353
 354	/*
 355	 * Perform a predefined number of attempts before giving up. Message
 356	 * registers are free of race conditions when accessed from
 357	 * different ports, so there is no need to split the registers by
 358	 * global device index. Local locking isn't needed either, since
 359	 * the method is used from the service work only.
 360	 */
 361	outbits = ntb_msg_outbits(perf->ntb);
 362	for (try = 0; try < MSG_TRIES; try++) {
 363		if (!perf_link_is_up(peer))
 364			return -ENOLINK;
 365
 366		ret = ntb_msg_clear_sts(perf->ntb, outbits);
 367		if (ret)
 368			return ret;
 369
 370		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
 371				   lower_32_bits(data));
 372
 373		if (ntb_msg_read_sts(perf->ntb) & outbits) {
 374			usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
 375			continue;
 376		}
 377
 378		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
 379				   upper_32_bits(data));
 380
 381		/* This call shall trigger peer message event */
 382		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
 383
 384		break;
 385	}
 386
 387	return try < MSG_TRIES ? 0 : -EAGAIN;
 388}
 389
 390static int perf_msg_cmd_recv(struct perf_ctx *perf, int *pidx,
 391			     enum perf_cmd *cmd, u64 *data)
 392{
 393	u64 inbits;
 394	u32 val;
 395
 396	inbits = ntb_msg_inbits(perf->ntb);
 397
 398	if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3)
 399		return -ENODATA;
 400
 401	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_CMD);
 402	*cmd = val;
 403
 404	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_LDATA);
 405	*data = val;
 406
 407	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_HDATA);
 408	*data |= (u64)val << 32;
 409
 410	/* Next command can be retrieved from now */
 411	ntb_msg_clear_sts(perf->ntb, inbits);
 412
 413	dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);
 414
 415	return 0;
 416}
 417
 418static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
 419{
 420	struct perf_ctx *perf = peer->perf;
 421
 422	if (cmd == PERF_CMD_SSIZE || cmd == PERF_CMD_SXLAT)
 423		return perf->cmd_send(peer, cmd, data);
 424
 425	dev_err(&perf->ntb->dev, "Send invalid command\n");
 426	return -EINVAL;
 427}
 428
 429static int perf_cmd_exec(struct perf_peer *peer, enum perf_cmd cmd)
 430{
 431	switch (cmd) {
 432	case PERF_CMD_SSIZE:
 433	case PERF_CMD_RSIZE:
 434	case PERF_CMD_SXLAT:
 435	case PERF_CMD_RXLAT:
 436	case PERF_CMD_CLEAR:
 437		break;
 438	default:
 439		dev_err(&peer->perf->ntb->dev, "Exec invalid command\n");
 440		return -EINVAL;
 441	}
 442
 443	/* No memory barrier needed, since bit ops have an internal lock */
 444	set_bit(cmd, &peer->sts);
 445
 446	dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd);
 447
 448	(void)queue_work(system_highpri_wq, &peer->service);
 449
 450	return 0;
 451}
 452
 453static int perf_cmd_recv(struct perf_ctx *perf)
 454{
 455	struct perf_peer *peer;
 456	int ret, pidx, cmd;
 457	u64 data;
 458
 459	while (!(ret = perf->cmd_recv(perf, &pidx, &cmd, &data))) {
 460		peer = &perf->peers[pidx];
 461
 462		switch (cmd) {
 463		case PERF_CMD_SSIZE:
 464			peer->inbuf_size = data;
 465			return perf_cmd_exec(peer, PERF_CMD_RSIZE);
 466		case PERF_CMD_SXLAT:
 467			peer->outbuf_xlat = data;
 468			return perf_cmd_exec(peer, PERF_CMD_RXLAT);
 469		default:
 470			dev_err(&perf->ntb->dev, "Recv invalid command\n");
 471			return -EINVAL;
 472		}
 473	}
 474
 475	/* Return 0 if no data left to process, otherwise an error */
 476	return ret == -ENODATA ? 0 : ret;
 477}
 478
 479static void perf_link_event(void *ctx)
 480{
 481	struct perf_ctx *perf = ctx;
 482	struct perf_peer *peer;
 483	bool lnk_up;
 484	int pidx;
 485
 486	for (pidx = 0; pidx < perf->pcnt; pidx++) {
 487		peer = &perf->peers[pidx];
 488
 489		lnk_up = perf_link_is_up(peer);
 490
 491		if (lnk_up &&
 492		    !test_and_set_bit(PERF_STS_LNKUP, &peer->sts)) {
 493			perf_cmd_exec(peer, PERF_CMD_SSIZE);
 494		} else if (!lnk_up &&
 495			   test_and_clear_bit(PERF_STS_LNKUP, &peer->sts)) {
 496			perf_cmd_exec(peer, PERF_CMD_CLEAR);
 497		}
 498	}
 499}
 500
 501static void perf_db_event(void *ctx, int vec)
 502{
 503	struct perf_ctx *perf = ctx;
 
 504
 505	dev_dbg(&perf->ntb->dev, "DB vec %d mask %#llx bits %#llx\n", vec,
 506		ntb_db_vector_mask(perf->ntb, vec), ntb_db_read(perf->ntb));
 507
 508	/* Just receive all available commands */
 509	(void)perf_cmd_recv(perf);
 510}
 511
 512static void perf_msg_event(void *ctx)
 513{
 514	struct perf_ctx *perf = ctx;
 515
 516	dev_dbg(&perf->ntb->dev, "Msg status bits %#llx\n",
 517		ntb_msg_read_sts(perf->ntb));
 518
 519	/* Messages are only sent one-by-one */
 520	(void)perf_cmd_recv(perf);
 521}
 522
 523static const struct ntb_ctx_ops perf_ops = {
 524	.link_event = perf_link_event,
 525	.db_event = perf_db_event,
 526	.msg_event = perf_msg_event
 527};
 528
 529static void perf_free_outbuf(struct perf_peer *peer)
 530{
 531	(void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
 532}
 533
 534static int perf_setup_outbuf(struct perf_peer *peer)
 535{
 536	struct perf_ctx *perf = peer->perf;
 537	int ret;
 538
 539	/* Outbuf size can be unaligned due to custom max_mw_size */
 540	ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
 541				    peer->outbuf_xlat, peer->outbuf_size);
 542	if (ret) {
 543		dev_err(&perf->ntb->dev, "Failed to set outbuf translation\n");
 544		return ret;
 545	}
 546
 547	/* Initialization is finally done */
 548	set_bit(PERF_STS_DONE, &peer->sts);
 549
 550	return 0;
 551}
 552
 553static void perf_free_inbuf(struct perf_peer *peer)
 
 554{
 555	if (!peer->inbuf)
 556		return;
 557
 558	(void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
 559	dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size,
 560			  peer->inbuf, peer->inbuf_xlat);
 561	peer->inbuf = NULL;
 562}
 563
 564static int perf_setup_inbuf(struct perf_peer *peer)
 565{
 566	resource_size_t xlat_align, size_align, size_max;
 567	struct perf_ctx *perf = peer->perf;
 568	int ret;
 569
 570	/* Get inbound MW parameters */
 571	ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
 572			       &xlat_align, &size_align, &size_max);
 573	if (ret) {
 574		dev_err(&perf->ntb->dev, "Couldn't get inbuf restrictions\n");
 575		return ret;
 576	}
 577
 578	if (peer->inbuf_size > size_max) {
 579		dev_err(&perf->ntb->dev, "Too big inbuf size %pa > %pa\n",
 580			&peer->inbuf_size, &size_max);
 581		return -EINVAL;
 582	}
 583
 584	peer->inbuf_size = round_up(peer->inbuf_size, size_align);
 585
 586	perf_free_inbuf(peer);
 
 
 587
 588	peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size,
 589					 &peer->inbuf_xlat, GFP_KERNEL);
 590	if (!peer->inbuf) {
 591		dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
 592			&peer->inbuf_size);
 593		return -ENOMEM;
 594	}
 595	if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
 596		dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
 597		goto err_free_inbuf;
 598	}
 599
 600	ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
 601			       peer->inbuf_xlat, peer->inbuf_size);
 602	if (ret) {
 603		dev_err(&perf->ntb->dev, "Failed to set inbuf translation\n");
 604		goto err_free_inbuf;
 605	}
 606
 607	/*
 608	 * We submit the inbuf xlat transmission cmd here to follow the code
 609	 * architecture, even though this method is called from the service
 610	 * work itself, so the command is executed right after it returns.
 611	 */
 612	(void)perf_cmd_exec(peer, PERF_CMD_SXLAT);
 613
 614	return 0;
 615
 616err_free_inbuf:
 617	perf_free_inbuf(peer);
 
 
 618
 619	return ret;
 620}
 
 621
 622static void perf_service_work(struct work_struct *work)
 623{
 624	struct perf_peer *peer = to_peer_service(work);
 625
 626	if (test_and_clear_bit(PERF_CMD_SSIZE, &peer->sts))
 627		perf_cmd_send(peer, PERF_CMD_SSIZE, peer->outbuf_size);
 628
 629	if (test_and_clear_bit(PERF_CMD_RSIZE, &peer->sts))
 630		perf_setup_inbuf(peer);
 631
 632	if (test_and_clear_bit(PERF_CMD_SXLAT, &peer->sts))
 633		perf_cmd_send(peer, PERF_CMD_SXLAT, peer->inbuf_xlat);
 634
 635	if (test_and_clear_bit(PERF_CMD_RXLAT, &peer->sts))
 636		perf_setup_outbuf(peer);
 637
 638	if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
 639		clear_bit(PERF_STS_DONE, &peer->sts);
 640		if (test_bit(0, &peer->perf->busy_flag) &&
 641		    peer == peer->perf->test_peer) {
 642			dev_warn(&peer->perf->ntb->dev,
 643				"Freeing while test on-fly\n");
 644			perf_terminate_test(peer->perf);
 645		}
 646		perf_free_outbuf(peer);
 647		perf_free_inbuf(peer);
 648	}
 649}
 650
 651static int perf_init_service(struct perf_ctx *perf)
 
 652{
 653	u64 mask;
 654
 655	if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) {
 656		dev_err(&perf->ntb->dev, "Not enough memory windows\n");
 657		return -EINVAL;
 658	}
 659
 660	if (ntb_msg_count(perf->ntb) >= PERF_MSG_CNT) {
 661		perf->cmd_send = perf_msg_cmd_send;
 662		perf->cmd_recv = perf_msg_cmd_recv;
 663
 664		dev_dbg(&perf->ntb->dev, "Message service initialized\n");
 665
 666		return 0;
 667	}
 668
 669	dev_dbg(&perf->ntb->dev, "Message service unsupported\n");
 
 
 670
 671	mask = GENMASK_ULL(perf->pcnt, 0);
 672	if (ntb_spad_count(perf->ntb) >= PERF_SPAD_CNT(perf->pcnt) &&
 673	    (ntb_db_valid_mask(perf->ntb) & mask) == mask) {
 674		perf->cmd_send = perf_spad_cmd_send;
 675		perf->cmd_recv = perf_spad_cmd_recv;
 676
 677		dev_dbg(&perf->ntb->dev, "Scratchpad service initialized\n");
 678
 679		return 0;
 680	}
 681
 682	dev_dbg(&perf->ntb->dev, "Scratchpad service unsupported\n");
 683
 684	dev_err(&perf->ntb->dev, "Command services unsupported\n");
 
 685
 686	return -EINVAL;
 
 
 687}
 688
 689static int perf_enable_service(struct perf_ctx *perf)
 690{
 691	u64 mask, incmd_bit;
 692	int ret, sidx, scnt;
 693
 694	mask = ntb_db_valid_mask(perf->ntb);
 695	(void)ntb_db_set_mask(perf->ntb, mask);
 696
 697	ret = ntb_set_ctx(perf->ntb, perf, &perf_ops);
 698	if (ret)
 699		return ret;
 700
 701	if (perf->cmd_send == perf_msg_cmd_send) {
 702		u64 inbits, outbits;
 703
 704		inbits = ntb_msg_inbits(perf->ntb);
 705		outbits = ntb_msg_outbits(perf->ntb);
 706		(void)ntb_msg_set_mask(perf->ntb, inbits | outbits);
 707
 708		incmd_bit = BIT_ULL(__ffs64(inbits));
 709		ret = ntb_msg_clear_mask(perf->ntb, incmd_bit);
 710
 711		dev_dbg(&perf->ntb->dev, "MSG sts unmasked %#llx\n", incmd_bit);
 712	} else {
 713		scnt = ntb_spad_count(perf->ntb);
 714		for (sidx = 0; sidx < scnt; sidx++)
 715			ntb_spad_write(perf->ntb, sidx, PERF_CMD_INVAL);
 716		incmd_bit = PERF_SPAD_NOTIFY(perf->gidx);
 717		ret = ntb_db_clear_mask(perf->ntb, incmd_bit);
 718
 719		dev_dbg(&perf->ntb->dev, "DB bits unmasked %#llx\n", incmd_bit);
 720	}
 721	if (ret) {
 722		ntb_clear_ctx(perf->ntb);
 723		return ret;
 724	}
 725
 726	ntb_link_enable(perf->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 727	/* Might not be necessary */
 728	ntb_link_event(perf->ntb);
 729
 730	return 0;
 731}
 732
 733static void perf_disable_service(struct perf_ctx *perf)
 734{
 735	int pidx;
 736
 737	if (perf->cmd_send == perf_msg_cmd_send) {
 738		u64 inbits;
 739
 740		inbits = ntb_msg_inbits(perf->ntb);
 741		(void)ntb_msg_set_mask(perf->ntb, inbits);
 742	} else {
 743		(void)ntb_db_set_mask(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
 744	}
 745
 746	ntb_clear_ctx(perf->ntb);
 
 
 747
 748	for (pidx = 0; pidx < perf->pcnt; pidx++)
 749		perf_cmd_exec(&perf->peers[pidx], PERF_CMD_CLEAR);
 750
 751	for (pidx = 0; pidx < perf->pcnt; pidx++)
 752		flush_work(&perf->peers[pidx].service);
 753
 754	for (pidx = 0; pidx < perf->pcnt; pidx++) {
 755		struct perf_peer *peer = &perf->peers[pidx];
 
 756
 757		ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0);
 758	}
 759
 760	ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
 761
 762	ntb_link_disable(perf->ntb);
 763}
 764
 765/*==============================================================================
 766 *                      Performance measuring work-thread
 767 *==============================================================================
 768 */
 769
 770static void perf_dma_copy_callback(void *data)
 771{
 772	struct perf_thread *pthr = data;
 773
 774	atomic_dec(&pthr->dma_sync);
 775	wake_up(&pthr->dma_wait);
 776}
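    /*
     * Descriptive note: dma_sync counts DMA descriptors that have been
     * submitted but not yet completed.  perf_copy_chunk() increments it for
     * every memcpy descriptor it issues, perf_dma_copy_callback() above
     * decrements it on completion, and perf_sync_test() sleeps on dma_wait
     * until the counter drains to zero (or the test is interrupted via
     * tsync < 0).
     */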
 777
 778static int perf_copy_chunk(struct perf_thread *pthr,
 779			   void __iomem *dst, void *src, size_t len)
 780{
 781	struct dma_async_tx_descriptor *tx;
 782	struct dmaengine_unmap_data *unmap;
 783	struct device *dma_dev;
 784	int try = 0, ret = 0;
 785
 786	if (!use_dma) {
 787		memcpy_toio(dst, src, len);
 788		goto ret_check_tsync;
 789	}
 790
 791	dma_dev = pthr->dma_chan->device->dev;
 792
 793	if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src),
 794				 offset_in_page(dst), len))
 795		return -EIO;
 796
 797	unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
 798	if (!unmap)
 799		return -ENOMEM;
 800
 801	unmap->len = len;
 802	unmap->addr[0] = dma_map_page(dma_dev, virt_to_page(src),
 803		offset_in_page(src), len, DMA_TO_DEVICE);
 804	if (dma_mapping_error(dma_dev, unmap->addr[0])) {
 805		ret = -EIO;
 806		goto err_free_resource;
 807	}
 808	unmap->to_cnt = 1;
 809
 810	unmap->addr[1] = dma_map_page(dma_dev, virt_to_page(dst),
 811		offset_in_page(dst), len, DMA_FROM_DEVICE);
 812	if (dma_mapping_error(dma_dev, unmap->addr[1])) {
 813		ret = -EIO;
 814		goto err_free_resource;
 815	}
 816	unmap->from_cnt = 1;
 817
 818	do {
 819		tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, unmap->addr[1],
 820			unmap->addr[0], len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 821		if (!tx)
 822			msleep(DMA_MDELAY);
 823	} while (!tx && (try++ < DMA_TRIES));
 824
 825	if (!tx) {
 826		ret = -EIO;
 827		goto err_free_resource;
 828	}
 829
 830	tx->callback = perf_dma_copy_callback;
 831	tx->callback_param = pthr;
 832	dma_set_unmap(tx, unmap);
 833
 834	ret = dma_submit_error(dmaengine_submit(tx));
 835	if (ret) {
 836		dmaengine_unmap_put(unmap);
 837		goto err_free_resource;
 838	}
 839
 840	dmaengine_unmap_put(unmap);
 841
 842	atomic_inc(&pthr->dma_sync);
 843	dma_async_issue_pending(pthr->dma_chan);
 844
 845ret_check_tsync:
 846	return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR;
 847
 848err_free_resource:
 849	dmaengine_unmap_put(unmap);
 850
 851	return ret;
 852}
 853
 854static bool perf_dma_filter(struct dma_chan *chan, void *data)
 855{
 856	struct perf_ctx *perf = data;
 857	int node;
 858
 859	node = dev_to_node(&perf->ntb->dev);
 
 860
 861	return node == NUMA_NO_NODE || node == dev_to_node(chan->device->dev);
 862}
 863
 864static int perf_init_test(struct perf_thread *pthr)
 865{
 866	struct perf_ctx *perf = pthr->perf;
 867	dma_cap_mask_t dma_mask;
 
 868
 869	pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
 870				 dev_to_node(&perf->ntb->dev));
 871	if (!pthr->src)
 872		return -ENOMEM;
 873
 874	get_random_bytes(pthr->src, perf->test_peer->outbuf_size);
 
 875
 876	if (!use_dma)
 877		return 0;
 878
 879	dma_cap_zero(dma_mask);
 880	dma_cap_set(DMA_MEMCPY, dma_mask);
 881	pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf);
 882	if (!pthr->dma_chan) {
 883		dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
 884			pthr->tidx);
 885		atomic_dec(&perf->tsync);
 886		wake_up(&perf->twait);
 887		kfree(pthr->src);
 888		return -ENODEV;
 889	}
 890
 891	atomic_set(&pthr->dma_sync, 0);
 
 892
 893	return 0;
 894}
 895
 896static int perf_run_test(struct perf_thread *pthr)
 897{
 898	struct perf_peer *peer = pthr->perf->test_peer;
 899	struct perf_ctx *perf = pthr->perf;
 900	void __iomem *flt_dst, *bnd_dst;
 901	u64 total_size, chunk_size;
 902	void *flt_src;
 903	int ret = 0;
 904
 905	total_size = 1ULL << total_order;
 906	chunk_size = 1ULL << chunk_order;
 907	chunk_size = min_t(u64, peer->outbuf_size, chunk_size);
 908
 909	flt_src = pthr->src;
 910	bnd_dst = peer->outbuf + peer->outbuf_size;
 911	flt_dst = peer->outbuf;
 912
 913	pthr->duration = ktime_get();
 914
 915	/* Copied field is cleared on test launch stage */
 916	while (pthr->copied < total_size) {
 917		ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
 918		if (ret) {
 919			dev_err(&perf->ntb->dev, "%d: Got error %d on test\n",
 920				pthr->tidx, ret);
 921			return ret;
 922		}
 923
 924		pthr->copied += chunk_size;
 925
 926		flt_dst += chunk_size;
 927		flt_src += chunk_size;
 928		if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
 929			flt_dst = peer->outbuf;
 930			flt_src = pthr->src;
 931		}
 932
 933		/* Give up CPU to give a chance for other threads to use it */
 934		schedule();
 935	}
 936
 937	return 0;
 938}
 939
 940static int perf_sync_test(struct perf_thread *pthr)
 941{
 942	struct perf_ctx *perf = pthr->perf;
 943
 944	if (!use_dma)
 945		goto no_dma_ret;
 946
 947	wait_event(pthr->dma_wait,
 948		   (atomic_read(&pthr->dma_sync) == 0 ||
 949		    atomic_read(&perf->tsync) < 0));
 
 950
 951	if (atomic_read(&perf->tsync) < 0)
 952		return -EINTR;
 953
 954no_dma_ret:
 955	pthr->duration = ktime_sub(ktime_get(), pthr->duration);
 
 956
 957	dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n",
 958		pthr->tidx, pthr->copied);
 959
 960	dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n",
 961		pthr->tidx, ktime_to_us(pthr->duration));
 962
 963	dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
 964		div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
 965
 966	return 0;
 967}
 968
 969static void perf_clear_test(struct perf_thread *pthr)
 970{
 971	struct perf_ctx *perf = pthr->perf;
 972
 973	if (!use_dma)
 974		goto no_dma_notify;
 975
 976	/*
 977	 * If the test finished without errors, termination isn't needed.
 978	 * We call it anyway just to be sure the transfers have completed.
 979	 */
 980	(void)dmaengine_terminate_sync(pthr->dma_chan);
 981
 982	dma_release_channel(pthr->dma_chan);
 
 983
 984no_dma_notify:
 985	atomic_dec(&perf->tsync);
 986	wake_up(&perf->twait);
 987	kfree(pthr->src);
 988}
 989
 990static void perf_thread_work(struct work_struct *work)
 991{
 992	struct perf_thread *pthr = to_thread_work(work);
 993	int ret;
 994
 995	/*
 996	 * Perform the stages in compliance with the use_dma flag value.
 997	 * The test status is changed only if an error happened; otherwise
 998	 * status -ENODATA is kept while the test is in progress. Results
 999	 * synchronization is performed only if the test finished
1000	 * without an error or interruption.
1001	 */
1002	ret = perf_init_test(pthr);
1003	if (ret) {
1004		pthr->status = ret;
1005		return;
1006	}
1007
1008	ret = perf_run_test(pthr);
1009	if (ret) {
1010		pthr->status = ret;
1011		goto err_clear_test;
1012	}
1013
1014	pthr->status = perf_sync_test(pthr);
1015
1016err_clear_test:
1017	perf_clear_test(pthr);
1018}
1019
1020static int perf_set_tcnt(struct perf_ctx *perf, u8 tcnt)
1021{
1022	if (tcnt == 0 || tcnt > MAX_THREADS_CNT)
1023		return -EINVAL;
1024
1025	if (test_and_set_bit_lock(0, &perf->busy_flag))
1026		return -EBUSY;
1027
1028	perf->tcnt = tcnt;
1029
1030	clear_bit_unlock(0, &perf->busy_flag);
 
 
1031
1032	return 0;
1033}
1034
1035static void perf_terminate_test(struct perf_ctx *perf)
 
1036{
1037	int tidx;
 
 
1038
1039	atomic_set(&perf->tsync, -1);
1040	wake_up(&perf->twait);
1041
1042	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1043		wake_up(&perf->threads[tidx].dma_wait);
1044		cancel_work_sync(&perf->threads[tidx].work);
1045	}
1046}
1047
1048static int perf_submit_test(struct perf_peer *peer)
1049{
1050	struct perf_ctx *perf = peer->perf;
1051	struct perf_thread *pthr;
1052	int tidx, ret;
1053
1054	if (!test_bit(PERF_STS_DONE, &peer->sts))
1055		return -ENOLINK;
1056
1057	if (test_and_set_bit_lock(0, &perf->busy_flag))
1058		return -EBUSY;
1059
1060	perf->test_peer = peer;
1061	atomic_set(&perf->tsync, perf->tcnt);
1062
1063	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1064		pthr = &perf->threads[tidx];
1065
1066		pthr->status = -ENODATA;
1067		pthr->copied = 0;
1068		pthr->duration = ktime_set(0, 0);
1069		if (tidx < perf->tcnt)
1070			(void)queue_work(perf_wq, &pthr->work);
1071	}
1072
1073	ret = wait_event_interruptible(perf->twait,
1074				       atomic_read(&perf->tsync) <= 0);
1075	if (ret == -ERESTARTSYS) {
1076		perf_terminate_test(perf);
1077		ret = -EINTR;
1078	}
1079
1080	clear_bit_unlock(0, &perf->busy_flag);
1081
1082	return ret;
1083}
1084
1085static int perf_read_stats(struct perf_ctx *perf, char *buf,
1086			   size_t size, ssize_t *pos)
1087{
1088	struct perf_thread *pthr;
1089	int tidx;
1090
1091	if (test_and_set_bit_lock(0, &perf->busy_flag))
1092		return -EBUSY;
1093
1094	(*pos) += scnprintf(buf + *pos, size - *pos,
1095		"    Peer %d test statistics:\n", perf->test_peer->pidx);
1096
1097	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1098		pthr = &perf->threads[tidx];
1099
1100		if (pthr->status == -ENODATA)
1101			continue;
1102
1103		if (pthr->status) {
1104			(*pos) += scnprintf(buf + *pos, size - *pos,
1105				"%d: error status %d\n", tidx, pthr->status);
1106			continue;
 
 
1107		}
1108
1109		(*pos) += scnprintf(buf + *pos, size - *pos,
1110			"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
1111			tidx, pthr->copied, ktime_to_us(pthr->duration),
1112			div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
1113	}
1114
1115	clear_bit_unlock(0, &perf->busy_flag);
1116
1117	return 0;
1118}
1119
1120static void perf_init_threads(struct perf_ctx *perf)
 
1121{
1122	struct perf_thread *pthr;
1123	int tidx;
1124
1125	perf->tcnt = DEF_THREADS_CNT;
1126	perf->test_peer = &perf->peers[0];
1127	init_waitqueue_head(&perf->twait);
1128
1129	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1130		pthr = &perf->threads[tidx];
1131
1132		pthr->perf = perf;
1133		pthr->tidx = tidx;
1134		pthr->status = -ENODATA;
1135		init_waitqueue_head(&pthr->dma_wait);
1136		INIT_WORK(&pthr->work, perf_thread_work);
1137	}
1138}
1139
1140static void perf_clear_threads(struct perf_ctx *perf)
1141{
1142	perf_terminate_test(perf);
1143}
1144
1145/*==============================================================================
1146 *                               DebugFS nodes
1147 *==============================================================================
1148 */
1149
1150static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
1151				    size_t size, loff_t *offp)
1152{
1153	struct perf_ctx *perf = filep->private_data;
1154	struct perf_peer *peer;
1155	size_t buf_size;
1156	ssize_t pos = 0;
1157	int ret, pidx;
1158	char *buf;
1159
1160	buf_size = min_t(size_t, size, 0x1000U);
1161
1162	buf = kmalloc(buf_size, GFP_KERNEL);
1163	if (!buf)
1164		return -ENOMEM;
 
1165
1166	pos += scnprintf(buf + pos, buf_size - pos,
1167		"    Performance measuring tool info:\n\n");
1168
1169	pos += scnprintf(buf + pos, buf_size - pos,
1170		"Local port %d, Global index %d\n", ntb_port_number(perf->ntb),
1171		perf->gidx);
1172	pos += scnprintf(buf + pos, buf_size - pos, "Test status: ");
1173	if (test_bit(0, &perf->busy_flag)) {
1174		pos += scnprintf(buf + pos, buf_size - pos,
1175			"on-fly with port %d (%d)\n",
1176			ntb_peer_port_number(perf->ntb, perf->test_peer->pidx),
1177			perf->test_peer->pidx);
1178	} else {
1179		pos += scnprintf(buf + pos, buf_size - pos, "idle\n");
1180	}
1181
1182	for (pidx = 0; pidx < perf->pcnt; pidx++) {
1183		peer = &perf->peers[pidx];
1184
1185		pos += scnprintf(buf + pos, buf_size - pos,
1186			"Port %d (%d), Global index %d:\n",
1187			ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx,
1188			peer->gidx);
1189
1190		pos += scnprintf(buf + pos, buf_size - pos,
1191			"\tLink status: %s\n",
1192			test_bit(PERF_STS_LNKUP, &peer->sts) ? "up" : "down");
1193
1194		pos += scnprintf(buf + pos, buf_size - pos,
1195			"\tOut buffer addr 0x%pK\n", peer->outbuf);
1196
1197		pos += scnprintf(buf + pos, buf_size - pos,
1198			"\tOut buffer size %pa\n", &peer->outbuf_size);
1199
1200		pos += scnprintf(buf + pos, buf_size - pos,
1201			"\tOut buffer xlat 0x%016llx[p]\n", peer->outbuf_xlat);
1202
1203		if (!peer->inbuf) {
1204			pos += scnprintf(buf + pos, buf_size - pos,
1205				"\tIn buffer addr: unallocated\n");
1206			continue;
1207		}
1208
1209		pos += scnprintf(buf + pos, buf_size - pos,
1210			"\tIn buffer addr 0x%pK\n", peer->inbuf);
1211
1212		pos += scnprintf(buf + pos, buf_size - pos,
1213			"\tIn buffer size %pa\n", &peer->inbuf_size);
1214
1215		pos += scnprintf(buf + pos, buf_size - pos,
1216			"\tIn buffer xlat %pad[p]\n", &peer->inbuf_xlat);
1217	}
1218
1219	ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
1220	kfree(buf);
1221
1222	return ret;
1223}
1224
1225static const struct file_operations perf_dbgfs_info = {
1226	.open = simple_open,
1227	.read = perf_dbgfs_read_info
1228};
1229
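/*
 * "run" node: writing a peer index starts a test against that peer,
 * reading the node back returns the collected statistics.
 */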
1230static ssize_t perf_dbgfs_read_run(struct file *filep, char __user *ubuf,
1231				   size_t size, loff_t *offp)
1232{
1233	struct perf_ctx *perf = filep->private_data;
1234	ssize_t ret, pos = 0;
1235	char *buf;
1236
1237	buf = kmalloc(PERF_BUF_LEN, GFP_KERNEL);
1238	if (!buf)
1239		return -ENOMEM;
1240
1241	ret = perf_read_stats(perf, buf, PERF_BUF_LEN, &pos);
1242	if (ret)
1243		goto err_free;
1244
1245	ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
1246err_free:
1247	kfree(buf);
1248
1249	return ret;
1250}
1251
1252static ssize_t perf_dbgfs_write_run(struct file *filep, const char __user *ubuf,
1253				    size_t size, loff_t *offp)
1254{
1255	struct perf_ctx *perf = filep->private_data;
1256	struct perf_peer *peer;
1257	int pidx, ret;
1258
1259	ret = kstrtoint_from_user(ubuf, size, 0, &pidx);
1260	if (ret)
1261		return ret;
1262
1263	if (pidx < 0 || pidx >= perf->pcnt)
1264		return -EINVAL;
1265
1266	peer = &perf->peers[pidx];
1267
1268	ret = perf_submit_test(peer);
1269	if (ret)
1270		return ret;
1271
1272	return size;
1273}
1274
1275static const struct file_operations perf_dbgfs_run = {
1276	.open = simple_open,
1277	.read = perf_dbgfs_read_run,
1278	.write = perf_dbgfs_write_run
1279};
1280
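/*
 * "threads_count" node: read or set the number of threads used for the
 * next test run.
 */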
1281static ssize_t perf_dbgfs_read_tcnt(struct file *filep, char __user *ubuf,
1282				    size_t size, loff_t *offp)
1283{
1284	struct perf_ctx *perf = filep->private_data;
1285	char buf[8];
1286	ssize_t pos;
1287
1288	pos = scnprintf(buf, sizeof(buf), "%hhu\n", perf->tcnt);
1289
1290	return simple_read_from_buffer(ubuf, size, offp, buf, pos);
1291}
1292
1293static ssize_t perf_dbgfs_write_tcnt(struct file *filep,
1294				     const char __user *ubuf,
1295				     size_t size, loff_t *offp)
1296{
1297	struct perf_ctx *perf = filep->private_data;
1298	int ret;
1299	u8 val;
1300
1301	ret = kstrtou8_from_user(ubuf, size, 0, &val);
1302	if (ret)
1303		return ret;
1304
1305	ret = perf_set_tcnt(perf, val);
1306	if (ret)
1307		return ret;
1308
1309	return size;
1310}
1311
1312static const struct file_operations perf_dbgfs_tcnt = {
1313	.open = simple_open,
1314	.read = perf_dbgfs_read_tcnt,
1315	.write = perf_dbgfs_write_tcnt
1316};
1317
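/*
 * Create the per-device DebugFS nodes under the driver's top directory.
 * A typical session (assuming DebugFS is mounted at /sys/kernel/debug and
 * <bdf> is the PCI name of the NTB device) might look like:
 *
 *	root@self# DBG=/sys/kernel/debug/ntb_perf/<bdf>
 *	root@self# cat $DBG/info
 *	root@self# echo 4 > $DBG/threads_count
 *	root@self# echo 0 > $DBG/run
 *	root@self# cat $DBG/run
 */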
1318static void perf_setup_dbgfs(struct perf_ctx *perf)
1319{
1320	struct pci_dev *pdev = perf->ntb->pdev;
1321
1322	perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir);
1323	if (!perf->dbgfs_dir) {
1324		dev_warn(&perf->ntb->dev, "DebugFS unsupported\n");
1325		return;
1326	}
1327
1328	debugfs_create_file("info", 0600, perf->dbgfs_dir, perf,
1329			    &perf_dbgfs_info);
1330
1331	debugfs_create_file("run", 0600, perf->dbgfs_dir, perf,
1332			    &perf_dbgfs_run);
1333
1334	debugfs_create_file("threads_count", 0600, perf->dbgfs_dir, perf,
1335			    &perf_dbgfs_tcnt);
1336
1337	/* Exposed read-only so they cannot be changed while a test is executing */
1338	debugfs_create_u8("chunk_order", 0500, perf->dbgfs_dir, &chunk_order);
1339
1340	debugfs_create_u8("total_order", 0500, perf->dbgfs_dir, &total_order);
1341
1342	debugfs_create_bool("use_dma", 0500, perf->dbgfs_dir, &use_dma);
1343}
1344
1345static void perf_clear_dbgfs(struct perf_ctx *perf)
1346{
1347	debugfs_remove_recursive(perf->dbgfs_dir);
1348}
1349
1350/*==============================================================================
1351 *                        Basic driver initialization
1352 *==============================================================================
1353 */
1354
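/*
 * Allocate the driver context and the per-peer descriptor array with
 * device-managed memory, sized by the number of reachable peer ports.
 */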
1355static struct perf_ctx *perf_create_data(struct ntb_dev *ntb)
1356{
1357	struct perf_ctx *perf;
1358
1359	perf = devm_kzalloc(&ntb->dev, sizeof(*perf), GFP_KERNEL);
1360	if (!perf)
1361		return ERR_PTR(-ENOMEM);
1362
1363	perf->pcnt = ntb_peer_port_count(ntb);
1364	perf->peers = devm_kcalloc(&ntb->dev, perf->pcnt, sizeof(*perf->peers),
1365				  GFP_KERNEL);
1366	if (!perf->peers)
1367		return ERR_PTR(-ENOMEM);
1368
1369	perf->ntb = ntb;
1370
1371	return perf;
1372}
1373
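/*
 * Map the outbound memory window used to reach this peer and clamp its
 * size to the max_mw_size module parameter if one was set.
 */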
1374static int perf_setup_peer_mw(struct perf_peer *peer)
1375{
1376	struct perf_ctx *perf = peer->perf;
1377	phys_addr_t phys_addr;
1378	int ret;
1379
1380	/* Get the outbound MW parameters and map the window */
1381	ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
1382				   &peer->outbuf_size);
1383	if (ret)
1384		return ret;
1385
1386	peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
1387					peer->outbuf_size);
1388	if (!peer->outbuf)
1389		return -ENOMEM;
1390
1391	if (max_mw_size && peer->outbuf_size > max_mw_size) {
1392		peer->outbuf_size = max_mw_size;
1393		dev_warn(&peer->perf->ntb->dev,
1394			"Peer %d outbuf reduced to %pa\n", peer->pidx,
1395			&peer->outbuf_size);
1396	}
1397
1398	return 0;
1399}
1400
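/*
 * Assign every peer a global index derived from the port numbers (the
 * local port occupies one slot of the global numbering), then map each
 * peer's outbound memory window.
 */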
1401static int perf_init_peers(struct perf_ctx *perf)
1402{
1403	struct perf_peer *peer;
1404	int pidx, lport, ret;
1405
1406	lport = ntb_port_number(perf->ntb);
1407	perf->gidx = -1;
1408	for (pidx = 0; pidx < perf->pcnt; pidx++) {
1409		peer = &perf->peers[pidx];
1410
1411		peer->perf = perf;
1412		peer->pidx = pidx;
1413		if (lport < ntb_peer_port_number(perf->ntb, pidx)) {
1414			if (perf->gidx == -1)
1415				perf->gidx = pidx;
1416			peer->gidx = pidx + 1;
1417		} else {
1418			peer->gidx = pidx;
1419		}
1420		INIT_WORK(&peer->service, perf_service_work);
1421	}
1422	if (perf->gidx == -1)
1423		perf->gidx = pidx;
1424
1425	for (pidx = 0; pidx < perf->pcnt; pidx++) {
1426		ret = perf_setup_peer_mw(&perf->peers[pidx]);
1427		if (ret)
1428			return ret;
1429	}
1430
1431	dev_dbg(&perf->ntb->dev, "Global port index %d\n", perf->gidx);
1432
1433	return 0;
1434}
1435
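/*
 * Device probe: allocate the context, set up peers and threads, bring up
 * the link service and finally expose the DebugFS interface.
 */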
1436static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
1437{
1438	struct perf_ctx *perf;
1439	int ret;
1440
1441	perf = perf_create_data(ntb);
1442	if (IS_ERR(perf))
1443		return PTR_ERR(perf);
1444
1445	ret = perf_init_peers(perf);
1446	if (ret)
1447		return ret;
1448
1449	perf_init_threads(perf);
1450
1451	ret = perf_init_service(perf);
1452	if (ret)
1453		return ret;
1454
1455	ret = perf_enable_service(perf);
1456	if (ret)
1457		return ret;
1458
1459	perf_setup_dbgfs(perf);
1460
1461	return 0;
1462}
1463
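/*
 * Device removal mirrors probe in reverse: tear down DebugFS, disable the
 * service and terminate any test still in flight.
 */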
1464static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
1465{
1466	struct perf_ctx *perf = ntb->ctx;
1467
1468	perf_clear_dbgfs(perf);
1469
1470	perf_disable_service(perf);
1471
1472	perf_clear_threads(perf);
1473}
1474
1475static struct ntb_client perf_client = {
1476	.ops = {
1477		.probe = perf_probe,
1478		.remove = perf_remove
1479	}
1480};
1481
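/*
 * Module init: clamp the chunk/total order parameters, create the unbound
 * workqueue used by the test threads and the DebugFS top directory, then
 * register the NTB client.
 */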
1482static int __init perf_init(void)
1483{
1484	int ret;
1485
1486	if (chunk_order > MAX_CHUNK_ORDER) {
1487		chunk_order = MAX_CHUNK_ORDER;
1488		pr_info("Chunk order reduced to %hhu\n", chunk_order);
1489	}
1490
1491	if (total_order < chunk_order) {
1492		total_order = chunk_order;
1493		pr_info("Total data order reduced to %hhu\n", total_order);
1494	}
1495
1496	perf_wq = alloc_workqueue("perf_wq", WQ_UNBOUND | WQ_SYSFS, 0);
1497	if (!perf_wq)
1498		return -ENOMEM;
1499
1500	if (debugfs_initialized())
1501		perf_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1502
1503	ret = ntb_register_client(&perf_client);
1504	if (ret) {
1505		debugfs_remove_recursive(perf_dbgfs_topdir);
1506		destroy_workqueue(perf_wq);
1507	}
1508
1509	return ret;
1510}
1511module_init(perf_init);
1512
1513static void __exit perf_exit(void)
1514{
1515	ntb_unregister_client(&perf_client);
1516	debugfs_remove_recursive(perf_dbgfs_topdir);
1517	destroy_workqueue(perf_wq);
1518}
1519module_exit(perf_exit);
1520