// SPDX-License-Identifier: GPL-2.0
/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/greybus.h>
#include <asm/div64.h>

#define NSEC_PER_DAY 86400000000000ULL

struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	wait_queue_head_t wq;
};

static struct gb_loopback_device gb_dev;

struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	ktime_t ts;
	int (*completion)(struct gb_loopback_async_operation *op_async);
};

struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct mutex mutex;
	struct task_struct *task;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;
	atomic_t outstanding_operations;

	/* Per connection stats */
	ktime_t ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;
	int async;
	int id;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;
	u32 jiffy_timeout;
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
};

static struct class loopback_class = {
	.name = "gb_loopback",
};

static DEFINE_IDA(loopback_ida);

/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN		1
#define GB_LOOPBACK_TIMEOUT_MAX		10000

#define GB_LOOPBACK_FIFO_DEFAULT	8192

static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_US_WAIT_MAX		1000000

/* interface sysfs attributes */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)

#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	/* Report 0 for min and max if no transfer succeeded */	\
	if (!gb->requests_completed)				\
		return sprintf(buf, "0\n");			\
	return sprintf(buf, "%" #type "\n", gb->name.field);	\
}								\
static DEVICE_ATTR_RO(name##_##field)

#define gb_loopback_ro_avg_attr(name)				\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback_stats *stats;			\
	struct gb_loopback *gb;					\
	u64 avg, rem;						\
	u32 count;						\
	gb = dev_get_drvdata(dev);				\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;		\
	avg = stats->sum + count / 2000000; /* round closest */	\
	rem = do_div(avg, count);				\
	rem *= 1000000;						\
	do_div(rem, count);					\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);	\
}								\
static DEVICE_ATTR_RO(name##_avg)

#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)

#define gb_loopback_attr(field, type)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%" #type "\n", gb->field);		\
}								\
static ssize_t field##_store(struct device *dev,		\
			    struct device_attribute *attr,	\
			    const char *buf,			\
			    size_t len)				\
{								\
	int ret;						\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	mutex_lock(&gb->mutex);					\
	ret = sscanf(buf, "%"#type, &gb->field);		\
	if (ret != 1)						\
		len = -EINVAL;					\
	else							\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);				\
	return len;						\
}								\
static DEVICE_ATTR_RW(field)

#define gb_dev_loopback_ro_attr(field, conn)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)

#define gb_dev_loopback_rw_attr(field, type)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%" #type "\n", gb->field);		\
}								\
static ssize_t field##_store(struct device *dev,		\
			    struct device_attribute *attr,	\
			    const char *buf,			\
			    size_t len)				\
{								\
	int ret;						\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	mutex_lock(&gb->mutex);					\
	ret = sscanf(buf, "%"#type, &gb->field);		\
	if (ret != 1)						\
		len = -EINVAL;					\
	else							\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);				\
	return len;						\
}								\
static DEVICE_ATTR_RW(field)

static void gb_loopback_reset_stats(struct gb_loopback *gb);
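/*
 * Validate and clamp the attribute values written from user-space, clear the
 * per-run counters and latency kfifo, and wake the worker thread once a
 * supported test type has been selected.
 */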
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->send_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->type = 0;
		break;
	}
}

/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in useconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in useconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *	payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);

static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);

static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	do_div(elapsed_nsecs, NSEC_PER_USEC);
	return elapsed_nsecs;
}

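/*
 * Elapsed time between two timestamps; if the second timestamp is not later
 * than the first, assume the clock wrapped and return NSEC_PER_DAY - t2 + t1.
 */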
static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}

static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
{
	return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
}

static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	ktime_t ts, te;
	int ret;

	ts = ktime_get();
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	te = ktime_get();

	/* Calculate the total time the message took */
	gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}

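/* Wait until every outstanding asynchronous operation has completed. */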
static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}

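/*
 * Completion handler for asynchronous operations: run the per-type completion
 * check, fold the result into the error and latency statistics, wake anyone
 * waiting on the completion queue, then drop the operation references.
 */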
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	ktime_t te;
	int result;

	te = ktime_get();
	result = gb_operation_result(operation);
	op_async = gb_operation_get_data(operation);
	gb = op_async->gb;

	mutex_lock(&gb->mutex);

	if (!result && op_async->completion)
		result = op_async->completion(op_async);

	if (!result) {
		gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
	} else {
		gb->error++;
		if (result == -ETIMEDOUT)
			gb->requests_timedout++;
	}

	gb->iteration_count++;
	gb_loopback_calculate_stats(gb, result);

	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	/* Wake up waiters */
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&gb->wq_completion);

	/* Release resources */
	gb_operation_put(operation);
	kfree(op_async);
}

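/*
 * Build and send one asynchronous loopback operation; the callback above
 * accounts for the result and releases the operation and its context.
 */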
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	gb_operation_set_data(operation, op_async);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	op_async->ts = ktime_get();

	atomic_inc(&gb->outstanding_operations);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					jiffies_to_msecs(gb->jiffy_timeout),
					GFP_KERNEL);
	if (ret) {
		atomic_dec(&gb->outstanding_operations);
		gb_operation_put(operation);
		kfree(op_async);
	}
	return ret;
}

static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}

static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}

static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}

static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}

static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gbphy_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}

static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	if (retval)
		goto gb_error;

gb_error:
	kfree(request);
	return retval;
}

static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}

static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}

static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gbphy_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;
	gb->ts = ktime_set(0, 0);
}

static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}

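/*
 * Fold a measurement window into the stats: sum and count accumulate the raw
 * totals while min/max track the per-window average (val / count).
 */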
static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}

static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}

static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}

static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}

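/*
 * Per-iteration bookkeeping: update the latency statistics for successful
 * requests and, roughly once per second or at the end of a run, fold the
 * accumulated window into the throughput and requests-per-second stats.
 */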
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	ktime_t te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	te = ktime_get();
	nlat = gb_loopback_calc_latency(gb->ts, te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}

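/*
 * In asynchronous mode, throttle the sender once the number of in-flight
 * operations reaches outstanding_operations_max.
 */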
static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				 kthread_should_stop());
}

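/*
 * Main worker thread: sleeps until a test type is selected, then issues
 * synchronous or asynchronous operations until send_count reaches
 * iteration_max, optionally pausing us_wait microseconds between sends.
 */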
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			mutex_unlock(&gb->mutex);

			/* Wait for synchronous and asynchronous completion */
			gb_loopback_async_wait_all(gb);

			/* Mark complete unless user-space has poked us */
			mutex_lock(&gb->mutex);
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj, NULL,
					     "iteration_count");
				dev_dbg(&bundle->dev, "load test complete\n");
			} else {
				dev_dbg(&bundle->dev,
					"continuing on with new test set\n");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (ktime_to_ns(gb->ts) == 0)
			gb->ts = ktime_get();

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_async_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_async_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_async_sink(gb, size);

			if (error) {
				gb->error++;
				gb->iteration_count++;
			}
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		mutex_unlock(&gb->mutex);

		if (us_wait) {
			if (us_wait < 20000)
				usleep_range(us_wait, us_wait + 100);
			else
				msleep(us_wait / 1000);
		}
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}

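/* Pop one raw latency sample from the kfifo and print it; -EAGAIN if empty. */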
static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}
DEFINE_SHOW_ATTRIBUTE(gb_loopback_dbgfs_latency);

#define DEBUGFS_NAMELEN 32

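/*
 * Bundle probe: create and enable the loopback connection, expose the sysfs
 * attributes and the per-connection debugfs raw-latency file, allocate the
 * latency kfifo and start the worker thread.
 */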
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
				       &gb_loopback_dbgfs_latency_fops);

	gb->id = ida_alloc(&loopback_ida, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_free(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}

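/*
 * Bundle disconnect: disable the connection, stop the worker thread and tear
 * down the debugfs, sysfs and kfifo resources created at probe time.
 */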
static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_free(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}

static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};

static int loopback_init(void)
{
	int retval;

	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_DESCRIPTION("Loopback bridge driver for the Greybus loopback module");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Loopback bridge driver for the Greybus loopback module.
4 *
5 * Copyright 2014 Google Inc.
6 * Copyright 2014 Linaro Ltd.
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/mutex.h>
14#include <linux/slab.h>
15#include <linux/kthread.h>
16#include <linux/delay.h>
17#include <linux/random.h>
18#include <linux/sizes.h>
19#include <linux/cdev.h>
20#include <linux/fs.h>
21#include <linux/kfifo.h>
22#include <linux/debugfs.h>
23#include <linux/list_sort.h>
24#include <linux/spinlock.h>
25#include <linux/workqueue.h>
26#include <linux/atomic.h>
27#include <linux/pm_runtime.h>
28#include <linux/greybus.h>
29#include <asm/div64.h>
30
31#define NSEC_PER_DAY 86400000000000ULL
32
33struct gb_loopback_stats {
34 u32 min;
35 u32 max;
36 u64 sum;
37 u32 count;
38};
39
40struct gb_loopback_device {
41 struct dentry *root;
42 u32 count;
43 size_t size_max;
44
45 /* We need to take a lock in atomic context */
46 spinlock_t lock;
47 wait_queue_head_t wq;
48};
49
50static struct gb_loopback_device gb_dev;
51
52struct gb_loopback_async_operation {
53 struct gb_loopback *gb;
54 struct gb_operation *operation;
55 ktime_t ts;
56 int (*completion)(struct gb_loopback_async_operation *op_async);
57};
58
59struct gb_loopback {
60 struct gb_connection *connection;
61
62 struct dentry *file;
63 struct kfifo kfifo_lat;
64 struct mutex mutex;
65 struct task_struct *task;
66 struct device *dev;
67 wait_queue_head_t wq;
68 wait_queue_head_t wq_completion;
69 atomic_t outstanding_operations;
70
71 /* Per connection stats */
72 ktime_t ts;
73 struct gb_loopback_stats latency;
74 struct gb_loopback_stats throughput;
75 struct gb_loopback_stats requests_per_second;
76 struct gb_loopback_stats apbridge_unipro_latency;
77 struct gb_loopback_stats gbphy_firmware_latency;
78
79 int type;
80 int async;
81 int id;
82 u32 size;
83 u32 iteration_max;
84 u32 iteration_count;
85 int us_wait;
86 u32 error;
87 u32 requests_completed;
88 u32 requests_timedout;
89 u32 timeout;
90 u32 jiffy_timeout;
91 u32 timeout_min;
92 u32 timeout_max;
93 u32 outstanding_operations_max;
94 u64 elapsed_nsecs;
95 u32 apbridge_latency_ts;
96 u32 gbphy_latency_ts;
97
98 u32 send_count;
99};
100
101static struct class loopback_class = {
102 .name = "gb_loopback",
103};
104static DEFINE_IDA(loopback_ida);
105
106/* Min/max values in jiffies */
107#define GB_LOOPBACK_TIMEOUT_MIN 1
108#define GB_LOOPBACK_TIMEOUT_MAX 10000
109
110#define GB_LOOPBACK_FIFO_DEFAULT 8192
111
112static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
113module_param(kfifo_depth, uint, 0444);
114
115/* Maximum size of any one send data buffer we support */
116#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
117
118#define GB_LOOPBACK_US_WAIT_MAX 1000000
119
120/* interface sysfs attributes */
121#define gb_loopback_ro_attr(field) \
122static ssize_t field##_show(struct device *dev, \
123 struct device_attribute *attr, \
124 char *buf) \
125{ \
126 struct gb_loopback *gb = dev_get_drvdata(dev); \
127 return sprintf(buf, "%u\n", gb->field); \
128} \
129static DEVICE_ATTR_RO(field)
130
131#define gb_loopback_ro_stats_attr(name, field, type) \
132static ssize_t name##_##field##_show(struct device *dev, \
133 struct device_attribute *attr, \
134 char *buf) \
135{ \
136 struct gb_loopback *gb = dev_get_drvdata(dev); \
137 /* Report 0 for min and max if no transfer succeeded */ \
138 if (!gb->requests_completed) \
139 return sprintf(buf, "0\n"); \
140 return sprintf(buf, "%" #type "\n", gb->name.field); \
141} \
142static DEVICE_ATTR_RO(name##_##field)
143
144#define gb_loopback_ro_avg_attr(name) \
145static ssize_t name##_avg_show(struct device *dev, \
146 struct device_attribute *attr, \
147 char *buf) \
148{ \
149 struct gb_loopback_stats *stats; \
150 struct gb_loopback *gb; \
151 u64 avg, rem; \
152 u32 count; \
153 gb = dev_get_drvdata(dev); \
154 stats = &gb->name; \
155 count = stats->count ? stats->count : 1; \
156 avg = stats->sum + count / 2000000; /* round closest */ \
157 rem = do_div(avg, count); \
158 rem *= 1000000; \
159 do_div(rem, count); \
160 return sprintf(buf, "%llu.%06u\n", avg, (u32)rem); \
161} \
162static DEVICE_ATTR_RO(name##_avg)
163
164#define gb_loopback_stats_attrs(field) \
165 gb_loopback_ro_stats_attr(field, min, u); \
166 gb_loopback_ro_stats_attr(field, max, u); \
167 gb_loopback_ro_avg_attr(field)
168
169#define gb_loopback_attr(field, type) \
170static ssize_t field##_show(struct device *dev, \
171 struct device_attribute *attr, \
172 char *buf) \
173{ \
174 struct gb_loopback *gb = dev_get_drvdata(dev); \
175 return sprintf(buf, "%" #type "\n", gb->field); \
176} \
177static ssize_t field##_store(struct device *dev, \
178 struct device_attribute *attr, \
179 const char *buf, \
180 size_t len) \
181{ \
182 int ret; \
183 struct gb_loopback *gb = dev_get_drvdata(dev); \
184 mutex_lock(&gb->mutex); \
185 ret = sscanf(buf, "%"#type, &gb->field); \
186 if (ret != 1) \
187 len = -EINVAL; \
188 else \
189 gb_loopback_check_attr(gb, bundle); \
190 mutex_unlock(&gb->mutex); \
191 return len; \
192} \
193static DEVICE_ATTR_RW(field)
194
195#define gb_dev_loopback_ro_attr(field, conn) \
196static ssize_t field##_show(struct device *dev, \
197 struct device_attribute *attr, \
198 char *buf) \
199{ \
200 struct gb_loopback *gb = dev_get_drvdata(dev); \
201 return sprintf(buf, "%u\n", gb->field); \
202} \
203static DEVICE_ATTR_RO(field)
204
205#define gb_dev_loopback_rw_attr(field, type) \
206static ssize_t field##_show(struct device *dev, \
207 struct device_attribute *attr, \
208 char *buf) \
209{ \
210 struct gb_loopback *gb = dev_get_drvdata(dev); \
211 return sprintf(buf, "%" #type "\n", gb->field); \
212} \
213static ssize_t field##_store(struct device *dev, \
214 struct device_attribute *attr, \
215 const char *buf, \
216 size_t len) \
217{ \
218 int ret; \
219 struct gb_loopback *gb = dev_get_drvdata(dev); \
220 mutex_lock(&gb->mutex); \
221 ret = sscanf(buf, "%"#type, &gb->field); \
222 if (ret != 1) \
223 len = -EINVAL; \
224 else \
225 gb_loopback_check_attr(gb); \
226 mutex_unlock(&gb->mutex); \
227 return len; \
228} \
229static DEVICE_ATTR_RW(field)
230
231static void gb_loopback_reset_stats(struct gb_loopback *gb);
232static void gb_loopback_check_attr(struct gb_loopback *gb)
233{
234 if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
235 gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
236 if (gb->size > gb_dev.size_max)
237 gb->size = gb_dev.size_max;
238 gb->requests_timedout = 0;
239 gb->requests_completed = 0;
240 gb->iteration_count = 0;
241 gb->send_count = 0;
242 gb->error = 0;
243
244 if (kfifo_depth < gb->iteration_max) {
245 dev_warn(gb->dev,
246 "cannot log bytes %u kfifo_depth %u\n",
247 gb->iteration_max, kfifo_depth);
248 }
249 kfifo_reset_out(&gb->kfifo_lat);
250
251 switch (gb->type) {
252 case GB_LOOPBACK_TYPE_PING:
253 case GB_LOOPBACK_TYPE_TRANSFER:
254 case GB_LOOPBACK_TYPE_SINK:
255 gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
256 if (!gb->jiffy_timeout)
257 gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
258 else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
259 gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
260 gb_loopback_reset_stats(gb);
261 wake_up(&gb->wq);
262 break;
263 default:
264 gb->type = 0;
265 break;
266 }
267}
268
269/* Time to send and receive one message */
270gb_loopback_stats_attrs(latency);
271/* Number of requests sent per second on this cport */
272gb_loopback_stats_attrs(requests_per_second);
273/* Quantity of data sent and received on this cport */
274gb_loopback_stats_attrs(throughput);
275/* Latency across the UniPro link from APBridge's perspective */
276gb_loopback_stats_attrs(apbridge_unipro_latency);
277/* Firmware induced overhead in the GPBridge */
278gb_loopback_stats_attrs(gbphy_firmware_latency);
279
280/* Number of errors encountered during loop */
281gb_loopback_ro_attr(error);
282/* Number of requests successfully completed async */
283gb_loopback_ro_attr(requests_completed);
284/* Number of requests timed out async */
285gb_loopback_ro_attr(requests_timedout);
286/* Timeout minimum in useconds */
287gb_loopback_ro_attr(timeout_min);
288/* Timeout minimum in useconds */
289gb_loopback_ro_attr(timeout_max);
290
291/*
292 * Type of loopback message to send based on protocol type definitions
293 * 0 => Don't send message
294 * 2 => Send ping message continuously (message without payload)
295 * 3 => Send transfer message continuously (message with payload,
296 * payload returned in response)
297 * 4 => Send a sink message (message with payload, no payload in response)
298 */
299gb_dev_loopback_rw_attr(type, d);
300/* Size of transfer message payload: 0-4096 bytes */
301gb_dev_loopback_rw_attr(size, u);
302/* Time to wait between two messages: 0-1000 ms */
303gb_dev_loopback_rw_attr(us_wait, d);
304/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
305gb_dev_loopback_rw_attr(iteration_max, u);
306/* The current index of the for (i = 0; i < iteration_max; i++) loop */
307gb_dev_loopback_ro_attr(iteration_count, false);
308/* A flag to indicate synchronous or asynchronous operations */
309gb_dev_loopback_rw_attr(async, u);
310/* Timeout of an individual asynchronous request */
311gb_dev_loopback_rw_attr(timeout, u);
312/* Maximum number of in-flight operations before back-off */
313gb_dev_loopback_rw_attr(outstanding_operations_max, u);
314
315static struct attribute *loopback_attrs[] = {
316 &dev_attr_latency_min.attr,
317 &dev_attr_latency_max.attr,
318 &dev_attr_latency_avg.attr,
319 &dev_attr_requests_per_second_min.attr,
320 &dev_attr_requests_per_second_max.attr,
321 &dev_attr_requests_per_second_avg.attr,
322 &dev_attr_throughput_min.attr,
323 &dev_attr_throughput_max.attr,
324 &dev_attr_throughput_avg.attr,
325 &dev_attr_apbridge_unipro_latency_min.attr,
326 &dev_attr_apbridge_unipro_latency_max.attr,
327 &dev_attr_apbridge_unipro_latency_avg.attr,
328 &dev_attr_gbphy_firmware_latency_min.attr,
329 &dev_attr_gbphy_firmware_latency_max.attr,
330 &dev_attr_gbphy_firmware_latency_avg.attr,
331 &dev_attr_type.attr,
332 &dev_attr_size.attr,
333 &dev_attr_us_wait.attr,
334 &dev_attr_iteration_count.attr,
335 &dev_attr_iteration_max.attr,
336 &dev_attr_async.attr,
337 &dev_attr_error.attr,
338 &dev_attr_requests_completed.attr,
339 &dev_attr_requests_timedout.attr,
340 &dev_attr_timeout.attr,
341 &dev_attr_outstanding_operations_max.attr,
342 &dev_attr_timeout_min.attr,
343 &dev_attr_timeout_max.attr,
344 NULL,
345};
346ATTRIBUTE_GROUPS(loopback);
347
348static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
349
350static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
351{
352 do_div(elapsed_nsecs, NSEC_PER_USEC);
353 return elapsed_nsecs;
354}
355
356static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
357{
358 if (t2 > t1)
359 return t2 - t1;
360 else
361 return NSEC_PER_DAY - t2 + t1;
362}
363
364static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
365{
366 return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
367}
368
369static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
370 void *request, int request_size,
371 void *response, int response_size)
372{
373 struct gb_operation *operation;
374 ktime_t ts, te;
375 int ret;
376
377 ts = ktime_get();
378 operation = gb_operation_create(gb->connection, type, request_size,
379 response_size, GFP_KERNEL);
380 if (!operation)
381 return -ENOMEM;
382
383 if (request_size)
384 memcpy(operation->request->payload, request, request_size);
385
386 ret = gb_operation_request_send_sync(operation);
387 if (ret) {
388 dev_err(&gb->connection->bundle->dev,
389 "synchronous operation failed: %d\n", ret);
390 goto out_put_operation;
391 } else {
392 if (response_size == operation->response->payload_size) {
393 memcpy(response, operation->response->payload,
394 response_size);
395 } else {
396 dev_err(&gb->connection->bundle->dev,
397 "response size %zu expected %d\n",
398 operation->response->payload_size,
399 response_size);
400 ret = -EINVAL;
401 goto out_put_operation;
402 }
403 }
404
405 te = ktime_get();
406
407 /* Calculate the total time the message took */
408 gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);
409
410out_put_operation:
411 gb_operation_put(operation);
412
413 return ret;
414}
415
416static void gb_loopback_async_wait_all(struct gb_loopback *gb)
417{
418 wait_event(gb->wq_completion,
419 !atomic_read(&gb->outstanding_operations));
420}
421
422static void gb_loopback_async_operation_callback(struct gb_operation *operation)
423{
424 struct gb_loopback_async_operation *op_async;
425 struct gb_loopback *gb;
426 ktime_t te;
427 int result;
428
429 te = ktime_get();
430 result = gb_operation_result(operation);
431 op_async = gb_operation_get_data(operation);
432 gb = op_async->gb;
433
434 mutex_lock(&gb->mutex);
435
436 if (!result && op_async->completion)
437 result = op_async->completion(op_async);
438
439 if (!result) {
440 gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
441 } else {
442 gb->error++;
443 if (result == -ETIMEDOUT)
444 gb->requests_timedout++;
445 }
446
447 gb->iteration_count++;
448 gb_loopback_calculate_stats(gb, result);
449
450 mutex_unlock(&gb->mutex);
451
452 dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
453 operation->id);
454
455 /* Wake up waiters */
456 atomic_dec(&op_async->gb->outstanding_operations);
457 wake_up(&gb->wq_completion);
458
459 /* Release resources */
460 gb_operation_put(operation);
461 kfree(op_async);
462}
463
464static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
465 void *request, int request_size,
466 int response_size,
467 void *completion)
468{
469 struct gb_loopback_async_operation *op_async;
470 struct gb_operation *operation;
471 int ret;
472
473 op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
474 if (!op_async)
475 return -ENOMEM;
476
477 operation = gb_operation_create(gb->connection, type, request_size,
478 response_size, GFP_KERNEL);
479 if (!operation) {
480 kfree(op_async);
481 return -ENOMEM;
482 }
483
484 if (request_size)
485 memcpy(operation->request->payload, request, request_size);
486
487 gb_operation_set_data(operation, op_async);
488
489 op_async->gb = gb;
490 op_async->operation = operation;
491 op_async->completion = completion;
492
493 op_async->ts = ktime_get();
494
495 atomic_inc(&gb->outstanding_operations);
496 ret = gb_operation_request_send(operation,
497 gb_loopback_async_operation_callback,
498 jiffies_to_msecs(gb->jiffy_timeout),
499 GFP_KERNEL);
500 if (ret) {
501 atomic_dec(&gb->outstanding_operations);
502 gb_operation_put(operation);
503 kfree(op_async);
504 }
505 return ret;
506}
507
508static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
509{
510 struct gb_loopback_transfer_request *request;
511 int retval;
512
513 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
514 if (!request)
515 return -ENOMEM;
516
517 request->len = cpu_to_le32(len);
518 retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
519 request, len + sizeof(*request),
520 NULL, 0);
521 kfree(request);
522 return retval;
523}
524
525static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
526{
527 struct gb_loopback_transfer_request *request;
528 struct gb_loopback_transfer_response *response;
529 int retval;
530
531 gb->apbridge_latency_ts = 0;
532 gb->gbphy_latency_ts = 0;
533
534 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
535 if (!request)
536 return -ENOMEM;
537 response = kmalloc(len + sizeof(*response), GFP_KERNEL);
538 if (!response) {
539 kfree(request);
540 return -ENOMEM;
541 }
542
543 memset(request->data, 0x5A, len);
544
545 request->len = cpu_to_le32(len);
546 retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
547 request, len + sizeof(*request),
548 response, len + sizeof(*response));
549 if (retval)
550 goto gb_error;
551
552 if (memcmp(request->data, response->data, len)) {
553 dev_err(&gb->connection->bundle->dev,
554 "Loopback Data doesn't match\n");
555 retval = -EREMOTEIO;
556 }
557 gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
558 gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);
559
560gb_error:
561 kfree(request);
562 kfree(response);
563
564 return retval;
565}
566
567static int gb_loopback_sync_ping(struct gb_loopback *gb)
568{
569 return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
570 NULL, 0, NULL, 0);
571}
572
573static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
574{
575 struct gb_loopback_transfer_request *request;
576 int retval;
577
578 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
579 if (!request)
580 return -ENOMEM;
581
582 request->len = cpu_to_le32(len);
583 retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
584 request, len + sizeof(*request),
585 0, NULL);
586 kfree(request);
587 return retval;
588}
589
590static int gb_loopback_async_transfer_complete(
591 struct gb_loopback_async_operation *op_async)
592{
593 struct gb_loopback *gb;
594 struct gb_operation *operation;
595 struct gb_loopback_transfer_request *request;
596 struct gb_loopback_transfer_response *response;
597 size_t len;
598 int retval = 0;
599
600 gb = op_async->gb;
601 operation = op_async->operation;
602 request = operation->request->payload;
603 response = operation->response->payload;
604 len = le32_to_cpu(request->len);
605
606 if (memcmp(request->data, response->data, len)) {
607 dev_err(&gb->connection->bundle->dev,
608 "Loopback Data doesn't match operation id %d\n",
609 operation->id);
610 retval = -EREMOTEIO;
611 } else {
612 gb->apbridge_latency_ts =
613 (u32)__le32_to_cpu(response->reserved0);
614 gb->gbphy_latency_ts =
615 (u32)__le32_to_cpu(response->reserved1);
616 }
617
618 return retval;
619}
620
621static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
622{
623 struct gb_loopback_transfer_request *request;
624 int retval, response_len;
625
626 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
627 if (!request)
628 return -ENOMEM;
629
630 memset(request->data, 0x5A, len);
631
632 request->len = cpu_to_le32(len);
633 response_len = sizeof(struct gb_loopback_transfer_response);
634 retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
635 request, len + sizeof(*request),
636 len + response_len,
637 gb_loopback_async_transfer_complete);
638 if (retval)
639 goto gb_error;
640
641gb_error:
642 kfree(request);
643 return retval;
644}
645
646static int gb_loopback_async_ping(struct gb_loopback *gb)
647{
648 return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
649 NULL, 0, 0, NULL);
650}
651
652static int gb_loopback_request_handler(struct gb_operation *operation)
653{
654 struct gb_connection *connection = operation->connection;
655 struct gb_loopback_transfer_request *request;
656 struct gb_loopback_transfer_response *response;
657 struct device *dev = &connection->bundle->dev;
658 size_t len;
659
660 /* By convention, the AP initiates the version operation */
661 switch (operation->type) {
662 case GB_LOOPBACK_TYPE_PING:
663 case GB_LOOPBACK_TYPE_SINK:
664 return 0;
665 case GB_LOOPBACK_TYPE_TRANSFER:
666 if (operation->request->payload_size < sizeof(*request)) {
667 dev_err(dev, "transfer request too small (%zu < %zu)\n",
668 operation->request->payload_size,
669 sizeof(*request));
670 return -EINVAL; /* -EMSGSIZE */
671 }
672 request = operation->request->payload;
673 len = le32_to_cpu(request->len);
674 if (len > gb_dev.size_max) {
675 dev_err(dev, "transfer request too large (%zu > %zu)\n",
676 len, gb_dev.size_max);
677 return -EINVAL;
678 }
679
680 if (!gb_operation_response_alloc(operation,
681 len + sizeof(*response), GFP_KERNEL)) {
682 dev_err(dev, "error allocating response\n");
683 return -ENOMEM;
684 }
685 response = operation->response->payload;
686 response->len = cpu_to_le32(len);
687 if (len)
688 memcpy(response->data, request->data, len);
689
690 return 0;
691 default:
692 dev_err(dev, "unsupported request: %u\n", operation->type);
693 return -EINVAL;
694 }
695}
696
697static void gb_loopback_reset_stats(struct gb_loopback *gb)
698{
699 struct gb_loopback_stats reset = {
700 .min = U32_MAX,
701 };
702
703 /* Reset per-connection stats */
704 memcpy(&gb->latency, &reset,
705 sizeof(struct gb_loopback_stats));
706 memcpy(&gb->throughput, &reset,
707 sizeof(struct gb_loopback_stats));
708 memcpy(&gb->requests_per_second, &reset,
709 sizeof(struct gb_loopback_stats));
710 memcpy(&gb->apbridge_unipro_latency, &reset,
711 sizeof(struct gb_loopback_stats));
712 memcpy(&gb->gbphy_firmware_latency, &reset,
713 sizeof(struct gb_loopback_stats));
714
715 /* Should be initialized at least once per transaction set */
716 gb->apbridge_latency_ts = 0;
717 gb->gbphy_latency_ts = 0;
718 gb->ts = ktime_set(0, 0);
719}
720
721static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
722{
723 if (stats->min > val)
724 stats->min = val;
725 if (stats->max < val)
726 stats->max = val;
727 stats->sum += val;
728 stats->count++;
729}
730
731static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
732 u64 val, u32 count)
733{
734 stats->sum += val;
735 stats->count += count;
736
737 do_div(val, count);
738 if (stats->min > val)
739 stats->min = val;
740 if (stats->max < val)
741 stats->max = val;
742}
743
744static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
745{
746 u64 req = gb->requests_completed * USEC_PER_SEC;
747
748 gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
749}
750
751static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
752{
753 u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
754
755 switch (gb->type) {
756 case GB_LOOPBACK_TYPE_PING:
757 break;
758 case GB_LOOPBACK_TYPE_SINK:
759 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
760 gb->size;
761 break;
762 case GB_LOOPBACK_TYPE_TRANSFER:
763 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
764 sizeof(struct gb_loopback_transfer_response) +
765 gb->size * 2;
766 break;
767 default:
768 return;
769 }
770
771 aggregate_size *= gb->requests_completed;
772 aggregate_size *= USEC_PER_SEC;
773 gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
774 latency);
775}
776
777static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
778{
779 u32 lat;
780
781 /* Express latency in terms of microseconds */
782 lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);
783
784 /* Log latency stastic */
785 gb_loopback_update_stats(&gb->latency, lat);
786
787 /* Raw latency log on a per thread basis */
788 kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));
789
790 /* Log the firmware supplied latency values */
791 gb_loopback_update_stats(&gb->apbridge_unipro_latency,
792 gb->apbridge_latency_ts);
793 gb_loopback_update_stats(&gb->gbphy_firmware_latency,
794 gb->gbphy_latency_ts);
795}
796
797static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
798{
799 u64 nlat;
800 u32 lat;
801 ktime_t te;
802
803 if (!error) {
804 gb->requests_completed++;
805 gb_loopback_calculate_latency_stats(gb);
806 }
807
808 te = ktime_get();
809 nlat = gb_loopback_calc_latency(gb->ts, te);
810 if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
811 lat = gb_loopback_nsec_to_usec_latency(nlat);
812
813 gb_loopback_throughput_update(gb, lat);
814 gb_loopback_requests_update(gb, lat);
815
816 if (gb->iteration_count != gb->iteration_max) {
817 gb->ts = te;
818 gb->requests_completed = 0;
819 }
820 }
821}
822
static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				 kthread_should_stop());
}

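/*
 * Main loop of the per-connection worker thread. The thread idles (with the
 * bundle runtime-suspended) while no test type is set, then issues ping,
 * transfer or sink operations, synchronously or asynchronously, until
 * send_count reaches iteration_max. Once all outstanding operations have
 * completed it marks the run complete (unless user-space has already queued
 * a new test set) and notifies user-space through the "iteration_count"
 * sysfs attribute. An optional delay of us_wait microseconds is inserted
 * between operations.
 */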
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			mutex_unlock(&gb->mutex);

			/* Wait for synchronous and asynchronous completion */
			gb_loopback_async_wait_all(gb);

			/* Mark complete unless user-space has poked us */
			mutex_lock(&gb->mutex);
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj, NULL,
					     "iteration_count");
				dev_dbg(&bundle->dev, "load test complete\n");
			} else {
				dev_dbg(&bundle->dev,
					"continuing on with new test set\n");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (ktime_to_ns(gb->ts) == 0)
			gb->ts = ktime_get();

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_async_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_async_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_async_sink(gb, size);

			if (error) {
				gb->error++;
				gb->iteration_count++;
			}
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		mutex_unlock(&gb->mutex);

		if (us_wait) {
			if (us_wait < 20000)
				usleep_range(us_wait, us_wait + 100);
			else
				msleep(us_wait / 1000);
		}
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}

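/*
 * Pop one raw latency sample (in microseconds) from the kfifo and print it;
 * each read of the debugfs file therefore consumes a sample. Returns -EAGAIN
 * when no samples are queued.
 */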
static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}
DEFINE_SHOW_ATTRIBUTE(gb_loopback_dbgfs_latency);

#define DEBUGFS_NAMELEN 32

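/*
 * Bind to a loopback bundle: create the connection, a per-connection debugfs
 * entry, a class device named gb_loopback<N> carrying the sysfs attributes
 * defined earlier in this file, the raw-latency kfifo (sized by the
 * kfifo_depth module parameter) and the worker thread that drives the test.
 * Completion of a run is signalled through the "iteration_count" attribute,
 * and raw per-operation latencies can be read back one sample per read, for
 * example (assuming debugfs is mounted at the usual location):
 *
 *	cat /sys/kernel/debug/gb_loopback/raw_latency_<bundle-device-name>
 */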
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
				       &gb_loopback_dbgfs_latency_fops);

	gb->id = ida_alloc(&loopback_ida, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_free(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}

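/*
 * Tear down in roughly the reverse order of probe: resume the bundle, disable
 * the connection so no new operations can arrive, stop the worker thread,
 * release the kfifo and debugfs entry, then unregister the device, free the
 * id and destroy the connection.
 */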
static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_free(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}

static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name = "loopback",
	.probe = gb_loopback_probe,
	.disconnect = gb_loopback_disconnect,
	.id_table = gb_loopback_id_table,
};

static int loopback_init(void)
{
	int retval;

	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");