1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Loopback bridge driver for the Greybus loopback module. 4 * 5 * Copyright 2014 Google Inc. 6 * Copyright 2014 Linaro Ltd. 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/module.h> 13 #include <linux/mutex.h> 14 #include <linux/slab.h> 15 #include <linux/kthread.h> 16 #include <linux/delay.h> 17 #include <linux/random.h> 18 #include <linux/sizes.h> 19 #include <linux/cdev.h> 20 #include <linux/fs.h> 21 #include <linux/kfifo.h> 22 #include <linux/debugfs.h> 23 #include <linux/list_sort.h> 24 #include <linux/spinlock.h> 25 #include <linux/workqueue.h> 26 #include <linux/atomic.h> 27 #include <linux/pm_runtime.h> 28 29 #include <asm/div64.h> 30 31 #include "greybus.h" 32 #include "connection.h" 33 34 #define NSEC_PER_DAY 86400000000000ULL 35 36 struct gb_loopback_stats { 37 u32 min; 38 u32 max; 39 u64 sum; 40 u32 count; 41 }; 42 43 struct gb_loopback_device { 44 struct dentry *root; 45 u32 count; 46 size_t size_max; 47 48 /* We need to take a lock in atomic context */ 49 spinlock_t lock; 50 wait_queue_head_t wq; 51 }; 52 53 static struct gb_loopback_device gb_dev; 54 55 struct gb_loopback_async_operation { 56 struct gb_loopback *gb; 57 struct gb_operation *operation; 58 ktime_t ts; 59 int (*completion)(struct gb_loopback_async_operation *op_async); 60 }; 61 62 struct gb_loopback { 63 struct gb_connection *connection; 64 65 struct dentry *file; 66 struct kfifo kfifo_lat; 67 struct mutex mutex; 68 struct task_struct *task; 69 struct device *dev; 70 wait_queue_head_t wq; 71 wait_queue_head_t wq_completion; 72 atomic_t outstanding_operations; 73 74 /* Per connection stats */ 75 ktime_t ts; 76 struct gb_loopback_stats latency; 77 struct gb_loopback_stats throughput; 78 struct gb_loopback_stats requests_per_second; 79 struct gb_loopback_stats apbridge_unipro_latency; 80 struct gb_loopback_stats gbphy_firmware_latency; 81 82 int type; 83 int async; 84 int id; 85 u32 size; 86 
u32 iteration_max; 87 u32 iteration_count; 88 int us_wait; 89 u32 error; 90 u32 requests_completed; 91 u32 requests_timedout; 92 u32 timeout; 93 u32 jiffy_timeout; 94 u32 timeout_min; 95 u32 timeout_max; 96 u32 outstanding_operations_max; 97 u64 elapsed_nsecs; 98 u32 apbridge_latency_ts; 99 u32 gbphy_latency_ts; 100 101 u32 send_count; 102 }; 103 104 static struct class loopback_class = { 105 .name = "gb_loopback", 106 .owner = THIS_MODULE, 107 }; 108 static DEFINE_IDA(loopback_ida); 109 110 /* Min/max values in jiffies */ 111 #define GB_LOOPBACK_TIMEOUT_MIN 1 112 #define GB_LOOPBACK_TIMEOUT_MAX 10000 113 114 #define GB_LOOPBACK_FIFO_DEFAULT 8192 115 116 static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT; 117 module_param(kfifo_depth, uint, 0444); 118 119 /* Maximum size of any one send data buffer we support */ 120 #define MAX_PACKET_SIZE (PAGE_SIZE * 2) 121 122 #define GB_LOOPBACK_US_WAIT_MAX 1000000 123 124 /* interface sysfs attributes */ 125 #define gb_loopback_ro_attr(field) \ 126 static ssize_t field##_show(struct device *dev, \ 127 struct device_attribute *attr, \ 128 char *buf) \ 129 { \ 130 struct gb_loopback *gb = dev_get_drvdata(dev); \ 131 return sprintf(buf, "%u\n", gb->field); \ 132 } \ 133 static DEVICE_ATTR_RO(field) 134 135 #define gb_loopback_ro_stats_attr(name, field, type) \ 136 static ssize_t name##_##field##_show(struct device *dev, \ 137 struct device_attribute *attr, \ 138 char *buf) \ 139 { \ 140 struct gb_loopback *gb = dev_get_drvdata(dev); \ 141 /* Report 0 for min and max if no transfer successed */ \ 142 if (!gb->requests_completed) \ 143 return sprintf(buf, "0\n"); \ 144 return sprintf(buf, "%" #type "\n", gb->name.field); \ 145 } \ 146 static DEVICE_ATTR_RO(name##_##field) 147 148 #define gb_loopback_ro_avg_attr(name) \ 149 static ssize_t name##_avg_show(struct device *dev, \ 150 struct device_attribute *attr, \ 151 char *buf) \ 152 { \ 153 struct gb_loopback_stats *stats; \ 154 struct gb_loopback *gb; \ 155 u64 avg, rem; \ 
156 u32 count; \ 157 gb = dev_get_drvdata(dev); \ 158 stats = &gb->name; \ 159 count = stats->count ? stats->count : 1; \ 160 avg = stats->sum + count / 2000000; /* round closest */ \ 161 rem = do_div(avg, count); \ 162 rem *= 1000000; \ 163 do_div(rem, count); \ 164 return sprintf(buf, "%llu.%06u\n", avg, (u32)rem); \ 165 } \ 166 static DEVICE_ATTR_RO(name##_avg) 167 168 #define gb_loopback_stats_attrs(field) \ 169 gb_loopback_ro_stats_attr(field, min, u); \ 170 gb_loopback_ro_stats_attr(field, max, u); \ 171 gb_loopback_ro_avg_attr(field) 172 173 #define gb_loopback_attr(field, type) \ 174 static ssize_t field##_show(struct device *dev, \ 175 struct device_attribute *attr, \ 176 char *buf) \ 177 { \ 178 struct gb_loopback *gb = dev_get_drvdata(dev); \ 179 return sprintf(buf, "%" #type "\n", gb->field); \ 180 } \ 181 static ssize_t field##_store(struct device *dev, \ 182 struct device_attribute *attr, \ 183 const char *buf, \ 184 size_t len) \ 185 { \ 186 int ret; \ 187 struct gb_loopback *gb = dev_get_drvdata(dev); \ 188 mutex_lock(&gb->mutex); \ 189 ret = sscanf(buf, "%"#type, &gb->field); \ 190 if (ret != 1) \ 191 len = -EINVAL; \ 192 else \ 193 gb_loopback_check_attr(gb, bundle); \ 194 mutex_unlock(&gb->mutex); \ 195 return len; \ 196 } \ 197 static DEVICE_ATTR_RW(field) 198 199 #define gb_dev_loopback_ro_attr(field, conn) \ 200 static ssize_t field##_show(struct device *dev, \ 201 struct device_attribute *attr, \ 202 char *buf) \ 203 { \ 204 struct gb_loopback *gb = dev_get_drvdata(dev); \ 205 return sprintf(buf, "%u\n", gb->field); \ 206 } \ 207 static DEVICE_ATTR_RO(field) 208 209 #define gb_dev_loopback_rw_attr(field, type) \ 210 static ssize_t field##_show(struct device *dev, \ 211 struct device_attribute *attr, \ 212 char *buf) \ 213 { \ 214 struct gb_loopback *gb = dev_get_drvdata(dev); \ 215 return sprintf(buf, "%" #type "\n", gb->field); \ 216 } \ 217 static ssize_t field##_store(struct device *dev, \ 218 struct device_attribute *attr, \ 219 const 
char *buf, \ 220 size_t len) \ 221 { \ 222 int ret; \ 223 struct gb_loopback *gb = dev_get_drvdata(dev); \ 224 mutex_lock(&gb->mutex); \ 225 ret = sscanf(buf, "%"#type, &gb->field); \ 226 if (ret != 1) \ 227 len = -EINVAL; \ 228 else \ 229 gb_loopback_check_attr(gb); \ 230 mutex_unlock(&gb->mutex); \ 231 return len; \ 232 } \ 233 static DEVICE_ATTR_RW(field) 234 235 static void gb_loopback_reset_stats(struct gb_loopback *gb); 236 static void gb_loopback_check_attr(struct gb_loopback *gb) 237 { 238 if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX) 239 gb->us_wait = GB_LOOPBACK_US_WAIT_MAX; 240 if (gb->size > gb_dev.size_max) 241 gb->size = gb_dev.size_max; 242 gb->requests_timedout = 0; 243 gb->requests_completed = 0; 244 gb->iteration_count = 0; 245 gb->send_count = 0; 246 gb->error = 0; 247 248 if (kfifo_depth < gb->iteration_max) { 249 dev_warn(gb->dev, 250 "cannot log bytes %u kfifo_depth %u\n", 251 gb->iteration_max, kfifo_depth); 252 } 253 kfifo_reset_out(&gb->kfifo_lat); 254 255 switch (gb->type) { 256 case GB_LOOPBACK_TYPE_PING: 257 case GB_LOOPBACK_TYPE_TRANSFER: 258 case GB_LOOPBACK_TYPE_SINK: 259 gb->jiffy_timeout = usecs_to_jiffies(gb->timeout); 260 if (!gb->jiffy_timeout) 261 gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN; 262 else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX) 263 gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX; 264 gb_loopback_reset_stats(gb); 265 wake_up(&gb->wq); 266 break; 267 default: 268 gb->type = 0; 269 break; 270 } 271 } 272 273 /* Time to send and receive one message */ 274 gb_loopback_stats_attrs(latency); 275 /* Number of requests sent per second on this cport */ 276 gb_loopback_stats_attrs(requests_per_second); 277 /* Quantity of data sent and received on this cport */ 278 gb_loopback_stats_attrs(throughput); 279 /* Latency across the UniPro link from APBridge's perspective */ 280 gb_loopback_stats_attrs(apbridge_unipro_latency); 281 /* Firmware induced overhead in the GPBridge */ 282 gb_loopback_stats_attrs(gbphy_firmware_latency); 

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in useconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in useconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *	payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);

static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);

/* Convert a nanosecond latency to microseconds (truncating to u32) */
static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	do_div(elapsed_nsecs, NSEC_PER_USEC);
	return elapsed_nsecs;
}

/*
 * Latency between two nanosecond timestamps; if t2 <= t1 the interval is
 * treated as having wrapped within one day (NSEC_PER_DAY) — legacy of the
 * earlier timeval-based timestamping, presumably; TODO confirm this wrap
 * case is still reachable with ktime_get().
 */
static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}

static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
{
	return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
}

/*
 * Send one loopback operation synchronously, copy back the response
 * payload (whose size must match exactly), and record the round-trip
 * time in gb->elapsed_nsecs.  Returns 0 or a negative errno.
 */
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	ktime_t ts, te;
	int ret;

	ts = ktime_get();
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	te = ktime_get();

	/* Calculate the total time the message took */
	gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}

/* Block until every outstanding async operation has completed */
static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}

/*
 * Completion callback for async operations: run the per-type completion
 * hook, fold the result into the stats under gb->mutex, wake waiters and
 * release the operation and its op_async context.
 */
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	ktime_t te;
	int result;

	te = ktime_get();
	result = gb_operation_result(operation);
	op_async = gb_operation_get_data(operation);
	gb = op_async->gb;

	mutex_lock(&gb->mutex);

	if (!result && op_async->completion)
		result = op_async->completion(op_async);

	if (!result) {
		gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
	} else {
		gb->error++;
		if (result == -ETIMEDOUT)
			gb->requests_timedout++;
	}

	gb->iteration_count++;
	gb_loopback_calculate_stats(gb, result);

	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	/* Wake up waiters */
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&gb->wq_completion);

	/* Release resources */
	gb_operation_put(operation);
	kfree(op_async);
}

/*
 * Fire one asynchronous loopback operation.  On success ownership of the
 * operation and op_async context passes to the completion callback; on
 * failure both are released here.
 */
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	gb_operation_set_data(operation, op_async);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	op_async->ts = ktime_get();

	atomic_inc(&gb->outstanding_operations);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					jiffies_to_msecs(gb->jiffy_timeout),
					GFP_KERNEL);
	if (ret) {
		atomic_dec(&gb->outstanding_operations);
		gb_operation_put(operation);
		kfree(op_async);
	}
	return ret;
}

/* Synchronous sink test: send len payload bytes, expect no response payload */
static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}

/*
 * Synchronous transfer test: send a 0x5A-filled payload, verify it is
 * echoed back unchanged, and capture the firmware-supplied latency
 * timestamps from the response's reserved fields.
 */
static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}

/* Synchronous ping test: no payload in either direction */
static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}

/* Asynchronous counterpart of gb_loopback_sync_sink() */
static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}

/*
 * Completion hook for async transfers: compare echoed payload against the
 * request and latch the firmware latency timestamps.  Runs under gb->mutex
 * from the operation callback.
 */
static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gbphy_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}

/* Asynchronous counterpart of gb_loopback_sync_transfer() */
static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	if (retval)
		goto gb_error;

gb_error:
	kfree(request);
	return retval;
}

/* Asynchronous counterpart of gb_loopback_sync_ping() */
static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}

/*
 * Handle incoming loopback requests from the remote end: ping and sink
 * need no response payload; transfer echoes the request payload back.
 */
static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}

/* Reset all per-connection statistics (min primed to U32_MAX) */
static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gbphy_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;
	gb->ts = ktime_set(0, 0);
}

/* Fold a single sample into a min/max/sum/count accumulator */
static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}

/*
 * Fold a windowed aggregate into an accumulator: sum/count take the raw
 * totals while min/max are computed from the per-window average (val/count).
 */
static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}

/* Update requests-per-second stats from completions over a latency window */
static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}

/*
 * Update throughput stats: bytes moved per completed request (headers plus
 * payload in each direction, depending on test type) over a latency window.
 */
static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}

/* Record latency stats for the operation just completed (gb->elapsed_nsecs) */
static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency stastic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}

/*
 * Update all statistics after one iteration.  Windowed stats (throughput,
 * requests/sec) are flushed once per second of elapsed time or when the
 * final iteration completes.  Caller holds gb->mutex.
 */
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	ktime_t te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	te = ktime_get();
	nlat = gb_loopback_calc_latency(gb->ts, te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}

/*
 * In async mode with a configured cap, wait until the number of in-flight
 * operations drops below outstanding_operations_max (or the thread is
 * asked to stop).
 */
static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				  kthread_should_stop());
}

/*
 * Main worker thread: sleeps until a test type is set via sysfs, then
 * issues sync or async operations until send_count reaches iteration_max,
 * pacing by us_wait between sends and holding a PM runtime reference
 * while active.
 */
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			mutex_unlock(&gb->mutex);

			/* Wait for synchronous and asynchronous completion */
			gb_loopback_async_wait_all(gb);

			/* Mark complete unless user-space has poked us */
			mutex_lock(&gb->mutex);
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj, NULL,
					     "iteration_count");
				dev_dbg(&bundle->dev, "load test complete\n");
			} else {
				dev_dbg(&bundle->dev,
					"continuing on with new test set\n");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (ktime_to_ns(gb->ts) == 0)
			gb->ts = ktime_get();

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_async_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_async_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_async_sink(gb, size);

			if (error) {
				gb->error++;
				gb->iteration_count++;
			}
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		mutex_unlock(&gb->mutex);

		if (us_wait) {
			if (us_wait < 20000)
				usleep_range(us_wait, us_wait + 100);
			else
				msleep(us_wait / 1000);
		}
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}

/*
 * debugfs read helper: pop one raw latency sample from the kfifo and print
 * it.  Returns -EAGAIN when the fifo is empty.
 */
static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}
DEFINE_SHOW_ATTRIBUTE(gb_loopback_dbgfs_latency);

#define DEBUGFS_NAMELEN 32

/*
 * Bind to a loopback bundle: create the connection, sysfs device, debugfs
 * latency file, per-connection kfifo and the worker kthread.  The first
 * probed connection also establishes the global maximum payload size.
 */
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
				       &gb_loopback_dbgfs_latency_fops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}

/*
 * Tear-down mirror of probe: disable the connection first so no new
 * requests arrive, stop the worker, then release resources in reverse
 * order of acquisition.
 */
static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}

static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};

/* Module init: debugfs root, sysfs class, then driver registration */
static int loopback_init(void)
{
	int retval;

	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

/* Module exit: undo loopback_init() in reverse order */
static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");