// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/greybus.h>
#include <asm/unaligned.h>

#include "arpc.h"
#include "greybus_trace.h"


/* Default timeout for USB vendor requests. */
#define ES2_USB_CTRL_TIMEOUT	500

/* Default timeout for ARPC CPort requests */
#define ES2_ARPC_CPORT_TIMEOUT	500

/* Fixed CPort numbers */
#define ES2_CPORT_CDSI0		16
#define ES2_CPORT_CDSI1		17

/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048

/* Memory sizes for the ARPC buffers */
#define ARPC_OUT_SIZE_MAX	U16_MAX
#define ARPC_IN_SIZE_MAX	128

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

#define APB1_LOG_SIZE		SZ_16K

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/* Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	8

/*
 * Number of ARPC in urbs in flight at any point in time.
 */
#define NUM_ARPC_IN_URB		2

/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/**
 * struct es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for CPort out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: whether the dedicated CDSI1 CPort is currently in use
 *
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives a unique id to ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	int arpc_id_cycle;
	spinlock_t arpc_lock;
	struct list_head arpcs;
};

struct arpc {
	struct list_head list;
	struct arpc_request_message *req;
	struct arpc_response_message *resp;
	struct completion response_received;
	bool active;
};

static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout);

static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	u8 *data;
	int retval;

	data = kmemdup(req, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 cmd,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
	else
		retval = 0;

	kfree(data);
	return retval;
}

static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}

static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

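	/*
	 * The vendor request is sent asynchronously as a control URB on
	 * endpoint 0.  The setup packet and the payload share a single
	 * allocation (dr); ap_urb_complete() frees both together with the
	 * urb once the transfer has finished.
	 */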
	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}

static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		  bool async)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	if (async)
		return output_async(es2, req, size, cmd);

	return output_sync(es2, req, size, cmd);
}

static int es2_cport_in_enable(struct es2_ap_dev *es2,
			       struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_cport_in_disable(struct es2_ap_dev *es2,
				 struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}
}

static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_arpc_in_disable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}
}

static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (!es2->cport_out_urb_busy[i] &&
		    !es2->cport_out_urb_cancelled[i]) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_dbg(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}

/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Can not be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	usb_kill_urb(urb);

	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}
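/*
 * CPort allocation policy: the fixed CDSI0/CDSI1 CPorts are never handed out
 * directly.  Offloaded connections that request CDSI1 get the dedicated CPort
 * (tracked via cdsi1_in_use); everything else is allocated from the host
 * device's CPort id map.
 */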
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	switch (cport_id) {
	case ES2_CPORT_CDSI1:
		es2->cdsi1_in_use = false;
		return;
	}

	ida_simple_remove(&hd->cport_id_map, cport_id);
}

static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret != sizeof(*req)) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		if (ret >= 0)
			ret = -EIO;

		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}
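/*
 * The remaining CPort operations are implemented as APBridgeA RPCs: the
 * request goes out as a GB_APB_REQUEST_ARPC_RUN control transfer and the
 * response comes back on the ARPC bulk-in endpoint (see arpc_sync()).
 */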
static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_connected_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to set connected state for cport %u: %d\n",
			cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_flush_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
			      u8 phase, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_shutdown_req req;
	int result;
	int ret;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.timeout = cpu_to_le16(timeout);
	req.phase = phase;
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
			     size_t peer_space, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_quiesce_req req;
	int result;
	int ret;

	if (peer_space > U16_MAX)
		return -EINVAL;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.peer_space = cpu_to_le16(peer_space);
	req.timeout = cpu_to_le16(timeout);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_clear_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}
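/*
 * Host device callbacks.  Note that hd_priv_size makes gb_hd_create()
 * allocate room for a struct es2_ap_dev at the end of the host device
 * structure, which is what hd_to_es2() relies on.
 */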
static struct gb_hd_driver es2_driver = {
	.hd_priv_size		= sizeof(struct es2_ap_dev),
	.message_send		= message_send,
	.message_cancel		= message_cancel,
	.cport_allocate		= es2_cport_allocate,
	.cport_release		= es2_cport_release,
	.cport_enable		= cport_enable,
	.cport_connected	= es2_cport_connected,
	.cport_flush		= es2_cport_flush,
	.cport_shutdown		= es2_cport_shutdown,
	.cport_quiesce		= es2_cport_quiesce,
	.cport_clear		= es2_cport_clear,
	.latency_tag_enable	= latency_tag_enable,
	.latency_tag_disable	= latency_tag_disable,
	.output			= output,
};

/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		fallthrough;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}

static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
				  urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}

static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}
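/*
 * APBridgeA RPC (ARPC) handling.  An arpc is allocated and queued on
 * es2->arpcs with a cycling id, the request is pushed to the bridge with a
 * GB_APB_REQUEST_ARPC_RUN control transfer, and the matching response is
 * delivered asynchronously on the ARPC bulk-in endpoint, where
 * arpc_in_callback() completes the waiter.
 */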
static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
{
	struct arpc *rpc;

	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
		return NULL;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	INIT_LIST_HEAD(&rpc->list);
	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
	if (!rpc->req)
		goto err_free_rpc;

	rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
	if (!rpc->resp)
		goto err_free_req;

	rpc->req->type = type;
	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
	memcpy(rpc->req->data, payload, size);

	init_completion(&rpc->response_received);

	return rpc;

err_free_req:
	kfree(rpc->req);
err_free_rpc:
	kfree(rpc);

	return NULL;
}

static void arpc_free(struct arpc *rpc)
{
	kfree(rpc->req);
	kfree(rpc->resp);
	kfree(rpc);
}

static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
{
	struct arpc *rpc;

	list_for_each_entry(rpc, &es2->arpcs, list) {
		if (rpc->req->id == id)
			return rpc;
	}

	return NULL;
}

static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
{
	rpc->active = true;
	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
	list_add_tail(&rpc->list, &es2->arpcs);
}

static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
{
	if (rpc->active) {
		rpc->active = false;
		list_del(&rpc->list);
	}
}

static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval != le16_to_cpu(rpc->req->size)) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		if (retval > 0)
			retval = -EIO;
		return retval;
	}

	return 0;
}

static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);
	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}
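/*
 * The response is copied and the completion signalled with arpc_lock held,
 * so a waiter that has timed out in arpc_sync() cannot remove and free the
 * arpc while the callback is still using it.
 */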
static void arpc_in_callback(struct urb *urb)
{
	struct es2_ap_dev *es2 = urb->context;
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	struct arpc *rpc;
	struct arpc_response_message *resp;
	unsigned long flags;
	int retval;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*resp)) {
		dev_err(dev, "short arpc response received\n");
		goto exit;
	}

	resp = urb->transfer_buffer;
	spin_lock_irqsave(&es2->arpc_lock, flags);
	rpc = arpc_find(es2, resp->id);
	if (!rpc) {
		dev_err(dev, "invalid arpc response id received: %u\n",
			le16_to_cpu(resp->id));
		spin_unlock_irqrestore(&es2->arpc_lock, flags);
		goto exit;
	}

	arpc_del(es2, rpc);
	memcpy(rpc->resp, resp, sizeof(*resp));
	complete(&rpc->response_received);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
}

#define APB1_LOG_MSG_SIZE	64
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	do {
		retval = usb_control_msg(es2->usb_dev,
					 usb_rcvctrlpipe(es2->usb_dev, 0),
					 GB_APB_REQUEST_LOG,
					 USB_DIR_IN | USB_TYPE_VENDOR |
					 USB_RECIP_INTERFACE,
					 0x00, 0x00,
					 buf,
					 APB1_LOG_MSG_SIZE,
					 ES2_USB_CTRL_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}

static int apb_log_poll(void *data)
{
	struct es2_ap_dev *es2 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		msleep(1000);
		apb_log_get(es2, buf);
	}

	kfree(buf);

	return 0;
}

static ssize_t apb_log_read(struct file *f, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);

	kfree(tmp_buf);

	return ret;
}

static const struct file_operations apb_log_fops = {
	.read	= apb_log_read,
};
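/*
 * APB log support: apb_log_poll() runs in a kthread and periodically pulls
 * log data from the bridge with GB_APB_REQUEST_LOG vendor requests, feeding
 * it into apb_log_fifo, which userspace drains through the "apb_log" debugfs
 * file.  The "apb_log_enable" debugfs file starts and stops the thread.
 */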
static void usb_log_enable(struct es2_ap_dev *es2)
{
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* get log from APB1 */
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
		return;
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", 0444,
						  gb_debugfs_get(), es2,
						  &apb_log_fops);
}

static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}

static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
	char tmp_buf[3];

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
}

static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es2_ap_dev *es2 = file_inode(f)->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}

static const struct file_operations apb_log_enable_fops = {
	.read	= apb_log_enable_read,
	.write	= apb_log_enable_write,
};

static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

/*
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct es2_ap_dev *es2;
	struct gb_host_device *hd;
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	__u8 ep_addr;
	int retval;
	int i;
	int num_cports;
	bool bulk_out_found = false;
	bool bulk_in_found = false;
	bool arpc_in_found = false;

	udev = usb_get_dev(interface_to_usbdev(interface));

	num_cports = apb_get_cport_count(udev);
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
			  num_cports);
	if (IS_ERR(hd)) {
		usb_put_dev(udev);
		return PTR_ERR(hd);
	}

	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
	INIT_KFIFO(es2->apb_log_fifo);
	usb_set_intfdata(interface, es2);

	/*
	 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
	 * dynamically.
	 */
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
	if (retval)
		goto error;
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
	if (retval)
		goto error;

	/* find all bulk endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		ep_addr = endpoint->bEndpointAddress;

		if (usb_endpoint_is_bulk_in(endpoint)) {
			if (!bulk_in_found) {
				es2->cport_in.endpoint = ep_addr;
				bulk_in_found = true;
			} else if (!arpc_in_found) {
				es2->arpc_endpoint_in = ep_addr;
				arpc_in_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk IN endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		if (usb_endpoint_is_bulk_out(endpoint)) {
			if (!bulk_out_found) {
				es2->cport_out_endpoint = ep_addr;
				bulk_out_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk OUT endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		dev_warn(&udev->dev,
			 "Unknown endpoint type found, address 0x%02x\n",
			 ep_addr);
	}
	if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		retval = -ENODEV;
		goto error;
	}

	/* Allocate buffers for our cport in messages */
	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->cport_in.urb[i] = urb;

		buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
				  buffer, ES2_GBUF_MSG_SIZE_MAX,
				  cport_in_callback, hd);

		es2->cport_in.buffer[i] = buffer;
	}

	/* Allocate buffers for ARPC in messages */
	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->arpc_urb[i] = urb;

		buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev,
						  es2->arpc_endpoint_in),
				  buffer, ARPC_IN_SIZE_MAX,
				  arpc_in_callback, es2);

		es2->arpc_buffer[i] = buffer;
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}

		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
							 0644,
							 gb_debugfs_get(), es2,
							 &apb_log_enable_fops);

	INIT_LIST_HEAD(&es2->arpcs);
	spin_lock_init(&es2->arpc_lock);
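	/*
	 * The ARPC in-urbs are submitted before the host device is
	 * registered so that responses to any CPort ARPCs issued while the
	 * host device comes up can be received; the CPort in-urbs are only
	 * enabled once gb_hd_add() has succeeded.
	 */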
	retval = es2_arpc_in_enable(es2);
	if (retval)
		goto error;

	retval = gb_hd_add(hd);
	if (retval)
		goto err_disable_arpc_in;

	retval = es2_cport_in_enable(es2, &es2->cport_in);
	if (retval)
		goto err_hd_del;

	return 0;

err_hd_del:
	gb_hd_del(hd);
err_disable_arpc_in:
	es2_arpc_in_disable(es2);
error:
	es2_destroy(es2);

	return retval;
}

static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}

static struct usb_driver es2_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
	.soft_unbind =	1,
};

module_usb_driver(es2_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");