/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/* Prefix every pr_* message with the module name and current function */
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

/* Worker functions, defined at the bottom of this file */
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

/*
 * Complete the currently pending request, if any: store the result
 * reported by the controller and wake up the waiter in __nci_request().
 */
void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}

/*
 * Cancel the currently pending request, if any.  @err is a positive errno
 * value; __nci_request() negates it before returning it to the caller.
 */
static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
			 void (*req)(struct nci_dev *ndev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc =
		wait_for_completion_interruptible_timeout(&ndev->req_completion,
							  timeout);

	pr_debug("wait_for_completion return %ld\n", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			/* translate the NCI response status into an errno */
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			/* req_result holds a positive errno here */
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		/* interrupted (<0) or timed out (0) before completion */
		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
		       completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}

/*
 * Serialized wrapper around __nci_request(); fails with -ENETDOWN
 * unless the device is up.
 */
static inline int nci_request(struct nci_dev *ndev,
			      void (*req)(struct nci_dev *ndev,
					  unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}

/* Send CORE_RESET, asking the NFCC to also reset its configuration. */
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_reset_cmd cmd;

	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

/* Send CORE_INIT (no payload). */
static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

/*
 * Finish initialization: map the RF protocols supported by the
 * controller to RF interfaces via RF_DISCOVER_MAP.
 */
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
		    NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			   NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	/* payload: 1 count byte + only the mapping entries actually filled */
	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
}

/*
 * Build and send RF_DISCOVER for the requested protocol mask (@opt).
 * One discovery configuration is added per RF technology (NFC-A/B/F)
 * that any of the requested protocols can use.
 */
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_JEWEL_MASK
	     || protocols & NFC_PROTO_MIFARE_MASK
	     || protocols & NFC_PROTO_ISO14443_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_A_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_B_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_FELICA_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_F_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		     &cmd);
}

/* Parameters passed (via the opt argument) to nci_rf_discover_select_req() */
struct nci_rf_discover_select_param {
	__u8 rf_discovery_id;
	__u8 rf_protocol;
};

/*
 * Select one of the discovered targets: pick the RF interface matching
 * the requested RF protocol and send RF_DISCOVER_SELECT.
 */
static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_discover_select_param *param =
		(struct nci_rf_discover_select_param *)opt;
	struct nci_rf_discover_select_cmd cmd;

	cmd.rf_discovery_id = param->rf_discovery_id;
	cmd.rf_protocol = param->rf_protocol;

	switch (cmd.rf_protocol) {
	case NCI_RF_PROTOCOL_ISO_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
		break;

	case NCI_RF_PROTOCOL_NFC_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
		break;

	default:
		cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
		break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
		     sizeof(struct nci_rf_discover_select_cmd), &cmd);
}

/* Send RF_DEACTIVATE, returning the controller to idle mode. */
static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
		     sizeof(struct nci_rf_deactivate_cmd), &cmd);
}

/*
 * Bring the device up: open the driver, then run the
 * reset -> init -> discover-map sequence under req_lock.
 */
static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	/* open the command window: allow a single outstanding command */
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
			   msecs_to_jiffies(NCI_RESET_TIMEOUT));
283 if (!rc) { 284 rc = __nci_request(ndev, nci_init_req, 0, 285 msecs_to_jiffies(NCI_INIT_TIMEOUT)); 286 } 287 288 if (!rc) { 289 rc = __nci_request(ndev, nci_init_complete_req, 0, 290 msecs_to_jiffies(NCI_INIT_TIMEOUT)); 291 } 292 293 clear_bit(NCI_INIT, &ndev->flags); 294 295 if (!rc) { 296 set_bit(NCI_UP, &ndev->flags); 297 nci_clear_target_list(ndev); 298 atomic_set(&ndev->state, NCI_IDLE); 299 } else { 300 /* Init failed, cleanup */ 301 skb_queue_purge(&ndev->cmd_q); 302 skb_queue_purge(&ndev->rx_q); 303 skb_queue_purge(&ndev->tx_q); 304 305 ndev->ops->close(ndev); 306 ndev->flags = 0; 307 } 308 309 done: 310 mutex_unlock(&ndev->req_lock); 311 return rc; 312 } 313 314 static int nci_close_device(struct nci_dev *ndev) 315 { 316 nci_req_cancel(ndev, ENODEV); 317 mutex_lock(&ndev->req_lock); 318 319 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { 320 del_timer_sync(&ndev->cmd_timer); 321 del_timer_sync(&ndev->data_timer); 322 mutex_unlock(&ndev->req_lock); 323 return 0; 324 } 325 326 /* Drop RX and TX queues */ 327 skb_queue_purge(&ndev->rx_q); 328 skb_queue_purge(&ndev->tx_q); 329 330 /* Flush RX and TX wq */ 331 flush_workqueue(ndev->rx_wq); 332 flush_workqueue(ndev->tx_wq); 333 334 /* Reset device */ 335 skb_queue_purge(&ndev->cmd_q); 336 atomic_set(&ndev->cmd_cnt, 1); 337 338 set_bit(NCI_INIT, &ndev->flags); 339 __nci_request(ndev, nci_reset_req, 0, 340 msecs_to_jiffies(NCI_RESET_TIMEOUT)); 341 clear_bit(NCI_INIT, &ndev->flags); 342 343 /* Flush cmd wq */ 344 flush_workqueue(ndev->cmd_wq); 345 346 /* After this point our queues are empty 347 * and no works are scheduled. 
*/ 348 ndev->ops->close(ndev); 349 350 /* Clear flags */ 351 ndev->flags = 0; 352 353 mutex_unlock(&ndev->req_lock); 354 355 return 0; 356 } 357 358 /* NCI command timer function */ 359 static void nci_cmd_timer(unsigned long arg) 360 { 361 struct nci_dev *ndev = (void *) arg; 362 363 atomic_set(&ndev->cmd_cnt, 1); 364 queue_work(ndev->cmd_wq, &ndev->cmd_work); 365 } 366 367 /* NCI data exchange timer function */ 368 static void nci_data_timer(unsigned long arg) 369 { 370 struct nci_dev *ndev = (void *) arg; 371 372 set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); 373 queue_work(ndev->rx_wq, &ndev->rx_work); 374 } 375 376 static int nci_dev_up(struct nfc_dev *nfc_dev) 377 { 378 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 379 380 return nci_open_device(ndev); 381 } 382 383 static int nci_dev_down(struct nfc_dev *nfc_dev) 384 { 385 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 386 387 return nci_close_device(ndev); 388 } 389 390 static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols) 391 { 392 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 393 int rc; 394 395 if ((atomic_read(&ndev->state) == NCI_DISCOVERY) || 396 (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) { 397 pr_err("unable to start poll, since poll is already active\n"); 398 return -EBUSY; 399 } 400 401 if (ndev->target_active_prot) { 402 pr_err("there is an active target\n"); 403 return -EBUSY; 404 } 405 406 if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) || 407 (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) { 408 pr_debug("target active or w4 select, implicitly deactivate\n"); 409 410 rc = nci_request(ndev, nci_rf_deactivate_req, 0, 411 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 412 if (rc) 413 return -EBUSY; 414 } 415 416 rc = nci_request(ndev, nci_rf_discover_req, protocols, 417 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); 418 419 if (!rc) 420 ndev->poll_prots = protocols; 421 422 return rc; 423 } 424 425 static void nci_stop_poll(struct nfc_dev *nfc_dev) 426 { 427 struct 
nci_dev *ndev = nfc_get_drvdata(nfc_dev); 428 429 if ((atomic_read(&ndev->state) != NCI_DISCOVERY) && 430 (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) { 431 pr_err("unable to stop poll, since poll is not active\n"); 432 return; 433 } 434 435 nci_request(ndev, nci_rf_deactivate_req, 0, 436 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 437 } 438 439 static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx, 440 __u32 protocol) 441 { 442 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 443 struct nci_rf_discover_select_param param; 444 struct nfc_target *target = NULL; 445 int i; 446 int rc = 0; 447 448 pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol); 449 450 if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) && 451 (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) { 452 pr_err("there is no available target to activate\n"); 453 return -EINVAL; 454 } 455 456 if (ndev->target_active_prot) { 457 pr_err("there is already an active target\n"); 458 return -EBUSY; 459 } 460 461 for (i = 0; i < ndev->n_targets; i++) { 462 if (ndev->targets[i].idx == target_idx) { 463 target = &ndev->targets[i]; 464 break; 465 } 466 } 467 468 if (!target) { 469 pr_err("unable to find the selected target\n"); 470 return -EINVAL; 471 } 472 473 if (!(target->supported_protocols & (1 << protocol))) { 474 pr_err("target does not support the requested protocol 0x%x\n", 475 protocol); 476 return -EINVAL; 477 } 478 479 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { 480 param.rf_discovery_id = target->idx; 481 482 if (protocol == NFC_PROTO_JEWEL) 483 param.rf_protocol = NCI_RF_PROTOCOL_T1T; 484 else if (protocol == NFC_PROTO_MIFARE) 485 param.rf_protocol = NCI_RF_PROTOCOL_T2T; 486 else if (protocol == NFC_PROTO_FELICA) 487 param.rf_protocol = NCI_RF_PROTOCOL_T3T; 488 else if (protocol == NFC_PROTO_ISO14443) 489 param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; 490 else 491 param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; 492 493 rc = nci_request(ndev, 
nci_rf_discover_select_req, 494 (unsigned long)¶m, 495 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT)); 496 } 497 498 if (!rc) 499 ndev->target_active_prot = protocol; 500 501 return rc; 502 } 503 504 static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx) 505 { 506 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 507 508 pr_debug("target_idx %d\n", target_idx); 509 510 if (!ndev->target_active_prot) { 511 pr_err("unable to deactivate target, no active target\n"); 512 return; 513 } 514 515 ndev->target_active_prot = 0; 516 517 if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) { 518 nci_request(ndev, nci_rf_deactivate_req, 0, 519 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); 520 } 521 } 522 523 static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx, 524 struct sk_buff *skb, 525 data_exchange_cb_t cb, void *cb_context) 526 { 527 struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); 528 int rc; 529 530 pr_debug("target_idx %d, len %d\n", target_idx, skb->len); 531 532 if (!ndev->target_active_prot) { 533 pr_err("unable to exchange data, no active target\n"); 534 return -EINVAL; 535 } 536 537 if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) 538 return -EBUSY; 539 540 /* store cb and context to be used on receiving data */ 541 ndev->data_exchange_cb = cb; 542 ndev->data_exchange_cb_context = cb_context; 543 544 rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb); 545 if (rc) 546 clear_bit(NCI_DATA_EXCHANGE, &ndev->flags); 547 548 return rc; 549 } 550 551 static struct nfc_ops nci_nfc_ops = { 552 .dev_up = nci_dev_up, 553 .dev_down = nci_dev_down, 554 .start_poll = nci_start_poll, 555 .stop_poll = nci_stop_poll, 556 .activate_target = nci_activate_target, 557 .deactivate_target = nci_deactivate_target, 558 .data_exchange = nci_data_exchange, 559 }; 560 561 /* ---- Interface to NCI drivers ---- */ 562 563 /** 564 * nci_allocate_device - allocate a new nci device 565 * 566 * @ops: device operations 567 * @supported_protocols: NFC 
protocols supported by the device
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
				    __u32 supported_protocols,
				    int tx_headroom, int tx_tailroom)
{
	struct nci_dev *ndev;

	pr_debug("supported_protocols 0x%x\n", supported_protocols);

	/* open, close and send are mandatory driver callbacks */
	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	/* reserve extra headroom for the NCI data packet header */
	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
					    supported_protocols,
					    tx_headroom + NCI_DATA_HDR_SIZE,
					    tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	/* one single-threaded workqueue per direction: cmd, rx and tx */
	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
		    (unsigned long) ndev);
	setup_timer(&ndev->data_timer, nci_data_timer,
		    (unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);

/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	/* drop frames unless the device is up or still initializing */
	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
	    && !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);

/* Hand one frame to the driver for transmission. */
static int nci_send_frame(struct sk_buff *skb)
{
	struct
nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}

/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	/* build the control packet header: GID/OID taken from the opcode */
	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	/* queue the command; the cmd worker sends it when the window opens */
	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}

/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		/* Check if data flow control is used */
		if (atomic_read(&ndev->credits_cnt) !=
		    NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&ndev->credits_cnt);

		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_conn_id(skb->data),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		/* (re)arm the data exchange timeout for the response */
		mod_timer(&ndev->data_timer,
			  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame, dispatching on the NCI message type */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}

	/* check if a data exchange timeout has occurred */
	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
		/* complete the data exchange transaction, if exists */
		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
			nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);

		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	}
}

/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		/* close the command window until the response (or timeout) */
		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		/* arm the command response timeout */
		mod_timer(&ndev->cmd_timer,
			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}