// SPDX-License-Identifier: GPL-2.0-only
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2014 Marvell International Ltd.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>
#include <linux/kcov.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

struct core_conn_create_data {
	int length;
	struct nci_core_conn_create_cmd *cmd;
};

static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
						   int conn_id)
{
	struct nci_conn_info *conn_info;

	list_for_each_entry(conn_info, &ndev->conn_info_list, list) {
		if (conn_info->conn_id == conn_id)
			return conn_info;
	}

	return NULL;
}

int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type,
					  const struct dest_spec_params *params)
{
	const struct nci_conn_info *conn_info;

	list_for_each_entry(conn_info, &ndev->conn_info_list, list) {
		if (conn_info->dest_type == dest_type) {
			if (!params)
				return conn_info->conn_id;

			if (params->id == conn_info->dest_params->id &&
			    params->protocol == conn_info->dest_params->protocol)
				return conn_info->conn_id;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL(nci_get_conn_info_by_dest_type_params);

/* ---- NCI requests ---- */

void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}
EXPORT_SYMBOL(nci_req_complete);

static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}
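
/* The request helpers below pair a command-sending callback with the
 * req_completion above: the req callback queues a command, and the matching
 * response handler (see rsp.c/ntf.c) calls nci_req_complete() to wake the
 * waiter.  Illustrative sketch only (foo_req/FOO_OPCODE are hypothetical
 * names), mirroring how nci_core_reset() is built on __nci_request():
 *
 *	static void foo_req(struct nci_dev *ndev, const void *opt)
 *	{
 *		nci_send_cmd(ndev, FOO_OPCODE, 0, NULL);
 *	}
 *
 *	rc = __nci_request(ndev, foo_req, NULL,
 *			   msecs_to_jiffies(NCI_CMD_TIMEOUT));
 */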

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
			 void (*req)(struct nci_dev *ndev, const void *opt),
			 const void *opt, __u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	reinit_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc =
		wait_for_completion_interruptible_timeout(&ndev->req_completion,
							  timeout);

	pr_debug("wait_for_completion return %ld\n", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
		       completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}

inline int nci_request(struct nci_dev *ndev,
		       void (*req)(struct nci_dev *ndev,
				   const void *opt),
		       const void *opt, __u32 timeout)
{
	int rc;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	/* check the state after obtaining the lock against any races
	 * from nci_close_device when the device gets removed.
	 */
	if (test_bit(NCI_UP, &ndev->flags))
		rc = __nci_request(ndev, req, opt, timeout);
	else
		rc = -ENETDOWN;
	mutex_unlock(&ndev->req_lock);

	return rc;
}

static void nci_reset_req(struct nci_dev *ndev, const void *opt)
{
	struct nci_core_reset_cmd cmd;

	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, const void *opt)
{
	u8 plen = 0;

	if (opt)
		plen = sizeof(struct nci_core_init_v2_cmd);

	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, plen, opt);
}

static void nci_init_complete_req(struct nci_dev *ndev, const void *opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
		    NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			   NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
}

struct nci_set_config_param {
	__u8 id;
	size_t len;
	const __u8 *val;
};

static void nci_set_config_req(struct nci_dev *ndev, const void *opt)
{
	const struct nci_set_config_param *param = opt;
	struct nci_core_set_config_cmd cmd;

	BUG_ON(param->len > NCI_MAX_PARAM_LEN);

	cmd.num_params = 1;
	cmd.param.id = param->id;
	cmd.param.len = param->len;
	memcpy(cmd.param.val, param->val, param->len);

	nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
}

struct nci_rf_discover_param {
	__u32 im_protocols;
	__u32 tm_protocols;
};

static void nci_rf_discover_req(struct nci_dev *ndev, const void *opt)
{
	const struct nci_rf_discover_param *param = opt;
	struct nci_rf_disc_cmd cmd;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (param->im_protocols & NFC_PROTO_JEWEL_MASK ||
	     param->im_protocols & NFC_PROTO_MIFARE_MASK ||
	     param->im_protocols & NFC_PROTO_ISO14443_MASK ||
	     param->im_protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_A_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (param->im_protocols & NFC_PROTO_ISO14443_B_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_B_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (param->im_protocols & NFC_PROTO_FELICA_MASK ||
	     param->im_protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_F_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (param->im_protocols & NFC_PROTO_ISO15693_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_V_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS - 1) &&
	    (param->tm_protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_A_PASSIVE_LISTEN_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_F_PASSIVE_LISTEN_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		     &cmd);
}

struct nci_rf_discover_select_param {
	__u8 rf_discovery_id;
	__u8 rf_protocol;
};

static void nci_rf_discover_select_req(struct nci_dev *ndev, const void *opt)
{
	const struct nci_rf_discover_select_param *param = opt;
	struct nci_rf_discover_select_cmd cmd;

	cmd.rf_discovery_id = param->rf_discovery_id;
	cmd.rf_protocol = param->rf_protocol;

	switch (cmd.rf_protocol) {
	case NCI_RF_PROTOCOL_ISO_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
		break;

	case NCI_RF_PROTOCOL_NFC_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
		break;

	default:
		cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
		break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
		     sizeof(struct nci_rf_discover_select_cmd), &cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, const void *opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = (unsigned long)opt;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
		     sizeof(struct nci_rf_deactivate_cmd), &cmd);
}

struct nci_cmd_param {
	__u16 opcode;
	size_t len;
	const __u8 *payload;
};

static void nci_generic_req(struct nci_dev *ndev, const void *opt)
{
	const struct nci_cmd_param *param = opt;

	nci_send_cmd(ndev, param->opcode, param->len, param->payload);
}

int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, const __u8 *payload)
{
	struct nci_cmd_param param;

	param.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, oid);
	param.len = len;
	param.payload = payload;

	return __nci_request(ndev, nci_generic_req, &param,
			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
EXPORT_SYMBOL(nci_prop_cmd);
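
/* Typical use from a vendor driver: send a proprietary (NCI_GID_PROPRIETARY)
 * command and block until the matching response handler completes the
 * request.  Hypothetical sketch only; FOO_OID and the payload bytes are
 * illustrative, not a real vendor OID:
 *
 *	static const __u8 payload[] = { 0x01, 0x02 };
 *	int rc = nci_prop_cmd(ndev, FOO_OID, sizeof(payload), payload);
 */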

int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len,
		 const __u8 *payload)
{
	struct nci_cmd_param param;

	param.opcode = opcode;
	param.len = len;
	param.payload = payload;

	return __nci_request(ndev, nci_generic_req, &param,
			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
EXPORT_SYMBOL(nci_core_cmd);

int nci_core_reset(struct nci_dev *ndev)
{
	return __nci_request(ndev, nci_reset_req, (void *)0,
			     msecs_to_jiffies(NCI_RESET_TIMEOUT));
}
EXPORT_SYMBOL(nci_core_reset);

int nci_core_init(struct nci_dev *ndev)
{
	return __nci_request(ndev, nci_init_req, (void *)0,
			     msecs_to_jiffies(NCI_INIT_TIMEOUT));
}
EXPORT_SYMBOL(nci_core_init);

struct nci_loopback_data {
	u8 conn_id;
	struct sk_buff *data;
};

static void nci_send_data_req(struct nci_dev *ndev, const void *opt)
{
	const struct nci_loopback_data *data = opt;

	nci_send_data(ndev, data->conn_id, data->data);
}

static void nci_nfcc_loopback_cb(void *context, struct sk_buff *skb, int err)
{
	struct nci_dev *ndev = (struct nci_dev *)context;
	struct nci_conn_info *conn_info;

	conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id);
	if (!conn_info) {
		nci_req_complete(ndev, NCI_STATUS_REJECTED);
		return;
	}

	conn_info->rx_skb = skb;

	nci_req_complete(ndev, NCI_STATUS_OK);
}

int nci_nfcc_loopback(struct nci_dev *ndev, const void *data, size_t data_len,
		      struct sk_buff **resp)
{
	int r;
	struct nci_loopback_data loopback_data;
	struct nci_conn_info *conn_info;
	struct sk_buff *skb;
	int conn_id = nci_get_conn_info_by_dest_type_params(ndev,
					NCI_DESTINATION_NFCC_LOOPBACK, NULL);

	if (conn_id < 0) {
		r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCC_LOOPBACK,
					 0, 0, NULL);
		if (r != NCI_STATUS_OK)
			return r;

		conn_id = nci_get_conn_info_by_dest_type_params(ndev,
					NCI_DESTINATION_NFCC_LOOPBACK,
					NULL);
	}

	conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
	if (!conn_info)
		return -EPROTO;

	/* store cb and context to be used on receiving data */
	conn_info->data_exchange_cb = nci_nfcc_loopback_cb;
	conn_info->data_exchange_cb_context = ndev;

	skb = nci_skb_alloc(ndev, NCI_DATA_HDR_SIZE + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, NCI_DATA_HDR_SIZE);
	skb_put_data(skb, data, data_len);

	loopback_data.conn_id = conn_id;
	loopback_data.data = skb;

	ndev->cur_conn_id = conn_id;
	r = nci_request(ndev, nci_send_data_req, &loopback_data,
			msecs_to_jiffies(NCI_DATA_TIMEOUT));
	if (r == NCI_STATUS_OK && resp)
		*resp = conn_info->rx_skb;

	return r;
}
EXPORT_SYMBOL(nci_nfcc_loopback);
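
/* The loopback connection gives drivers a way to exercise the DH to NFCC
 * data path end to end.  Illustrative sketch only (the test pattern is
 * arbitrary and error handling is elided):
 *
 *	static const u8 pattern[] = { 0xaa, 0x55, 0xaa, 0x55 };
 *	struct sk_buff *resp = NULL;
 *	int rc = nci_nfcc_loopback(ndev, pattern, sizeof(pattern), &resp);
 *
 * On success (rc == 0) *resp points at the skb echoed back by the NFCC.
 */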

static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UNREG, &ndev->flags)) {
		rc = -ENODEV;
		goto done;
	}

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	if (ndev->ops->init)
		rc = ndev->ops->init(ndev);

	if (!rc) {
		rc = __nci_request(ndev, nci_reset_req, (void *)0,
				   msecs_to_jiffies(NCI_RESET_TIMEOUT));
	}

	if (!rc && ndev->ops->setup) {
		rc = ndev->ops->setup(ndev);
	}

	if (!rc) {
		struct nci_core_init_v2_cmd nci_init_v2_cmd = {
			.feature1 = NCI_FEATURE_DISABLE,
			.feature2 = NCI_FEATURE_DISABLE
		};
		const void *opt = NULL;

		if (ndev->nci_ver & NCI_VER_2_MASK)
			opt = &nci_init_v2_cmd;

		rc = __nci_request(ndev, nci_init_req, opt,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc && ndev->ops->post_setup)
		rc = ndev->ops->post_setup(ndev);

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, (void *)0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_IDLE);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags &= BIT(NCI_UNREG);
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}

static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);

	/* This mutex needs to be held as a barrier for
	 * caller nci_unregister_device
	 */
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		/* Need to flush the cmd wq in case
		 * there is a queued/running cmd_work
		 */
		flush_workqueue(ndev->cmd_wq);
		del_timer_sync(&ndev->cmd_timer);
		del_timer_sync(&ndev->data_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, (void *)0,
		      msecs_to_jiffies(NCI_RESET_TIMEOUT));

	/* After this point our queues are empty
	 * and no works are scheduled.
	 */
	ndev->ops->close(ndev);

	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	del_timer_sync(&ndev->cmd_timer);

	/* Clear flags except NCI_UNREG */
	ndev->flags &= BIT(NCI_UNREG);

	mutex_unlock(&ndev->req_lock);

	return 0;
}

/* NCI command timer function */
static void nci_cmd_timer(struct timer_list *t)
{
	struct nci_dev *ndev = from_timer(ndev, t, cmd_timer);

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

/* NCI data exchange timer function */
static void nci_data_timer(struct timer_list *t)
{
	struct nci_dev *ndev = from_timer(ndev, t, data_timer);

	set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	queue_work(ndev->rx_wq, &ndev->rx_work);
}

static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}

int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, const __u8 *val)
{
	struct nci_set_config_param param;

	if (!val || !len)
		return 0;

	param.id = id;
	param.len = len;
	param.val = val;

	return __nci_request(ndev, nci_set_config_req, &param,
			     msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
}
EXPORT_SYMBOL(nci_set_config);

static void nci_nfcee_discover_req(struct nci_dev *ndev, const void *opt)
{
	struct nci_nfcee_discover_cmd cmd;
	__u8 action = (unsigned long)opt;

	cmd.discovery_action = action;

	nci_send_cmd(ndev, NCI_OP_NFCEE_DISCOVER_CMD, 1, &cmd);
}

int nci_nfcee_discover(struct nci_dev *ndev, u8 action)
{
	unsigned long opt = action;

	return __nci_request(ndev, nci_nfcee_discover_req, (void *)opt,
			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
EXPORT_SYMBOL(nci_nfcee_discover);

static void nci_nfcee_mode_set_req(struct nci_dev *ndev, const void *opt)
{
	const struct nci_nfcee_mode_set_cmd *cmd = opt;

	nci_send_cmd(ndev, NCI_OP_NFCEE_MODE_SET_CMD,
		     sizeof(struct nci_nfcee_mode_set_cmd), cmd);
}

int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode)
{
	struct nci_nfcee_mode_set_cmd cmd;

	cmd.nfcee_id = nfcee_id;
	cmd.nfcee_mode = nfcee_mode;

	return __nci_request(ndev, nci_nfcee_mode_set_req, &cmd,
			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
EXPORT_SYMBOL(nci_nfcee_mode_set);
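
/* Drivers that manage a secure element typically chain the two NFCEE calls
 * above: start NFCEE discovery, then enable the element once its id is known
 * from the NFCEE_DISCOVER notification.  Hedged sketch (nfcee_id comes from
 * the driver's notification handler, and the mode constant is the enable
 * value defined in <net/nfc/nci.h>):
 *
 *	r = nci_nfcee_discover(ndev, NCI_NFCEE_DISCOVERY_ACTION_ENABLE);
 *	if (r == NCI_STATUS_OK)
 *		r = nci_nfcee_mode_set(ndev, nfcee_id, NCI_NFCEE_ENABLE);
 */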

static void nci_core_conn_create_req(struct nci_dev *ndev, const void *opt)
{
	const struct core_conn_create_data *data = opt;

	nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, data->length, data->cmd);
}

int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
			 u8 number_destination_params,
			 size_t params_len,
			 const struct core_conn_create_dest_spec_params *params)
{
	int r;
	struct nci_core_conn_create_cmd *cmd;
	struct core_conn_create_data data;

	data.length = params_len + sizeof(struct nci_core_conn_create_cmd);
	cmd = kzalloc(data.length, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->destination_type = destination_type;
	cmd->number_destination_params = number_destination_params;

	data.cmd = cmd;

	if (params) {
		memcpy(cmd->params, params, params_len);
		if (params->length > 0)
			memcpy(&ndev->cur_params,
			       &params->value[DEST_SPEC_PARAMS_ID_INDEX],
			       sizeof(struct dest_spec_params));
		else
			ndev->cur_params.id = 0;
	} else {
		ndev->cur_params.id = 0;
	}
	ndev->cur_dest_type = destination_type;

	r = __nci_request(ndev, nci_core_conn_create_req, &data,
			  msecs_to_jiffies(NCI_CMD_TIMEOUT));
	kfree(cmd);
	return r;
}
EXPORT_SYMBOL(nci_core_conn_create);

static void nci_core_conn_close_req(struct nci_dev *ndev, const void *opt)
{
	__u8 conn_id = (unsigned long)opt;

	nci_send_cmd(ndev, NCI_OP_CORE_CONN_CLOSE_CMD, 1, &conn_id);
}

int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id)
{
	unsigned long opt = conn_id;

	ndev->cur_conn_id = conn_id;
	return __nci_request(ndev, nci_core_conn_close_req, (void *)opt,
			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
EXPORT_SYMBOL(nci_core_conn_close);

static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	struct nci_set_config_param param;
	int rc;

	param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
	if ((param.val == NULL) || (param.len == 0))
		return 0;

	if (param.len > NFC_MAX_GT_LEN)
		return -EINVAL;

	param.id = NCI_PN_ATR_REQ_GEN_BYTES;

	rc = nci_request(ndev, nci_set_config_req, &param,
			 msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
	if (rc)
		return rc;

	param.id = NCI_LN_ATR_RES_GEN_BYTES;

	return nci_request(ndev, nci_set_config_req, &param,
			   msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
}

static int nci_set_listen_parameters(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;
	__u8 val;

	val = NCI_LA_SEL_INFO_NFC_DEP_MASK;

	rc = nci_set_config(ndev, NCI_LA_SEL_INFO, 1, &val);
	if (rc)
		return rc;

	val = NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK;

	rc = nci_set_config(ndev, NCI_LF_PROTOCOL_TYPE, 1, &val);
	if (rc)
		return rc;

	val = NCI_LF_CON_BITR_F_212 | NCI_LF_CON_BITR_F_424;

	return nci_set_config(ndev, NCI_LF_CON_BITR_F, 1, &val);
}

static int nci_start_poll(struct nfc_dev *nfc_dev,
			  __u32 im_protocols, __u32 tm_protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	struct nci_rf_discover_param param;
	int rc;

	if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
	    (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to start poll, since poll is already active\n");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		pr_err("there is an active target\n");
		return -EBUSY;
	}

	if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
	    (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
		pr_debug("target active or w4 select, implicitly deactivate\n");

		rc = nci_request(ndev, nci_rf_deactivate_req,
				 (void *)NCI_DEACTIVATE_TYPE_IDLE_MODE,
				 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
		rc = nci_set_local_general_bytes(nfc_dev);
		if (rc) {
			pr_err("failed to set local general bytes\n");
			return rc;
		}
	}

	if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
		rc = nci_set_listen_parameters(nfc_dev);
		if (rc)
			pr_err("failed to set listen parameters\n");
	}

	param.im_protocols = im_protocols;
	param.tm_protocols = tm_protocols;
	rc = nci_request(ndev, nci_rf_discover_req, &param,
			 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = im_protocols;

	return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
	    (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to stop poll, since poll is not active\n");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req,
		    (void *)NCI_DEACTIVATE_TYPE_IDLE_MODE,
		    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev,
			       struct nfc_target *target, __u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	struct nci_rf_discover_select_param param;
	const struct nfc_target *nci_target = NULL;
	int i;
	int rc = 0;

	pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);

	if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
	    (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
		pr_err("there is no available target to activate\n");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		pr_err("there is already an active target\n");
		return -EBUSY;
	}

	for (i = 0; i < ndev->n_targets; i++) {
		if (ndev->targets[i].idx == target->idx) {
			nci_target = &ndev->targets[i];
			break;
		}
	}

	if (!nci_target) {
		pr_err("unable to find the selected target\n");
		return -EINVAL;
	}

	if (!(nci_target->supported_protocols & (1 << protocol))) {
		pr_err("target does not support the requested protocol 0x%x\n",
		       protocol);
		return -EINVAL;
	}

	if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
		param.rf_discovery_id = nci_target->logical_idx;

		if (protocol == NFC_PROTO_JEWEL)
			param.rf_protocol = NCI_RF_PROTOCOL_T1T;
		else if (protocol == NFC_PROTO_MIFARE)
			param.rf_protocol = NCI_RF_PROTOCOL_T2T;
		else if (protocol == NFC_PROTO_FELICA)
			param.rf_protocol = NCI_RF_PROTOCOL_T3T;
		else if (protocol == NFC_PROTO_ISO14443 ||
			 protocol == NFC_PROTO_ISO14443_B)
			param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
		else
			param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;

		rc = nci_request(ndev, nci_rf_discover_select_req, &param,
				 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
	}

	if (!rc)
		ndev->target_active_prot = protocol;

	return rc;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev,
				  struct nfc_target *target,
				  __u8 mode)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	unsigned long nci_mode = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	if (!ndev->target_active_prot) {
		pr_err("unable to deactivate target, no active target\n");
		return;
	}

	ndev->target_active_prot = 0;

	switch (mode) {
	case NFC_TARGET_MODE_SLEEP:
		nci_mode = NCI_DEACTIVATE_TYPE_SLEEP_MODE;
		break;
	}

	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
		nci_request(ndev, nci_rf_deactivate_req, (void *)nci_mode,
			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}

static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
			   __u8 comm_mode, __u8 *gb, size_t gb_len)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);

	rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
	if (rc)
		return rc;

	rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
					  ndev->remote_gb_len);
	if (!rc)
		rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
					NFC_RF_INITIATOR);

	return rc;
}

static int nci_dep_link_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	if (nfc_dev->rf_mode == NFC_RF_INITIATOR) {
		nci_deactivate_target(nfc_dev, NULL, NCI_DEACTIVATE_TYPE_IDLE_MODE);
	} else {
		if (atomic_read(&ndev->state) == NCI_LISTEN_ACTIVE ||
		    atomic_read(&ndev->state) == NCI_DISCOVERY) {
			nci_request(ndev, nci_rf_deactivate_req, (void *)0,
				    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		}

		rc = nfc_tm_deactivated(nfc_dev);
		if (rc)
			pr_err("error when signaling tm deactivation\n");
	}

	return 0;
}

static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
			  struct sk_buff *skb,
			  data_exchange_cb_t cb, void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;
	struct nci_conn_info *conn_info;

	conn_info = ndev->rf_conn_info;
	if (!conn_info)
		return -EPROTO;

	pr_debug("target_idx %d, len %d\n", target->idx, skb->len);

	if (!ndev->target_active_prot) {
		pr_err("unable to exchange data, no active target\n");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	conn_info->data_exchange_cb = cb;
	conn_info->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}

static int nci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		pr_err("unable to send data\n");

	return rc;
}

static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if (ndev->ops->enable_se)
		return ndev->ops->enable_se(ndev, se_idx);

	return 0;
}

static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if (ndev->ops->disable_se)
		return ndev->ops->disable_se(ndev, se_idx);

	return 0;
}

static int nci_discover_se(struct nfc_dev *nfc_dev)
{
	int r;
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if (ndev->ops->discover_se) {
		r = nci_nfcee_discover(ndev, NCI_NFCEE_DISCOVERY_ACTION_ENABLE);
		if (r != NCI_STATUS_OK)
			return -EPROTO;

		return ndev->ops->discover_se(ndev);
	}

	return 0;
}

static int nci_se_io(struct nfc_dev *nfc_dev, u32 se_idx,
		     u8 *apdu, size_t apdu_length,
		     se_io_cb_t cb, void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if (ndev->ops->se_io)
		return ndev->ops->se_io(ndev, se_idx, apdu,
					apdu_length, cb, cb_context);

	return 0;
}

static int nci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if (!ndev->ops->fw_download)
		return -ENOTSUPP;

	return ndev->ops->fw_download(ndev, firmware_name);
}

static const struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.dep_link_up = nci_dep_link_up,
	.dep_link_down = nci_dep_link_down,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.im_transceive = nci_transceive,
	.tm_send = nci_tm_send,
	.enable_se = nci_enable_se,
	.disable_se = nci_disable_se,
	.discover_se = nci_discover_se,
	.se_io = nci_se_io,
	.fw_download = nci_fw_download,
};

/* ---- Interface to NCI drivers ---- */
/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: Reserved space at beginning of skb
 * @tx_tailroom: Reserved space at end of skb
 */
struct nci_dev *nci_allocate_device(const struct nci_ops *ops,
				    __u32 supported_protocols,
				    int tx_headroom, int tx_tailroom)
{
	struct nci_dev *ndev;

	pr_debug("supported_protocols 0x%x\n", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;

	if (ops->n_prop_ops > NCI_MAX_PROPRIETARY_CMD) {
		pr_err("Too many proprietary commands: %zd\n",
		       ops->n_prop_ops);
		goto free_nci;
	}

	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;
	init_completion(&ndev->req_completion);

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
					    supported_protocols,
					    tx_headroom + NCI_DATA_HDR_SIZE,
					    tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_nci;

	ndev->hci_dev = nci_hci_allocate(ndev);
	if (!ndev->hci_dev)
		goto free_nfc;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_nfc:
	nfc_free_device(ndev->nfc_dev);
free_nci:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	nci_hci_deallocate(ndev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	timer_setup(&ndev->cmd_timer, nci_cmd_timer, 0);
	timer_setup(&ndev->data_timer, nci_data_timer, 0);

	mutex_init(&ndev->req_lock);
	INIT_LIST_HEAD(&ndev->conn_info_list);

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto destroy_tx_wq_exit;

	goto exit;

destroy_tx_wq_exit:
	destroy_workqueue(ndev->tx_wq);

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);

/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	struct nci_conn_info *conn_info, *n;

	/* This set_bit is not protected with specialized barrier,
	 * however, it is fine because the mutex_lock(&ndev->req_lock);
	 * in nci_close_device() will help to emit one.
	 */
	set_bit(NCI_UNREG, &ndev->flags);

	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	list_for_each_entry_safe(conn_info, n, &ndev->conn_info_list, list) {
		list_del(&conn_info->list);
		/* conn_info is allocated with devm_kzalloc */
	}

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @ndev: The nci device
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
	pr_debug("len %d\n", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
	    !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);
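
/* Typical driver lifecycle around the exported interface above, shown as a
 * hedged sketch (the foo_* names, protocol mask and headroom value are
 * hypothetical and error handling is trimmed):
 *
 *	ndev = nci_allocate_device(&foo_nci_ops, NFC_PROTO_ISO14443_MASK,
 *				   FOO_HDR_LEN, 0);
 *	rc = nci_register_device(ndev);
 *
 * The transport's receive path (IRQ thread, USB completion, ...) then feeds
 * frames in with nci_recv_frame(ndev, skb), and removal undoes the above via
 * nci_unregister_device(ndev) followed by nci_free_device(ndev).
 */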

int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Send copy to sniffer */
	nfc_send_to_raw_sock(ndev->nfc_dev, skb,
			     RAW_PAYLOAD_NCI, NFC_DIRECTION_TX);

	return ndev->ops->send(ndev, skb);
}
EXPORT_SYMBOL(nci_send_frame);

/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, const void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	hdr = skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		skb_put_data(skb, payload, plen);

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}
EXPORT_SYMBOL(nci_send_cmd);

/* Proprietary commands API */
static const struct nci_driver_ops *ops_cmd_lookup(const struct nci_driver_ops *ops,
						   size_t n_ops,
						   __u16 opcode)
{
	size_t i;
	const struct nci_driver_ops *op;

	if (!ops || !n_ops)
		return NULL;

	for (i = 0; i < n_ops; i++) {
		op = &ops[i];
		if (op->opcode == opcode)
			return op;
	}

	return NULL;
}

static int nci_op_rsp_packet(struct nci_dev *ndev, __u16 rsp_opcode,
			     struct sk_buff *skb, const struct nci_driver_ops *ops,
			     size_t n_ops)
{
	const struct nci_driver_ops *op;

	op = ops_cmd_lookup(ops, n_ops, rsp_opcode);
	if (!op || !op->rsp)
		return -ENOTSUPP;

	return op->rsp(ndev, skb);
}

static int nci_op_ntf_packet(struct nci_dev *ndev, __u16 ntf_opcode,
			     struct sk_buff *skb, const struct nci_driver_ops *ops,
			     size_t n_ops)
{
	const struct nci_driver_ops *op;

	op = ops_cmd_lookup(ops, n_ops, ntf_opcode);
	if (!op || !op->ntf)
		return -ENOTSUPP;

	return op->ntf(ndev, skb);
}

int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
			struct sk_buff *skb)
{
	return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->prop_ops,
				 ndev->ops->n_prop_ops);
}

int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
			struct sk_buff *skb)
{
	return nci_op_ntf_packet(ndev, opcode, skb, ndev->ops->prop_ops,
				 ndev->ops->n_prop_ops);
}

int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode,
			struct sk_buff *skb)
{
	return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->core_ops,
				 ndev->ops->n_core_ops);
}

int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
			struct sk_buff *skb)
{
	return nci_op_ntf_packet(ndev, opcode, skb, ndev->ops->core_ops,
				 ndev->ops->n_core_ops);
}
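
/* The prop_ops/core_ops dispatch above lets drivers hook vendor specific
 * responses and notifications.  Hedged sketch of a driver-side table (the
 * foo_* handlers and FOO_OID are hypothetical):
 *
 *	static const struct nci_driver_ops foo_prop_ops[] = {
 *		{
 *			.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, FOO_OID),
 *			.rsp = foo_prop_rsp,
 *			.ntf = foo_prop_ntf,
 *		},
 *	};
 *
 * and the driver points .prop_ops/.n_prop_ops of its struct nci_ops at it.
 */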

/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct nci_conn_info *conn_info;
	struct sk_buff *skb;

	conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id);
	if (!conn_info)
		return;

	pr_debug("credits_cnt %d\n", atomic_read(&conn_info->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&conn_info->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Check if data flow control is used */
		if (atomic_read(&conn_info->credits_cnt) !=
		    NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&conn_info->credits_cnt);

		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_conn_id(skb->data),
			 nci_plen(skb->data));

		nci_send_frame(ndev, skb);

		mod_timer(&ndev->data_timer,
			  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
		kcov_remote_stop();
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	for (; (skb = skb_dequeue(&ndev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to sniffer */
		nfc_send_to_raw_sock(ndev->nfc_dev, skb,
				     RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);

		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}

	/* check if a data exchange timeout has occurred */
	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
		/* complete the data exchange transaction, if exists */
		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
			nci_data_exchange_complete(ndev, NULL,
						   ndev->cur_conn_id,
						   -ETIMEDOUT);

		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	}
}

/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		kcov_remote_start_common(skb_get_kcov_handle(skb));
		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(ndev, skb);

		mod_timer(&ndev->cmd_timer,
			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
		kcov_remote_stop();
	}
}

MODULE_LICENSE("GPL");