// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Intel Corporation
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mei_cl_bus.h>
#include <linux/rcupdate.h>
#include <linux/debugfs.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <net/cfg80211.h>

#include "internal.h"
#include "iwl-mei.h"
#include "trace.h"
#include "trace-data.h"
#include "sap.h"

MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
MODULE_LICENSE("GPL");

#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
			      0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)

/*
 * Since iwlwifi calls iwlmei without any context, hold a pointer to the
 * mei_cl_device structure here.
 * Define a mutex that will synchronize all the flows between iwlwifi and
 * iwlmei.
 * Note that iwlmei can't have several instances, so it is ok to have static
 * variables here.
 */
static struct mei_cl_device *iwl_mei_global_cldev;
static DEFINE_MUTEX(iwl_mei_mutex);
static unsigned long iwl_mei_status;

enum iwl_mei_status_bits {
	IWL_MEI_STATUS_SAP_CONNECTED,
};

bool iwl_mei_is_connected(void)
{
	return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
}
EXPORT_SYMBOL_GPL(iwl_mei_is_connected);

#define SAP_VERSION	3
#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */

struct iwl_sap_q_ctrl_blk {
	__le32 wr_ptr;
	__le32 rd_ptr;
	__le32 size;
};

enum iwl_sap_q_idx {
	SAP_QUEUE_IDX_NOTIF = 0,
	SAP_QUEUE_IDX_DATA,
	SAP_QUEUE_IDX_MAX,
};

struct iwl_sap_dir {
	__le32 reserved;
	struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
};

enum iwl_sap_dir_idx {
	SAP_DIRECTION_HOST_TO_ME = 0,
	SAP_DIRECTION_ME_TO_HOST,
	SAP_DIRECTION_MAX,
};

struct iwl_sap_shared_mem_ctrl_blk {
	__le32 sap_id;
	__le32 size;
	struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
};

/*
 * The shared area has the following layout (the queues follow the order of
 * enum iwl_sap_q_idx, i.e. notif before data, for each direction):
 *
 * +-----------------------------------+
 * |struct iwl_sap_shared_mem_ctrl_blk |
 * +-----------------------------------+
 * |Host -> ME notif queue             |
 * +-----------------------------------+
 * |Host -> ME data queue              |
 * +-----------------------------------+
 * |ME -> Host notif queue             |
 * +-----------------------------------+
 * |ME -> Host data queue              |
 * +-----------------------------------+
 * |SAP control block id (SAP!)        |
 * +-----------------------------------+
 */
#define SAP_H2M_DATA_Q_SZ	48256
#define SAP_M2H_DATA_Q_SZ	24128
#define SAP_H2M_NOTIF_Q_SZ	2240
#define SAP_M2H_NOTIF_Q_SZ	62720

#define _IWL_MEI_SAP_SHARED_MEM_SZ \
	(sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
	 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
	 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)

#define IWL_MEI_SAP_SHARED_MEM_SZ \
	(roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))

struct iwl_mei_shared_mem_ptrs {
	struct iwl_sap_shared_mem_ctrl_blk *ctrl;
	void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
	size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
};

struct iwl_mei_filters {
	struct rcu_head rcu_head;
	struct iwl_sap_oob_filters filters;
};

/**
 * struct iwl_mei - holds the private data for iwl_mei
 *
 * @get_nvm_wq: the wait queue for the get_nvm flow
 * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
 *	message. Used so that we can send CHECK_SHARED_AREA from atomic
 *	contexts.
 * @get_ownership_wq: the wait queue for the get_ownership flow
 * @shared_mem: the memory that is shared between CSME and the host
 * @cldev: the pointer to the MEI client device
 * @nvm: the data returned by the CSME for the NVM
 * @filters: the filters sent by CSME
 * @got_ownership: true if we own the device
 * @amt_enabled: true if CSME has wireless enabled
 * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
 *	bus, but rather need to wait until csa_throttle_end_wk runs
 * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
 *	to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
 *	flow.
 * @csa_throttle_end_wk: used to mark the end of the throttling window when
 *	&csa_throttled is true
 * @data_q_lock: protects the access to the data queues which are
 *	accessed without the mutex.
 * @sap_seq_no: the sequence number for the SAP messages
 * @seq_no: the sequence number for the SAP ME (bus level) messages
 * @dbgfs_dir: the debugfs dir entry
 */
struct iwl_mei {
	wait_queue_head_t get_nvm_wq;
	struct work_struct send_csa_msg_wk;
	wait_queue_head_t get_ownership_wq;
	struct iwl_mei_shared_mem_ptrs shared_mem;
	struct mei_cl_device *cldev;
	struct iwl_mei_nvm *nvm;
	struct iwl_mei_filters __rcu *filters;
	bool got_ownership;
	bool amt_enabled;
	bool csa_throttled;
	bool csme_taking_ownership;
	struct delayed_work csa_throttle_end_wk;
	spinlock_t data_q_lock;

	atomic_t sap_seq_no;
	atomic_t seq_no;

	struct dentry *dbgfs_dir;
};

/**
 * struct iwl_mei_cache - cache for the parameters from iwlwifi
 * @ops: Callbacks to iwlwifi.
 * @netdev: The netdev that will be used to transmit / receive packets.
 * @conn_info: The connection info message triggered by iwlwifi's association.
 * @power_limit: pointer to an array of 10 elements (le16) representing the
 *	power restrictions per chain.
 * @rf_kill: rf kill state.
 * @mcc: MCC info
 * @mac_address: interface MAC address.
 * @nvm_address: NVM MAC address.
 * @priv: A pointer to iwlwifi.
 *
 * This is used to cache the configuration coming from iwlwifi. The data is
 * cached here so that we can buffer the configuration even when we don't have
 * a binding on the mei bus, and hence no iwl_mei structure.
193 */ 194 struct iwl_mei_cache { 195 const struct iwl_mei_ops *ops; 196 struct net_device __rcu *netdev; 197 const struct iwl_sap_notif_connection_info *conn_info; 198 const __le16 *power_limit; 199 u32 rf_kill; 200 u16 mcc; 201 u8 mac_address[6]; 202 u8 nvm_address[6]; 203 void *priv; 204 }; 205 206 static struct iwl_mei_cache iwl_mei_cache = { 207 .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED 208 }; 209 210 static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev) 211 { 212 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 213 214 if (mei_cldev_dma_unmap(cldev)) 215 dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n"); 216 memset(&mei->shared_mem, 0, sizeof(mei->shared_mem)); 217 } 218 219 #define HBM_DMA_BUF_ID_WLAN 1 220 221 static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) 222 { 223 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 224 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; 225 226 mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, 227 IWL_MEI_SAP_SHARED_MEM_SZ); 228 229 if (IS_ERR(mem->ctrl)) { 230 int ret = PTR_ERR(mem->ctrl); 231 232 dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n", 233 ret); 234 mem->ctrl = NULL; 235 236 return ret; 237 } 238 239 memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ); 240 241 return 0; 242 } 243 244 static void iwl_mei_init_shared_mem(struct iwl_mei *mei) 245 { 246 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; 247 struct iwl_sap_dir *h2m; 248 struct iwl_sap_dir *m2h; 249 int dir, queue; 250 u8 *q_head; 251 252 mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID); 253 254 mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl)); 255 256 h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 257 m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; 258 259 h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = 260 cpu_to_le32(SAP_H2M_DATA_Q_SZ); 261 h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = 262 cpu_to_le32(SAP_H2M_NOTIF_Q_SZ); 263 m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = 264 cpu_to_le32(SAP_M2H_DATA_Q_SZ); 265 m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = 266 cpu_to_le32(SAP_M2H_NOTIF_Q_SZ); 267 268 /* q_head points to the start of the first queue */ 269 q_head = (void *)(mem->ctrl + 1); 270 271 /* Initialize the queue heads */ 272 for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) { 273 for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) { 274 mem->q_head[dir][queue] = q_head; 275 q_head += 276 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size); 277 mem->q_size[dir][queue] = 278 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size); 279 } 280 } 281 282 *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID); 283 } 284 285 static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev, 286 struct iwl_sap_q_ctrl_blk *notif_q, 287 u8 *q_head, 288 const struct iwl_sap_hdr *hdr, 289 u32 q_sz) 290 { 291 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); 292 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); 293 size_t room_in_buf; 294 size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len); 295 296 if (rd > q_sz || wr > q_sz) { 297 dev_err(&cldev->dev, 298 "Pointers are past the end of the buffer\n"); 299 return -EINVAL; 300 } 301 302 room_in_buf = wr >= rd ? 
	*(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
}

static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
					struct iwl_sap_q_ctrl_blk *notif_q,
					u8 *q_head,
					const struct iwl_sap_hdr *hdr,
					u32 q_sz)
{
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	size_t room_in_buf;
	size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the end of the buffer\n");
		return -EINVAL;
	}

	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;

	/* we don't have enough room for the data to write */
	if (room_in_buf < tx_sz) {
		dev_err(&cldev->dev,
			"Not enough room in the buffer\n");
		return -ENOSPC;
	}
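	/*
	 * Copy the message into the cyclic buffer: either in one go, or,
	 * when it would run past the end of the buffer, in two chunks (up
	 * to the end of the buffer, then wrapping around to its start).
	 */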
	if (wr + tx_sz <= q_sz) {
		memcpy(q_head + wr, hdr, tx_sz);
	} else {
		memcpy(q_head + wr, hdr, q_sz - wr);
		memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
	}

	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
	return 0;
}

static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
{
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];

	if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
		return true;

	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
}

static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
	};
	int ret;

	lockdep_assert_held(&iwl_mei_mutex);

	if (mei->csa_throttled)
		return 0;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
			ret);
		return ret;
	}
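	/*
	 * Throttle the CHECK_SHARED_AREA messages: after this one, no new
	 * one is sent for 100ms. iwl_mei_csa_throttle_end_wk clears the
	 * flag and re-sends the message if data piled up in the meantime.
	 */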
	mei->csa_throttled = true;

	schedule_delayed_work(&mei->csa_throttle_end_wk,
			      msecs_to_jiffies(100));

	return 0;
}

static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);

	mutex_lock(&iwl_mei_mutex);

	mei->csa_throttled = false;

	if (iwl_mei_host_to_me_data_pending(mei))
		iwl_mei_send_check_shared_area(mei->cldev);

	mutex_unlock(&iwl_mei_mutex);
}

static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
					struct iwl_sap_hdr *hdr)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;
	int ret;

	lockdep_assert_held(&iwl_mei_mutex);

	if (!mei->shared_mem.ctrl) {
		dev_err(&cldev->dev,
			"No shared memory, can't send any SAP message\n");
		return -EINVAL;
	}

	if (!iwl_mei_is_connected()) {
		dev_err(&cldev->dev,
			"Can't send a SAP message if we're not connected\n");
		return -ENODEV;
	}

	hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
	dev_dbg(&cldev->dev, "Sending %d\n", hdr->type);

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr, q_sz);

	if (ret < 0)
		return ret;

	trace_iwlmei_sap_cmd(hdr, true);

	return iwl_mei_send_check_shared_area(cldev);
}
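/*
 * Copy an skb to the host-to-ME data queue. Called from the Rx path for
 * packets that CSME should see (the common case) and, with cb_tx == true,
 * from the Tx path when the host sends a DHCP packet that CSME needs to
 * track.
 */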
void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
{
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;
	struct iwl_mei *mei;
	size_t room_in_buf;
	size_t tx_sz;
	size_t hdr_sz;
	u32 q_sz;
	u32 rd;
	u32 wr;
	void *q_head;

	if (!iwl_mei_global_cldev)
		return;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	/*
	 * We access this path for Rx packets (the more common case) and from
	 * the Tx path when we send DHCP packets; the latter is very unlikely.
	 * Take the lock already here to make sure we see that remove()
	 * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
	 */
	spin_lock_bh(&mei->data_q_lock);

	if (!iwl_mei_is_connected()) {
		spin_unlock_bh(&mei->data_q_lock);
		return;
	}

	/*
	 * We are in an RCU critical section and the remove from the CSME bus
	 * which would free this memory waits for the readers to complete (this
	 * is done in netdev_rx_handler_unregister).
	 */
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];

	rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
			 sizeof(struct iwl_sap_hdr);
	tx_sz = skb->len + hdr_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&mei->cldev->dev,
			"can't write the data: pointers are past the end of the buffer\n");
		goto out;
	}

	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;

	/* we don't have enough room for the data to write */
	if (room_in_buf < tx_sz) {
		dev_err(&mei->cldev->dev,
			"Not enough room in the buffer for this data\n");
		goto out;
	}

	if (skb_headroom(skb) < hdr_sz) {
		dev_err(&mei->cldev->dev,
			"Not enough headroom in the skb to write the SAP header\n");
		goto out;
	}

	if (cb_tx) {
		struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));

		cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
		cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
		cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
		cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
		cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
	} else {
		struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));

		hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
		hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
		hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
	}

	if (wr + tx_sz <= q_sz) {
		skb_copy_bits(skb, 0, q_head + wr, tx_sz);
	} else {
		skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
		skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
	}

	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));

out:
	spin_unlock_bh(&mei->data_q_lock);
}

static int
iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
{
	struct iwl_sap_hdr msg = {
		.type = cpu_to_le16(type),
	};

	return iwl_mei_send_sap_msg_payload(cldev, &msg);
}

static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, send_csa_msg_wk);

	if (!iwl_mei_is_connected())
		return;

	mutex_lock(&iwl_mei_mutex);

	iwl_mei_send_check_shared_area(mei->cldev);

	mutex_unlock(&iwl_mei_mutex);
}

/* Called in an RCU read critical section from netif_receive_skb */
static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct iwl_mei *mei =
		rcu_dereference(skb->dev->rx_handler_data);
	struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
	bool rx_for_csme = false;
	rx_handler_result_t res;

	/*
	 * remove() unregisters this handler and calls synchronize_net(), so
	 * this should never happen.
	 */
	if (!iwl_mei_is_connected()) {
		dev_err(&mei->cldev->dev,
			"Got an Rx packet, but we're not connected to SAP?\n");
		return RX_HANDLER_PASS;
	}

	if (filters)
		res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
	else
		res = RX_HANDLER_PASS;

	/*
	 * The data is already on the ring of the shared area, all we
	 * need to do is to tell the CSME firmware to check what we have
	 * there.
	 */
	if (rx_for_csme)
		schedule_work(&mei->send_csa_msg_wk);

	if (res != RX_HANDLER_PASS) {
		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
		dev_kfree_skb(skb);
	}

	return res;
}
static void
iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
			   const struct iwl_sap_me_msg_start_ok *rsp,
			   ssize_t len)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	if (len != sizeof(*rsp)) {
		dev_err(&cldev->dev,
			"got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
		dev_err(&cldev->dev,
			"size is incorrect: %zd instead of %zu\n",
			len, sizeof(*rsp));
		return;
	}

	if (rsp->supported_version != SAP_VERSION) {
		dev_err(&cldev->dev,
			"didn't get the expected version: got %d\n",
			rsp->supported_version);
		return;
	}

	mutex_lock(&iwl_mei_mutex);
	set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
	/* wifi driver has registered already */
	if (iwl_mei_cache.ops) {
		iwl_mei_send_sap_msg(mei->cldev,
				     SAP_MSG_NOTIF_WIFIDR_UP);
		iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
	}

	mutex_unlock(&iwl_mei_mutex);
}

static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
					const struct iwl_sap_csme_filters *filters)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
	struct iwl_mei_filters *new_filters;
	struct iwl_mei_filters *old_filters;

	old_filters =
		rcu_dereference_protected(mei->filters,
					  lockdep_is_held(&iwl_mei_mutex));

	new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
	if (!new_filters)
		return;

	/* Copy the OOB filters */
	new_filters->filters = filters->filters;

	rcu_assign_pointer(mei->filters, new_filters);

	if (old_filters)
		kfree_rcu(old_filters, rcu_head);
}

static void
iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
			   const struct iwl_sap_notif_conn_status *status)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_mei_conn_info conn_info = {
		.lp_state = le32_to_cpu(status->link_prot_state),
		.ssid_len = le32_to_cpu(status->conn_info.ssid_len),
		.channel = status->conn_info.channel,
		.band = status->conn_info.band,
		.auth_mode = le32_to_cpu(status->conn_info.auth_mode),
		.pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
	};

	if (!iwl_mei_cache.ops ||
	    conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
		return;

	memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
	ether_addr_copy(conn_info.bssid, status->conn_info.bssid);

	iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);

	/*
	 * Update the Rfkill state in case the host does not own the device:
	 * if we are in Link Protection, ask to not touch the device, else,
	 * unblock rfkill.
	 * If the host owns the device, inform the user space whether it can
	 * roam.
	 */
	if (mei->got_ownership)
		iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
						     status->link_prot_state);
	else
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
					  status->link_prot_state);
}

static void iwl_mei_set_init_conf(struct iwl_mei *mei)
{
	struct iwl_sap_notif_host_link_up link_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
		.hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
	};
	struct iwl_sap_notif_country_code mcc_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
		.hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
		.mcc = cpu_to_le16(iwl_mei_cache.mcc),
	};
	struct iwl_sap_notif_sar_limits sar_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
		.hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
	};
	struct iwl_sap_notif_host_nic_info nic_info_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
		.hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
	};
	struct iwl_sap_msg_dw rfkill_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
		.hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
		.val = cpu_to_le32(iwl_mei_cache.rf_kill),
	};

	iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);

	if (iwl_mei_cache.conn_info) {
		link_msg.conn_info = *iwl_mei_cache.conn_info;
		iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
	}

	iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);

	if (iwl_mei_cache.power_limit) {
		memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
		       sizeof(sar_msg.sar_chain_info_table));
		iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
	}

	ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address);
	ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address);
	iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);

	iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
}
static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
				     const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct net_device *netdev;

	/*
	 * First take rtnl and only then the mutex to avoid an ABBA
	 * with iwl_mei_set_netdev()
	 */
	rtnl_lock();
	mutex_lock(&iwl_mei_mutex);

	netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
					   lockdep_is_held(&iwl_mei_mutex));

	if (mei->amt_enabled == !!le32_to_cpu(dw->val))
		goto out;

	mei->amt_enabled = le32_to_cpu(dw->val);

	if (mei->amt_enabled) {
		if (netdev)
			netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);

		iwl_mei_set_init_conf(mei);
	} else {
		if (iwl_mei_cache.ops)
			iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
		if (netdev)
			netdev_rx_handler_unregister(netdev);
	}

out:
	mutex_unlock(&iwl_mei_mutex);
	rtnl_unlock();
}

static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
				     const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
}

static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
						 const void *payload)
{
	/* We can get ownership and driver is registered, go ahead */
	if (iwl_mei_cache.ops)
		iwl_mei_send_sap_msg(cldev,
				     SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
}

static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
						 const void *payload)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	dev_info(&cldev->dev, "CSME takes ownership\n");

	mei->got_ownership = false;

	/*
	 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver
	 * is finished taking the device down.
	 */
	mei->csme_taking_ownership = true;

	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
}

static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
			       const struct iwl_sap_nvm *sap_nvm)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
	int i;

	kfree(mei->nvm);
	mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
	if (!mei->nvm)
		return;

	ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
	mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
	mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
	mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
	mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);

	for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
		mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);

	wake_up_all(&mei->get_nvm_wq);
}

static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
					   const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	/*
	 * This means that we can't use the wifi device right now; CSME is
	 * not ready to let us use it.
	 */
	if (!dw->val) {
		dev_info(&cldev->dev, "Ownership req denied\n");
		return;
	}

	mei->got_ownership = true;
	wake_up_all(&mei->get_ownership_wq);

	iwl_mei_send_sap_msg(cldev,
			     SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);

	/* We can now start the connection, unblock rfkill */
	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
}

static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
				const struct iwl_sap_hdr *hdr)
{
	iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
}

static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
				   const struct iwl_sap_hdr *hdr)
{
	u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
	u16 type = le16_to_cpu(hdr->type);

	dev_dbg(&cldev->dev,
		"Got a new SAP message: type %d, len %d, seq %d\n",
		le16_to_cpu(hdr->type), len,
		le32_to_cpu(hdr->seq_num));
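/*
 * Each handler macro below validates the advertised message length against
 * the size the handler expects before dispatching. The _NO_LOCK variant is
 * for handlers that need to take other locks (e.g. rtnl) before the iwlmei
 * mutex to avoid ABBA deadlocks, as iwl_mei_handle_amt_state does.
 */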
#define SAP_MSG_HANDLER(_cmd, _handler, _sz)			\
	case SAP_MSG_NOTIF_ ## _cmd:				\
		if (len < _sz) {				\
			dev_err(&cldev->dev,			\
				"Bad size for %d: %u < %u\n",	\
				le16_to_cpu(hdr->type),		\
				(unsigned int)len,		\
				(unsigned int)_sz);		\
			break;					\
		}						\
		mutex_lock(&iwl_mei_mutex);			\
		_handler(cldev, (const void *)hdr);		\
		mutex_unlock(&iwl_mei_mutex);			\
		break

#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz)		\
	case SAP_MSG_NOTIF_ ## _cmd:				\
		if (len < _sz) {				\
			dev_err(&cldev->dev,			\
				"Bad size for %d: %u < %u\n",	\
				le16_to_cpu(hdr->type),		\
				(unsigned int)len,		\
				(unsigned int)_sz);		\
			break;					\
		}						\
		_handler(cldev, (const void *)hdr);		\
		break

#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz)			\
	case SAP_MSG_NOTIF_ ## _cmd:				\
		if (len < _sz) {				\
			dev_err(&cldev->dev,			\
				"Bad size for %d: %u < %u\n",	\
				le16_to_cpu(hdr->type),		\
				(unsigned int)len,		\
				(unsigned int)_sz);		\
			break;					\
		}						\
		break

	switch (type) {
	SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
	SAP_MSG_HANDLER(CSME_FILTERS,
			iwl_mei_handle_csme_filters,
			sizeof(struct iwl_sap_csme_filters));
	SAP_MSG_HANDLER(CSME_CONN_STATUS,
			iwl_mei_handle_conn_status,
			sizeof(struct iwl_sap_notif_conn_status));
	SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
				iwl_mei_handle_amt_state,
				sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
	SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
			sizeof(struct iwl_sap_nvm));
	SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
			iwl_mei_handle_rx_host_own_req,
			sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
			sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
			iwl_mei_handle_can_release_ownership, 0);
	SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
			iwl_mei_handle_csme_taking_ownership, 0);
	default:
		/*
		 * This is not really an error: there are messages that we
		 * decided to ignore, yet it is useful to be able to leave a
		 * note if debug is enabled.
		 */
		dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
			le16_to_cpu(hdr->type), len);
	}

#undef SAP_MSG_HANDLER
#undef SAP_MSG_HANDLER_NO_LOCK
#undef SAP_MSG_HANDLER_NO_HANDLER
}
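/*
 * Read len bytes from the cyclic buffer at *_rd, wrapping around at q_sz,
 * and advance *_rd accordingly. wr is passed for symmetry with the writer
 * side but is not consulted here; callers validate the pointers beforehand.
 */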
static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
				u32 *_rd, u32 wr,
				void *_buf, u32 len)
{
	u8 *buf = _buf;
	u32 rd = *_rd;

	if (rd + len <= q_sz) {
		memcpy(buf, q_head + rd, len);
		rd += len;
	} else {
		memcpy(buf, q_head + rd, q_sz - rd);
		memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
		rd = len - (q_sz - rd);
	}

	*_rd = rd;
}

#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \
			     IEEE80211_TKIP_IV_LEN + \
			     sizeof(rfc1042_header) + ETH_TLEN)

static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
				    const u8 *q_head, u32 q_sz,
				    u32 rd, u32 wr, ssize_t valid_rx_sz,
				    struct sk_buff_head *tx_skbs)
{
	struct iwl_sap_hdr hdr;
	struct net_device *netdev =
		rcu_dereference_protected(iwl_mei_cache.netdev,
					  lockdep_is_held(&iwl_mei_mutex));

	if (!netdev)
		return;

	while (valid_rx_sz >= sizeof(hdr)) {
		struct ethhdr *ethhdr;
		unsigned char *data;
		struct sk_buff *skb;
		u16 len;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
		valid_rx_sz -= sizeof(hdr);
		len = le16_to_cpu(hdr.len);

		if (valid_rx_sz < len) {
			dev_err(&cldev->dev,
				"Data queue is corrupted: valid data len %zd, len %d\n",
				valid_rx_sz, len);
			break;
		}

		if (len < sizeof(*ethhdr)) {
			dev_err(&cldev->dev,
				"Data len is smaller than an ethernet header? len = %d\n",
				len);
			/* the queue is corrupted, don't parse any further */
			break;
		}

		valid_rx_sz -= len;

		if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
			dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
				le16_to_cpu(hdr.type), len);
			continue;
		}

		/* We need enough room for the WiFi header + SNAP + IV */
		skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
		if (!skb)
			/* drop the rest; the caller resets rd_ptr to wr_ptr */
			break;

		skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
		ethhdr = skb_push(skb, sizeof(*ethhdr));

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
				    ethhdr, sizeof(*ethhdr));
		len -= sizeof(*ethhdr);

		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = ethhdr->h_proto;

		data = skb_put(skb, len);
		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);

		/*
		 * Enqueue the skb here so that it can be sent later when we
		 * do not hold the mutex. TX'ing a packet with a mutex held is
		 * possible, but it wouldn't be nice to forbid the TX path to
		 * call any of iwlmei's functions, since every API from iwlmei
		 * needs the mutex.
		 */
		__skb_queue_tail(tx_skbs, skb);
	}
}

static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
				      const u8 *q_head, u32 q_sz,
				      u32 rd, u32 wr, ssize_t valid_rx_sz)
{
	struct page *p = alloc_page(GFP_KERNEL);
	struct iwl_sap_hdr *hdr;

	if (!p)
		return;

	hdr = page_address(p);

	while (valid_rx_sz >= sizeof(*hdr)) {
		u16 len;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
		valid_rx_sz -= sizeof(*hdr);
		len = le16_to_cpu(hdr->len);

		if (valid_rx_sz < len)
			break;

		/* the copy below must not run past the end of the page */
		if (len > PAGE_SIZE - sizeof(*hdr)) {
			dev_err(&cldev->dev,
				"SAP message too long: %d\n", len);
			break;
		}

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);

		trace_iwlmei_sap_cmd(hdr, false);
		iwl_mei_handle_sap_msg(cldev, hdr);
		valid_rx_sz -= len;
	}

	/* valid_rx_sz must be 0 now... */
	if (valid_rx_sz)
		dev_err(&cldev->dev,
			"More data in the buffer although we read it all\n");

	__free_page(p);
}
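/*
 * Drain one ME-to-host queue: with skbs == NULL the content is parsed as
 * SAP commands, otherwise as data packets that are collected into skbs for
 * later transmission. In both cases the read pointer is then published back
 * as equal to the write pointer, returning the space to CSME.
 */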
static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
				  struct iwl_sap_q_ctrl_blk *notif_q,
				  const u8 *q_head,
				  struct sk_buff_head *skbs,
				  u32 q_sz)
{
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	ssize_t valid_rx_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the buffer limit\n");
		return;
	}

	if (rd == wr)
		return;

	valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;

	if (skbs)
		iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
					valid_rx_sz, skbs);
	else
		iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
					  valid_rx_sz);

	/* Increment the read pointer to point to the write pointer */
	WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
}

static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct sk_buff_head tx_skbs;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;

	if (!mei->shared_mem.ctrl)
		return;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];

	/*
	 * Do not hold the mutex here; each and every message handler takes
	 * it on its own. This allows a handler to take it exactly when it
	 * needs it.
	 */
	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);

	mutex_lock(&iwl_mei_mutex);
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];

	__skb_queue_head_init(&tx_skbs);

	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);

	if (skb_queue_empty(&tx_skbs)) {
		mutex_unlock(&iwl_mei_mutex);
		return;
	}

	/*
	 * Take the RCU read lock before we unlock the mutex to make sure that
	 * even if the netdev is replaced by another non-NULL netdev right after
	 * we unlock the mutex, the old netdev will still be valid when we
	 * transmit the frames. We can't allow the old netdev to be freed here
	 * because the skbs hold a pointer to it.
	 */
	rcu_read_lock();

	mutex_unlock(&iwl_mei_mutex);

	if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
		dev_err(&cldev->dev, "Can't Tx without a netdev\n");
		skb_queue_purge(&tx_skbs);
		goto out;
	}

	while (!skb_queue_empty(&tx_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&tx_skbs);

		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
		dev_queue_xmit(skb);
	}

out:
	rcu_read_unlock();
}
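/*
 * Rx callback for the mei bus. The 100-byte buffer is enough for the
 * messages handled here: the largest one (SAP_ME_MSG_START_OK) is checked
 * against it with the BUILD_BUG_ON below.
 */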
static void iwl_mei_rx(struct mei_cl_device *cldev)
{
	struct iwl_sap_me_msg_hdr *hdr;
	u8 msg[100];
	ssize_t ret;

	ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
		return;
	}

	if (ret == 0) {
		dev_err(&cldev->dev, "got an empty response\n");
		return;
	}

	hdr = (void *)msg;
	trace_iwlmei_me_msg(hdr, false);

	switch (le32_to_cpu(hdr->type)) {
	case SAP_ME_MSG_START_OK:
		BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
			     sizeof(msg));

		iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
		break;
	case SAP_ME_MSG_CHECK_SHARED_AREA:
		iwl_mei_handle_check_shared_area(cldev);
		break;
	default:
		dev_err(&cldev->dev, "got an unexpected RX notification: %d\n",
			le32_to_cpu(hdr->type));
		break;
	}
}

static int iwl_mei_send_start(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_START),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
		.hdr.len = cpu_to_le32(sizeof(msg)),
		.supported_versions[0] = SAP_VERSION,
		.init_data_seq_num = cpu_to_le16(0x100),
		.init_notif_seq_num = cpu_to_le16(0x800),
	};
	int ret;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_START message %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int iwl_mei_enable(struct mei_cl_device *cldev)
{
	int ret;

	ret = mei_cldev_enable(cldev);
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
		return ret;
	}

	ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
	if (ret) {
		dev_err(&cldev->dev,
			"failed to register to the rx cb: %d\n", ret);
		mei_cldev_disable(cldev);
		return ret;
	}

	return 0;
}
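/*
 * Ask CSME for the NVM data and wait (outside the mutex, so that the Rx
 * path can run) until iwl_mei_handle_nvm() fills mei->nvm and wakes us up.
 * Returns a copy that the caller owns and must free, or NULL on failure.
 */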
struct iwl_mei_nvm *iwl_mei_get_nvm(void)
{
	struct iwl_mei_nvm *nvm = NULL;
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
				   SAP_MSG_NOTIF_GET_NVM);
	if (ret)
		goto out;

	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
	if (!ret)
		return NULL;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (mei->nvm)
		nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);

out:
	mutex_unlock(&iwl_mei_mutex);
	return nvm;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
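/*
 * Ask CSME to release ownership of the NIC and wait up to half a second for
 * the CSME_REPLY_TO_HOST_OWNERSHIP_REQ notification to grant it. Returns 0
 * when the host owns the device (or when there is nothing to do), -ETIMEDOUT
 * when CSME didn't answer in time, non-zero otherwise.
 */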
int iwl_mei_get_ownership(void)
{
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	if (!mei->amt_enabled) {
		ret = 0;
		goto out;
	}

	if (mei->got_ownership) {
		ret = 0;
		goto out;
	}

	ret = iwl_mei_send_sap_msg(mei->cldev,
				   SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
	if (ret)
		goto out;

	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_ownership_wq,
				 mei->got_ownership, HZ / 2);
	if (!ret)
		return -ETIMEDOUT;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	ret = !mei->got_ownership;

out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);

void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
			     const struct iwl_mei_colloc_info *colloc_info)
{
	struct iwl_sap_notif_host_link_up msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.conn_info = {
			.ssid_len = cpu_to_le32(conn_info->ssid_len),
			.channel = conn_info->channel,
			.band = conn_info->band,
			.pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
			.auth_mode = cpu_to_le32(conn_info->auth_mode),
		},
	};
	struct iwl_mei *mei;

	if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
		return;

	memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
	memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);

	if (colloc_info) {
		msg.colloc_channel = colloc_info->channel;
		msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
		memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
	}

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (!mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info =
		kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_host_associated);

void iwl_mei_host_disassociated(void)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_host_link_down msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.type = HOST_LINK_DOWN_TYPE_LONG,
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info = NULL;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);
void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
{
	struct iwl_mei *mei;
	u32 rfkill_state = 0;
	struct iwl_sap_msg_dw msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	if (!sw_rfkill)
		rfkill_state |= SAP_SW_RFKILL_DEASSERTED;

	if (!hw_rfkill)
		rfkill_state |= SAP_HW_RFKILL_DEASSERTED;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	msg.val = cpu_to_le32(rfkill_state);

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	iwl_mei_cache.rf_kill = rfkill_state;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);

void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_host_nic_info msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	ether_addr_copy(msg.mac_address, mac_address);
	ether_addr_copy(msg.nvm_address, nvm_address);

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
	ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);

void iwl_mei_set_country_code(u16 mcc)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_country_code msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.mcc = cpu_to_le16(mcc),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	iwl_mei_cache.mcc = mcc;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);

void iwl_mei_set_power_limit(const __le16 *power_limit)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_sar_limits msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	memcpy(msg.sar_chain_info_table, power_limit,
	       sizeof(msg.sar_chain_info_table));

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.power_limit);
	iwl_mei_cache.power_limit = kmemdup(power_limit,
					    sizeof(msg.sar_chain_info_table),
					    GFP_KERNEL);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);

void iwl_mei_set_netdev(struct net_device *netdev)
{
	struct iwl_mei *mei;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected()) {
		rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (!netdev) {
		struct net_device *dev =
			rcu_dereference_protected(iwl_mei_cache.netdev,
						  lockdep_is_held(&iwl_mei_mutex));

		if (!dev)
			goto out;

		netdev_rx_handler_unregister(dev);
	}

	rcu_assign_pointer(iwl_mei_cache.netdev, netdev);

	if (netdev && mei->amt_enabled)
		netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);

out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
void iwl_mei_device_down(void)
{
	struct iwl_mei *mei;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (!mei->csme_taking_ownership)
		goto out;

	iwl_mei_send_sap_msg(mei->cldev,
			     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
	mei->csme_taking_ownership = false;
out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_device_down);

int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
{
	int ret;

	/*
	 * We must have a non-NULL priv pointer to not crash when there are
	 * multiple WiFi devices.
	 */
	if (!priv)
		return -EINVAL;

	mutex_lock(&iwl_mei_mutex);

	/* do not allow registration if someone else already registered */
	if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
		ret = -EBUSY;
		goto out;
	}

	iwl_mei_cache.priv = priv;
	iwl_mei_cache.ops = ops;

	if (iwl_mei_global_cldev) {
		struct iwl_mei *mei =
			mei_cldev_get_drvdata(iwl_mei_global_cldev);

		/* we have already a SAP connection */
		if (iwl_mei_is_connected())
			iwl_mei_send_sap_msg(mei->cldev,
					     SAP_MSG_NOTIF_WIFIDR_UP);
	}
	ret = 0;

out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_register);

void iwl_mei_start_unregister(void)
{
	mutex_lock(&iwl_mei_mutex);

	/* At this point, the wifi driver should have removed the netdev */
	if (rcu_access_pointer(iwl_mei_cache.netdev))
		pr_err("Still had a netdev pointer set upon unregister\n");

	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info = NULL;
	kfree(iwl_mei_cache.power_limit);
	iwl_mei_cache.power_limit = NULL;
	iwl_mei_cache.ops = NULL;
	/* leave iwl_mei_cache.priv non-NULL to prevent any new registration */

	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);

void iwl_mei_unregister_complete(void)
{
	mutex_lock(&iwl_mei_mutex);

	iwl_mei_cache.priv = NULL;

	if (iwl_mei_global_cldev) {
		struct iwl_mei *mei =
			mei_cldev_get_drvdata(iwl_mei_global_cldev);

		iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
		mei->got_ownership = false;
	}

	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);

#if IS_ENABLED(CONFIG_DEBUG_FS)

static ssize_t
iwl_mei_dbgfs_send_start_message_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	int ret;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_global_cldev) {
		ret = -ENODEV;
		goto out;
	}

	ret = iwl_mei_send_start(iwl_mei_global_cldev);

out:
	mutex_unlock(&iwl_mei_mutex);
	return ret ?: count;
}

static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
	.write = iwl_mei_dbgfs_send_start_message_write,
	.open = simple_open,
	.llseek = default_llseek,
};

static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
						 const char __user *user_buf,
						 size_t count, loff_t *ppos)
{
	iwl_mei_get_ownership();

	return count;
}

static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
	.write = iwl_mei_dbgfs_req_ownership_write,
	.open = simple_open,
	.llseek = default_llseek,
};

static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
{
	mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	if (!mei->dbgfs_dir)
		return;

	debugfs_create_ulong("status", S_IRUSR,
			     mei->dbgfs_dir, &iwl_mei_status);
	debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
			    mei, &iwl_mei_dbgfs_send_start_message_ops);
	debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
			    mei, &iwl_mei_dbgfs_req_ownership_ops);
}

static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
{
	debugfs_remove_recursive(mei->dbgfs_dir);
	mei->dbgfs_dir = NULL;
}

#else

static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}

#endif /* CONFIG_DEBUG_FS */
/*
 * iwl_mei_probe - the probe function called by the mei bus enumeration
 *
 * This allocates the data needed by iwlmei and sets a pointer to this data
 * into the mei_cl_device's drvdata.
 * It starts the SAP protocol by sending the SAP_ME_MSG_START without
 * waiting for the answer. The answer will be caught later by the Rx callback.
 */
static int iwl_mei_probe(struct mei_cl_device *cldev,
			 const struct mei_cl_device_id *id)
{
	struct iwl_mei *mei;
	int ret;

	mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
	if (!mei)
		return -ENOMEM;

	init_waitqueue_head(&mei->get_nvm_wq);
	INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
	INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
			  iwl_mei_csa_throttle_end_wk);
	init_waitqueue_head(&mei->get_ownership_wq);
	spin_lock_init(&mei->data_q_lock);

	mei_cldev_set_drvdata(cldev, mei);
	mei->cldev = cldev;

	/*
	 * The CSME firmware needs to boot the internal WLAN client. Wait here
	 * so that the DMA map request will succeed.
	 */
	msleep(20);

	ret = iwl_mei_alloc_shared_mem(cldev);
	if (ret)
		goto free;

	iwl_mei_init_shared_mem(mei);

	ret = iwl_mei_enable(cldev);
	if (ret)
		goto free_shared_mem;

	iwl_mei_dbgfs_register(mei);

	/*
	 * We now have a Rx function in place; start the SAP protocol. We
	 * expect to get the SAP_ME_MSG_START_OK response later on.
	 */
	mutex_lock(&iwl_mei_mutex);
	ret = iwl_mei_send_start(cldev);
	mutex_unlock(&iwl_mei_mutex);
	if (ret)
		goto debugfs_unregister;

	/* must be last */
	iwl_mei_global_cldev = cldev;

	return 0;

debugfs_unregister:
	iwl_mei_dbgfs_unregister(mei);
	mei_cldev_disable(cldev);
free_shared_mem:
	iwl_mei_free_shared_mem(cldev);
free:
	mei_cldev_set_drvdata(cldev, NULL);
	devm_kfree(&cldev->dev, mei);

	return ret;
}

#define SEND_SAP_MAX_WAIT_ITERATION 10

static void iwl_mei_remove(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	int i;

	/*
	 * We are being removed while the bus is active; it means we are
	 * going to suspend/shutdown, so the NIC will disappear.
	 */
	if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops)
		iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);

	if (rcu_access_pointer(iwl_mei_cache.netdev)) {
		struct net_device *dev;

		/*
		 * First take rtnl and only then the mutex to avoid an ABBA
		 * with iwl_mei_set_netdev()
		 */
		rtnl_lock();
		mutex_lock(&iwl_mei_mutex);

		/*
		 * If we are suspending and the wifi driver hasn't removed its
		 * netdev yet, do it now. In any case, don't change the
		 * cache.netdev pointer.
		 */
		dev = rcu_dereference_protected(iwl_mei_cache.netdev,
						lockdep_is_held(&iwl_mei_mutex));

		netdev_rx_handler_unregister(dev);
		mutex_unlock(&iwl_mei_mutex);
		rtnl_unlock();
	}

	mutex_lock(&iwl_mei_mutex);

	/*
	 * Tell CSME that we are going down so that it won't access the
	 * memory anymore; make sure this message goes through immediately.
	 */
	mei->csa_throttled = false;
	iwl_mei_send_sap_msg(mei->cldev,
			     SAP_MSG_NOTIF_HOST_GOES_DOWN);

	for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
		if (!iwl_mei_host_to_me_data_pending(mei))
			break;

		msleep(5);
	}

	/*
	 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
	 * it means that it will probably keep reading memory that we are going
	 * to unmap and free; expect IOMMU error messages.
	 */
	if (i == SEND_SAP_MAX_WAIT_ITERATION)
		dev_err(&mei->cldev->dev,
			"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");

	mutex_unlock(&iwl_mei_mutex);

	/*
	 * This looks strange, but this lock is taken here to make sure that
	 * iwl_mei_add_data_to_ring called from the Tx path sees that we
	 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
	 * Rx isn't a problem because the rx_handler can't be called after
	 * having been unregistered.
	 */
	spin_lock_bh(&mei->data_q_lock);
	clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
	spin_unlock_bh(&mei->data_q_lock);

	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);

	/*
	 * mei_cldev_disable will return only after all the MEI Rx is done.
	 * It must be called when iwl_mei_mutex is *not* held, since it waits
	 * for our Rx handler to complete.
	 * After it returns, no new Rx will start.
	 */
	mei_cldev_disable(cldev);

	/*
	 * The netdev was already removed, and its removal includes a call to
	 * synchronize_net(), so we know there won't be any new Rx that would
	 * trigger the following workers.
	 */
	cancel_work_sync(&mei->send_csa_msg_wk);
	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);

	/*
	 * If someone waits for the ownership, let him know that we are going
	 * down and that we are not connected anymore. He'll be able to take
	 * the device.
	 */
	wake_up_all(&mei->get_ownership_wq);

	mutex_lock(&iwl_mei_mutex);

	iwl_mei_global_cldev = NULL;

	wake_up_all(&mei->get_nvm_wq);

	iwl_mei_free_shared_mem(cldev);

	iwl_mei_dbgfs_unregister(mei);

	mei_cldev_set_drvdata(cldev, NULL);

	kfree(mei->nvm);

	kfree(rcu_access_pointer(mei->filters));

	devm_kfree(&cldev->dev, mei);

	mutex_unlock(&iwl_mei_mutex);
}

static const struct mei_cl_device_id iwl_mei_tbl[] = {
	{ KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},

	/* required last entry */
	{ }
};

/*
 * Do not export the device table because this module is loaded by
 * iwlwifi's dependency.
 */

static struct mei_cl_driver iwl_mei_cl_driver = {
	.id_table = iwl_mei_tbl,
	.name = KBUILD_MODNAME,
	.probe = iwl_mei_probe,
	.remove = iwl_mei_remove,
};

module_mei_cl_driver(iwl_mei_cl_driver);