// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021-2022 Intel Corporation
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mei_cl_bus.h>
#include <linux/rcupdate.h>
#include <linux/debugfs.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <net/cfg80211.h>

#include "internal.h"
#include "iwl-mei.h"
#include "trace.h"
#include "trace-data.h"
#include "sap.h"

MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface");
MODULE_LICENSE("GPL");

#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \
			      0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65)

/*
 * Since iwlwifi calls iwlmei without any context, hold a pointer to the
 * mei_cl_device structure here.
 * Define a mutex that will synchronize all the flows between iwlwifi and
 * iwlmei.
 * Note that iwlmei can't have several instances, so it is ok to have
 * static variables here.
 */
static struct mei_cl_device *iwl_mei_global_cldev;
static DEFINE_MUTEX(iwl_mei_mutex);
static unsigned long iwl_mei_status;

enum iwl_mei_status_bits {
	IWL_MEI_STATUS_SAP_CONNECTED,
};

bool iwl_mei_is_connected(void)
{
	return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
}
EXPORT_SYMBOL_GPL(iwl_mei_is_connected);

#define SAP_VERSION	3
#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
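
/*
 * Worked example: 0x21504153 stored as a little-endian __le32 lays out in
 * memory as the bytes 0x53 0x41 0x50 0x21, i.e. the ASCII string "SAP!".
 * iwl_mei_init_shared_mem() writes this value both at the start and at the
 * very end of the shared area.
 */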

struct iwl_sap_q_ctrl_blk {
	__le32 wr_ptr;
	__le32 rd_ptr;
	__le32 size;
};

enum iwl_sap_q_idx {
	SAP_QUEUE_IDX_NOTIF = 0,
	SAP_QUEUE_IDX_DATA,
	SAP_QUEUE_IDX_MAX,
};

struct iwl_sap_dir {
	__le32 reserved;
	struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX];
};

enum iwl_sap_dir_idx {
	SAP_DIRECTION_HOST_TO_ME = 0,
	SAP_DIRECTION_ME_TO_HOST,
	SAP_DIRECTION_MAX,
};

struct iwl_sap_shared_mem_ctrl_blk {
	__le32 sap_id;
	__le32 size;
	struct iwl_sap_dir dir[SAP_DIRECTION_MAX];
};

/*
 * The shared area has the following layout (the notif queue of each
 * direction comes before its data queue, matching the enum iwl_sap_q_idx
 * order used by iwl_mei_init_shared_mem()):
 *
 * +-----------------------------------+
 * |struct iwl_sap_shared_mem_ctrl_blk |
 * +-----------------------------------+
 * |Host -> ME notif queue             |
 * +-----------------------------------+
 * |Host -> ME data queue              |
 * +-----------------------------------+
 * |ME -> Host notif queue             |
 * +-----------------------------------+
 * |ME -> Host data queue              |
 * +-----------------------------------+
 * |SAP control block id (SAP!)        |
 * +-----------------------------------+
 */

#define SAP_H2M_DATA_Q_SZ	48256
#define SAP_M2H_DATA_Q_SZ	24128
#define SAP_H2M_NOTIF_Q_SZ	2240
#define SAP_M2H_NOTIF_Q_SZ	62720

#define _IWL_MEI_SAP_SHARED_MEM_SZ \
	(sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \
	 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \
	 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4)

#define IWL_MEI_SAP_SHARED_MEM_SZ \
	(roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE))
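
/*
 * Worked example (not authoritative; assumes 4 KiB pages and no struct
 * padding -- all the fields above are __le32):
 *   sizeof(struct iwl_sap_shared_mem_ctrl_blk) = 8 + 2 * (4 + 2 * 12) = 64
 *   _IWL_MEI_SAP_SHARED_MEM_SZ = 64 + 48256 + 2240 + 24128 + 62720 + 4
 *                              = 137412
 *   IWL_MEI_SAP_SHARED_MEM_SZ  = roundup(137412, 4096) = 139264 (34 pages)
 * The trailing 4 bytes hold a copy of SAP_CONTROL_BLOCK_ID (see the layout
 * diagram above).
 */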

struct iwl_mei_shared_mem_ptrs {
	struct iwl_sap_shared_mem_ctrl_blk *ctrl;
	void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
	size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX];
};

struct iwl_mei_filters {
	struct rcu_head rcu_head;
	struct iwl_sap_oob_filters filters;
};

/**
 * struct iwl_mei - holds the private data for iwl_mei
 *
 * @get_nvm_wq: the wait queue for the get_nvm flow
 * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA
 *	message. Used so that we can send CHECK_SHARED_AREA from atomic
 *	contexts.
 * @get_ownership_wq: the wait queue for the get_ownership flow
 * @shared_mem: the memory that is shared between CSME and the host
 * @cldev: the pointer to the MEI client device
 * @nvm: the data returned by the CSME for the NVM
 * @filters: the filters sent by CSME
 * @got_ownership: true if we own the device
 * @amt_enabled: true if CSME has wireless enabled
 * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI
 *	bus, but rather need to wait until send_csa_msg_wk runs
 * @csme_taking_ownership: true when CSME is taking ownership. Used to remember
 *	to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
 *	flow.
 * @link_prot_state: true when we are in link protection PASSIVE
 * @device_down: true if the device is down. Used to remember to send
 *	CSME_OWNERSHIP_CONFIRMED when the driver is already down.
 * @csa_throttle_end_wk: used when &csa_throttled is true
 * @pldr_wq: the wait queue for the PLDR flow
 * @pldr_active: true when the PLDR flow is in progress
 * @data_q_lock: protects the access to the data queues which are
 *	accessed without the mutex.
 * @netdev_work: used to defer registering and unregistering of the netdev to
 *	avoid taking the rtnl lock in the SAP messages handlers.
 * @sap_seq_no: the sequence number for the SAP messages
 * @seq_no: the sequence number for the SAP ME messages sent over the MEI bus
 * @dbgfs_dir: the debugfs dir entry
 */
struct iwl_mei {
	wait_queue_head_t get_nvm_wq;
	struct work_struct send_csa_msg_wk;
	wait_queue_head_t get_ownership_wq;
	struct iwl_mei_shared_mem_ptrs shared_mem;
	struct mei_cl_device *cldev;
	struct iwl_mei_nvm *nvm;
	struct iwl_mei_filters __rcu *filters;
	bool got_ownership;
	bool amt_enabled;
	bool csa_throttled;
	bool csme_taking_ownership;
	bool link_prot_state;
	bool device_down;
	struct delayed_work csa_throttle_end_wk;
	wait_queue_head_t pldr_wq;
	bool pldr_active;
	spinlock_t data_q_lock;
	struct work_struct netdev_work;

	atomic_t sap_seq_no;
	atomic_t seq_no;

	struct dentry *dbgfs_dir;
};

/**
 * struct iwl_mei_cache - cache for the parameters from iwlwifi
 * @ops: Callbacks to iwlwifi.
 * @netdev: The netdev that will be used to transmit / receive packets.
 * @conn_info: The connection info message triggered by iwlwifi's association.
 * @power_limit: pointer to an array of 10 elements (le16) representing the
 *	power restrictions per chain.
 * @rf_kill: rf kill state.
 * @mcc: MCC info
 * @mac_address: interface MAC address.
 * @nvm_address: NVM MAC address.
 * @priv: A pointer to iwlwifi.
 *
 * This is used to cache the configuration coming from iwlwifi. The data is
 * cached here so that we can buffer the configuration even if we don't have
 * a binding from the mei bus and hence no iwl_mei structure.
 */
struct iwl_mei_cache {
	const struct iwl_mei_ops *ops;
	struct net_device __rcu *netdev;
	const struct iwl_sap_notif_connection_info *conn_info;
	const __le16 *power_limit;
	u32 rf_kill;
	u16 mcc;
	u8 mac_address[6];
	u8 nvm_address[6];
	void *priv;
};

static struct iwl_mei_cache iwl_mei_cache = {
	.rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED
};

static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	if (mei_cldev_dma_unmap(cldev))
		dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n");
	memset(&mei->shared_mem, 0, sizeof(mei->shared_mem));
}

#define HBM_DMA_BUF_ID_WLAN 1

static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;

	mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN,
				       IWL_MEI_SAP_SHARED_MEM_SZ);

	if (IS_ERR(mem->ctrl)) {
		int ret = PTR_ERR(mem->ctrl);

		mem->ctrl = NULL;

		return ret;
	}

	memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ);

	return 0;
}

static void iwl_mei_init_shared_mem(struct iwl_mei *mei)
{
	struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem;
	struct iwl_sap_dir *h2m;
	struct iwl_sap_dir *m2h;
	int dir, queue;
	u8 *q_head;

	mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID);

	mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl));

	h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST];

	h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
		cpu_to_le32(SAP_H2M_DATA_Q_SZ);
	h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
		cpu_to_le32(SAP_H2M_NOTIF_Q_SZ);
	m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size =
		cpu_to_le32(SAP_M2H_DATA_Q_SZ);
	m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size =
		cpu_to_le32(SAP_M2H_NOTIF_Q_SZ);

	/* q_head points to the start of the first queue */
	q_head = (void *)(mem->ctrl + 1);

	/* Initialize the queue heads */
	for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) {
		for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) {
			mem->q_head[dir][queue] = q_head;
			q_head +=
				le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
			mem->q_size[dir][queue] =
				le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size);
		}
	}

	*(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID);
}
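
/*
 * Illustration of the offsets iwl_mei_init_shared_mem() produces (derived
 * from the enum order above, not from an external spec). Right after the
 * 64 byte control block:
 *   H2M notif @     64 (2240 bytes),  H2M data @   2304 (48256 bytes),
 *   M2H notif @  50560 (62720 bytes), M2H data @ 113280 (24128 bytes),
 *   trailing SAP_CONTROL_BLOCK_ID @ 137408.
 */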

static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
					struct iwl_sap_q_ctrl_blk *notif_q,
					u8 *q_head,
					const struct iwl_sap_hdr *hdr,
					u32 q_sz)
{
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	size_t room_in_buf;
	size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len);

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the end of the buffer\n");
		return -EINVAL;
	}

	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;

	/* we don't have enough room for the data to write */
	if (room_in_buf < tx_sz) {
		dev_err(&cldev->dev,
			"Not enough room in the buffer\n");
		return -ENOSPC;
	}

	if (wr + tx_sz <= q_sz) {
		memcpy(q_head + wr, hdr, tx_sz);
	} else {
		memcpy(q_head + wr, hdr, q_sz - wr);
		memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
	}

	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
	return 0;
}
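
/*
 * Wrap-around example with made-up numbers: q_sz = 16, rd = 2, wr = 12 and
 * tx_sz = 5. room_in_buf = 16 - 12 + 2 = 6 >= 5, so the write is split:
 * 4 bytes land at offsets 12..15, the fifth byte at offset 0, and wr_ptr
 * becomes (12 + 5) % 16 = 1.
 */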

static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei)
{
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];

	if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr))
		return true;

	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr);
}

static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
	};
	int ret;

	lockdep_assert_held(&iwl_mei_mutex);

	if (mei->csa_throttled)
		return 0;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n",
			ret);
		return ret;
	}

	mei->csa_throttled = true;

	schedule_delayed_work(&mei->csa_throttle_end_wk,
			      msecs_to_jiffies(100));

	return 0;
}
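
/*
 * Throttling timeline, as implemented above and in
 * iwl_mei_csa_throttle_end_wk() below: the first CHECK_SHARED_AREA goes out
 * immediately and sets csa_throttled; any request in the following 100ms is
 * silently absorbed (we return 0); once the delayed work runs, it clears the
 * flag and sends one deferred CHECK_SHARED_AREA if data is still pending.
 */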

static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, csa_throttle_end_wk.work);

	mutex_lock(&iwl_mei_mutex);

	mei->csa_throttled = false;

	if (iwl_mei_host_to_me_data_pending(mei))
		iwl_mei_send_check_shared_area(mei->cldev);

	mutex_unlock(&iwl_mei_mutex);
}

static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev,
					struct iwl_sap_hdr *hdr)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;
	int ret;

	lockdep_assert_held(&iwl_mei_mutex);

	if (!mei->shared_mem.ctrl) {
		dev_err(&cldev->dev,
			"No shared memory, can't send any SAP message\n");
		return -EINVAL;
	}

	if (!iwl_mei_is_connected()) {
		dev_err(&cldev->dev,
			"Can't send a SAP message if we're not connected\n");
		return -ENODEV;
	}

	hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
	dev_dbg(&cldev->dev, "Sending %d\n", le16_to_cpu(hdr->type));

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF];
	ret = iwl_mei_write_cyclic_buf(cldev, notif_q, q_head, hdr, q_sz);

	if (ret < 0)
		return ret;

	trace_iwlmei_sap_cmd(hdr, true);

	return iwl_mei_send_check_shared_area(cldev);
}

void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
{
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct iwl_sap_dir *dir;
	struct iwl_mei *mei;
	size_t room_in_buf;
	size_t tx_sz;
	size_t hdr_sz;
	u32 q_sz;
	u32 rd;
	u32 wr;
	u8 *q_head;

	if (!iwl_mei_global_cldev)
		return;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	/*
	 * We access this path for Rx packets (the more common case)
	 * and from the Tx path when we send DHCP packets; the latter is
	 * very unlikely.
	 * Take the lock already here to make sure we see that remove()
	 * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit.
	 */
	spin_lock_bh(&mei->data_q_lock);

	if (!iwl_mei_is_connected()) {
		spin_unlock_bh(&mei->data_q_lock);
		return;
	}

	/*
	 * We are in an RCU critical section and the remove from the CSME bus
	 * which would free this memory waits for the readers to complete
	 * (this is done in netdev_rx_handler_unregister).
	 */
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA];

	rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) :
			 sizeof(struct iwl_sap_hdr);
	tx_sz = skb->len + hdr_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&mei->cldev->dev,
			"can't write the data: pointers are past the end of the buffer\n");
		goto out;
	}

	room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr;

	/* we don't have enough room for the data to write */
	if (room_in_buf < tx_sz) {
		dev_err(&mei->cldev->dev,
			"Not enough room in the buffer for this data\n");
		goto out;
	}

	if (skb_headroom(skb) < hdr_sz) {
		dev_err(&mei->cldev->dev,
			"Not enough headroom in the skb to write the SAP header\n");
		goto out;
	}

	if (cb_tx) {
		struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr));

		memset(cb_hdr, 0, sizeof(*cb_hdr));
		cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET);
		cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr));
		cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
		cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX));
		cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr));
		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP);
	} else {
		struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr));

		hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET);
		hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
		hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no));
		trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR);
	}

	if (wr + tx_sz <= q_sz) {
		skb_copy_bits(skb, 0, q_head + wr, tx_sz);
	} else {
		skb_copy_bits(skb, 0, q_head + wr, q_sz - wr);
		skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr));
	}

	WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));

out:
	spin_unlock_bh(&mei->data_q_lock);
}

static int
iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type)
{
	struct iwl_sap_hdr msg = {
		.type = cpu_to_le16(type),
	};

	return iwl_mei_send_sap_msg_payload(cldev, &msg);
}

static void iwl_mei_send_csa_msg_wk(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, send_csa_msg_wk);

	if (!iwl_mei_is_connected())
		return;

	mutex_lock(&iwl_mei_mutex);

	iwl_mei_send_check_shared_area(mei->cldev);

	mutex_unlock(&iwl_mei_mutex);
}

/* Called in an RCU read critical section from netif_receive_skb */
static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct iwl_mei *mei =
		rcu_dereference(skb->dev->rx_handler_data);
	struct iwl_mei_filters *filters = rcu_dereference(mei->filters);
	bool rx_for_csme = false;
	rx_handler_result_t res;

	/*
	 * remove() unregisters this handler and then calls
	 * synchronize_net(), so this should never happen.
	 */
	if (!iwl_mei_is_connected()) {
		dev_err(&mei->cldev->dev,
			"Got an Rx packet, but we're not connected to SAP?\n");
		return RX_HANDLER_PASS;
	}

	if (filters)
		res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme);
	else
		res = RX_HANDLER_PASS;

	/*
	 * The data is already on the ring of the shared area, all we
	 * need to do is to tell the CSME firmware to check what we have
	 * there.
	 */
	if (rx_for_csme)
		schedule_work(&mei->send_csa_msg_wk);

	if (res != RX_HANDLER_PASS) {
		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR);
		dev_kfree_skb(skb);
	}

	return res;
}

static void iwl_mei_netdev_work(struct work_struct *wk)
{
	struct iwl_mei *mei =
		container_of(wk, struct iwl_mei, netdev_work);
	struct net_device *netdev;

	/*
	 * First take rtnl and only then the mutex to avoid an ABBA
	 * with iwl_mei_set_netdev()
	 */
	rtnl_lock();
	mutex_lock(&iwl_mei_mutex);

	netdev = rcu_dereference_protected(iwl_mei_cache.netdev,
					   lockdep_is_held(&iwl_mei_mutex));
	if (netdev) {
		if (mei->amt_enabled)
			netdev_rx_handler_register(netdev, iwl_mei_rx_handler,
						   mei);
		else
			netdev_rx_handler_unregister(netdev);
	}

	mutex_unlock(&iwl_mei_mutex);
	rtnl_unlock();
}

static void
iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev,
			   const struct iwl_sap_me_msg_start_ok *rsp,
			   ssize_t len)
{
	if (len != sizeof(*rsp)) {
		dev_err(&cldev->dev,
			"got invalid SAP_ME_MSG_START_OK from CSME firmware\n");
		dev_err(&cldev->dev,
			"size is incorrect: %zd instead of %zu\n",
			len, sizeof(*rsp));
		return;
	}

	if (rsp->supported_version != SAP_VERSION) {
		dev_err(&cldev->dev,
			"didn't get the expected version: got %d\n",
			rsp->supported_version);
		return;
	}

	mutex_lock(&iwl_mei_mutex);
	set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
	/*
	 * We'll receive the AMT_STATE SAP message in a bit and
	 * that will continue the flow
	 */
	mutex_unlock(&iwl_mei_mutex);
}

static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev,
					const struct iwl_sap_csme_filters *filters)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
	struct iwl_mei_filters *new_filters;
	struct iwl_mei_filters *old_filters;

	old_filters =
		rcu_dereference_protected(mei->filters,
					  lockdep_is_held(&iwl_mei_mutex));

	new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL);
	if (!new_filters)
		return;

	/* Copy the OOB filters */
	new_filters->filters = filters->filters;

	rcu_assign_pointer(mei->filters, new_filters);

	if (old_filters)
		kfree_rcu(old_filters, rcu_head);
}

static void
iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
			   const struct iwl_sap_notif_conn_status *status)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_mei_conn_info conn_info = {
		.lp_state = le32_to_cpu(status->link_prot_state),
		.ssid_len = le32_to_cpu(status->conn_info.ssid_len),
		.channel = status->conn_info.channel,
		.band = status->conn_info.band,
		.auth_mode = le32_to_cpu(status->conn_info.auth_mode),
		.pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher),
	};

	if (!iwl_mei_cache.ops ||
	    conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid))
		return;

	memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len);
	ether_addr_copy(conn_info.bssid, status->conn_info.bssid);

	iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);

	mei->link_prot_state = !!le32_to_cpu(status->link_prot_state);

	/*
	 * Update the Rfkill state in case the host does not own the device:
	 * if we are in Link Protection, ask to not touch the device, else,
	 * unblock rfkill.
	 * If the host owns the device, inform the user space whether it can
	 * roam.
	 */
	if (mei->got_ownership)
		iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv,
						     mei->link_prot_state);
	else
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv,
					  mei->link_prot_state, false);
}

static void iwl_mei_set_init_conf(struct iwl_mei *mei)
{
	struct iwl_sap_notif_host_link_up link_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
		.hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)),
	};
	struct iwl_sap_notif_country_code mcc_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
		.hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)),
		.mcc = cpu_to_le16(iwl_mei_cache.mcc),
	};
	struct iwl_sap_notif_sar_limits sar_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
		.hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)),
	};
	struct iwl_sap_notif_host_nic_info nic_info_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
		.hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)),
	};
	struct iwl_sap_msg_dw rfkill_msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
		.hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)),
		.val = cpu_to_le32(iwl_mei_cache.rf_kill),
	};

	/* wifi driver has registered already */
	if (iwl_mei_cache.ops) {
		iwl_mei_send_sap_msg(mei->cldev,
				     SAP_MSG_NOTIF_WIFIDR_UP);
		iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv);
	}

	iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC);

	if (iwl_mei_cache.conn_info) {
		link_msg.conn_info = *iwl_mei_cache.conn_info;
		iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr);
	}

	iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr);

	if (iwl_mei_cache.power_limit) {
		memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit,
		       sizeof(sar_msg.sar_chain_info_table));
		iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr);
	}

	ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address);
	ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address);
	iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr);

	iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr);
}

static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
				     const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mutex_lock(&iwl_mei_mutex);

	if (mei->amt_enabled == !!le32_to_cpu(dw->val))
		goto out;

	mei->amt_enabled = !!le32_to_cpu(dw->val);

	if (mei->amt_enabled)
		iwl_mei_set_init_conf(mei);
	else if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);

	schedule_work(&mei->netdev_work);

out:
	mutex_unlock(&iwl_mei_mutex);
}

static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev,
				     const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME);
}

static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev,
						 const void *payload)
{
	/* We can get ownership and the driver is registered, go ahead */
	if (iwl_mei_cache.ops)
		iwl_mei_send_sap_msg(cldev,
				     SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
}

static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
						 const void *payload)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	dev_info(&cldev->dev, "CSME takes ownership\n");

	mei->got_ownership = false;

	if (iwl_mei_cache.ops && !mei->device_down) {
		/*
		 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi
		 * driver is finished taking the device down.
		 */
		mei->csme_taking_ownership = true;

		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
	} else {
		iwl_mei_send_sap_msg(cldev,
				     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
	}
}

static void iwl_mei_handle_nvm(struct mei_cl_device *cldev,
			       const struct iwl_sap_nvm *sap_nvm)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm;
	int i;

	kfree(mei->nvm);
	mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL);
	if (!mei->nvm)
		return;

	ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr);
	mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs;
	mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg);
	mei->nvm->caps = le32_to_cpu(sap_nvm->caps);
	mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version);

	for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++)
		mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]);

	wake_up_all(&mei->get_nvm_wq);
}

static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev,
					   const struct iwl_sap_msg_dw *dw)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	/*
	 * This means that we can't use the wifi device right now, CSME is not
	 * ready to let us use it.
	 */
	if (!dw->val) {
		dev_info(&cldev->dev, "Ownership req denied\n");
		return;
	}

	mei->got_ownership = true;
	wake_up_all(&mei->get_ownership_wq);

	iwl_mei_send_sap_msg(cldev,
			     SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED);

	/* We can now start the connection, unblock rfkill */
	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
}

static void iwl_mei_handle_pldr_ack(struct mei_cl_device *cldev,
				    const struct iwl_sap_pldr_ack_data *ack)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);

	mei->pldr_active = le32_to_cpu(ack->status) == SAP_PLDR_STATUS_SUCCESS;
	wake_up_all(&mei->pldr_wq);
}

static void iwl_mei_handle_ping(struct mei_cl_device *cldev,
				const struct iwl_sap_hdr *hdr)
{
	iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG);
}

static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev,
				   const struct iwl_sap_hdr *hdr)
{
	u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr);
	u16 type = le16_to_cpu(hdr->type);

	dev_dbg(&cldev->dev,
		"Got a new SAP message: type %d, len %d, seq %d\n",
		le16_to_cpu(hdr->type), len,
		le32_to_cpu(hdr->seq_num));

#define SAP_MSG_HANDLER(_cmd, _handler, _sz)				\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		mutex_lock(&iwl_mei_mutex);				\
		_handler(cldev, (const void *)hdr);			\
		mutex_unlock(&iwl_mei_mutex);				\
		break

#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz)			\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		_handler(cldev, (const void *)hdr);			\
		break

#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz)				\
	case SAP_MSG_NOTIF_ ## _cmd:					\
		if (len < _sz) {					\
			dev_err(&cldev->dev,				\
				"Bad size for %d: %u < %u\n",		\
				le16_to_cpu(hdr->type),			\
				(unsigned int)len,			\
				(unsigned int)_sz);			\
			break;						\
		}							\
		break
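
/*
 * For reference, SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0) below
 * expands (roughly) to:
 *
 *	case SAP_MSG_NOTIF_PING:
 *		if (len < 0) { ... bad size ... break; }
 *		mutex_lock(&iwl_mei_mutex);
 *		iwl_mei_handle_ping(cldev, (const void *)hdr);
 *		mutex_unlock(&iwl_mei_mutex);
 *		break;
 *
 * i.e. _sz is the minimal message size the handler needs, and the _NO_LOCK
 * variant skips the mutex.
 */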

	switch (type) {
	SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0);
	SAP_MSG_HANDLER(CSME_FILTERS,
			iwl_mei_handle_csme_filters,
			sizeof(struct iwl_sap_csme_filters));
	SAP_MSG_HANDLER(CSME_CONN_STATUS,
			iwl_mei_handle_conn_status,
			sizeof(struct iwl_sap_notif_conn_status));
	SAP_MSG_HANDLER_NO_LOCK(AMT_STATE,
				iwl_mei_handle_amt_state,
				sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER_NO_HANDLER(PONG, 0);
	SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm,
			sizeof(struct iwl_sap_nvm));
	SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ,
			iwl_mei_handle_rx_host_own_req,
			sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner,
			sizeof(struct iwl_sap_msg_dw));
	SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP,
			iwl_mei_handle_can_release_ownership, 0);
	SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP,
			iwl_mei_handle_csme_taking_ownership, 0);
	SAP_MSG_HANDLER(PLDR_ACK, iwl_mei_handle_pldr_ack,
			sizeof(struct iwl_sap_pldr_ack_data));
	default:
	/*
	 * This is not really an error: there are messages that we decided
	 * to ignore. Yet, it is useful to be able to leave a note if debug
	 * is enabled.
	 */
	dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n",
		le16_to_cpu(hdr->type), len);
	}

#undef SAP_MSG_HANDLER
#undef SAP_MSG_HANDLER_NO_LOCK
#undef SAP_MSG_HANDLER_NO_HANDLER
}

static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz,
				u32 *_rd, u32 wr,
				void *_buf, u32 len)
{
	u8 *buf = _buf;
	u32 rd = *_rd;

	if (rd + len <= q_sz) {
		memcpy(buf, q_head + rd, len);
		rd += len;
	} else {
		memcpy(buf, q_head + rd, q_sz - rd);
		memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd));
		rd = len - (q_sz - rd);
	}

	*_rd = rd;
}
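
/*
 * This is the read-side mirror of iwl_mei_write_cyclic_buf(). With the same
 * made-up numbers (q_sz = 16, rd = 12, len = 5): 4 bytes are copied from
 * offsets 12..15, one byte from offset 0, and *_rd ends up at 1. Note that
 * the wr argument is unused here; the callers bound len by the amount of
 * valid data beforehand.
 */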

#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) +      \
			     IEEE80211_TKIP_IV_LEN +                 \
			     sizeof(rfc1042_header) + ETH_TLEN)
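
/*
 * Back-of-the-envelope (assuming the usual 802.11 sizes): a QoS data header
 * is 26 bytes, the TKIP IV adds 8, the RFC 1042 SNAP header 6 and the
 * EtherType 2, so QOS_HDR_IV_SNAP_LEN reserves 42 bytes of headroom -- the
 * worst case needed to turn the ethernet frame back into an 802.11 frame.
 */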

static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev,
				    const u8 *q_head, u32 q_sz,
				    u32 rd, u32 wr, ssize_t valid_rx_sz,
				    struct sk_buff_head *tx_skbs)
{
	struct iwl_sap_hdr hdr;
	struct net_device *netdev =
		rcu_dereference_protected(iwl_mei_cache.netdev,
					  lockdep_is_held(&iwl_mei_mutex));

	if (!netdev)
		return;

	while (valid_rx_sz >= sizeof(hdr)) {
		struct ethhdr *ethhdr;
		unsigned char *data;
		struct sk_buff *skb;
		u16 len;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr));
		valid_rx_sz -= sizeof(hdr);
		len = le16_to_cpu(hdr.len);

		if (valid_rx_sz < len) {
			dev_err(&cldev->dev,
				"Data queue is corrupted: valid data len %zd, len %d\n",
				valid_rx_sz, len);
			break;
		}

		if (len < sizeof(*ethhdr)) {
			dev_err(&cldev->dev,
				"Data len is smaller than an ethernet header? len = %d\n",
				len);
		}

		valid_rx_sz -= len;

		if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) {
			dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n",
				le16_to_cpu(hdr.type), len);
			continue;
		}

		/* We need enough room for the WiFi header + SNAP + IV */
		skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN);
		if (!skb)
			continue;

		skb_reserve(skb, QOS_HDR_IV_SNAP_LEN);
		ethhdr = skb_push(skb, sizeof(*ethhdr));

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr,
				    ethhdr, sizeof(*ethhdr));
		len -= sizeof(*ethhdr);

		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = ethhdr->h_proto;

		data = skb_put(skb, len);
		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len);

		/*
		 * Enqueue the skb here so that it can be sent later when we
		 * do not hold the mutex. TX'ing a packet with a mutex held is
		 * possible, but it wouldn't be nice to forbid the TX path
		 * from calling any of iwlmei's functions, since every API
		 * from iwlmei needs the mutex.
		 */
		__skb_queue_tail(tx_skbs, skb);
	}
}

static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev,
				      const u8 *q_head, u32 q_sz,
				      u32 rd, u32 wr, ssize_t valid_rx_sz)
{
	struct page *p = alloc_page(GFP_KERNEL);
	struct iwl_sap_hdr *hdr;

	if (!p)
		return;

	hdr = page_address(p);

	while (valid_rx_sz >= sizeof(*hdr)) {
		u16 len;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr));
		valid_rx_sz -= sizeof(*hdr);
		len = le16_to_cpu(hdr->len);

		if (valid_rx_sz < len)
			break;

		iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len);

		trace_iwlmei_sap_cmd(hdr, false);
		iwl_mei_handle_sap_msg(cldev, hdr);
		valid_rx_sz -= len;
	}

	/* valid_rx_sz must be 0 now... */
	if (valid_rx_sz)
		dev_err(&cldev->dev,
			"More data in the buffer although we read it all\n");

	__free_page(p);
}

static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
				  struct iwl_sap_q_ctrl_blk *notif_q,
				  const u8 *q_head,
				  struct sk_buff_head *skbs,
				  u32 q_sz)
{
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	ssize_t valid_rx_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the buffer limit\n");
		return;
	}

	if (rd == wr)
		return;

	valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;

	if (skbs)
		iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
					valid_rx_sz, skbs);
	else
		iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
					  valid_rx_sz);

	/* Increment the read pointer to point to the write pointer */
	WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
}

static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct sk_buff_head tx_skbs;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;

	if (!mei->shared_mem.ctrl)
		return;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];

	/*
	 * Do not hold the mutex here; rather, each and every message handler
	 * takes it itself. This allows a handler to take it exactly at the
	 * point it needs it.
	 */
	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);

	mutex_lock(&iwl_mei_mutex);
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];

	__skb_queue_head_init(&tx_skbs);

	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);

	if (skb_queue_empty(&tx_skbs)) {
		mutex_unlock(&iwl_mei_mutex);
		return;
	}

	/*
	 * Take the RCU read lock before we unlock the mutex to make sure that
	 * even if the netdev is replaced by another non-NULL netdev right after
	 * we unlock the mutex, the old netdev will still be valid when we
	 * transmit the frames. We can't allow the netdev to be replaced here
	 * because the skbs hold a pointer to it.
	 */
	rcu_read_lock();

	mutex_unlock(&iwl_mei_mutex);

	if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
		dev_err(&cldev->dev, "Can't Tx without a netdev\n");
		skb_queue_purge(&tx_skbs);
		goto out;
	}

	while (!skb_queue_empty(&tx_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&tx_skbs);

		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
		dev_queue_xmit(skb);
	}

out:
	rcu_read_unlock();
}

static void iwl_mei_rx(struct mei_cl_device *cldev)
{
	struct iwl_sap_me_msg_hdr *hdr;
	u8 msg[100];
	ssize_t ret;

	ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
		return;
	}

	if (ret == 0) {
		dev_err(&cldev->dev, "got an empty response\n");
		return;
	}

	hdr = (void *)msg;
	trace_iwlmei_me_msg(hdr, false);

	switch (le32_to_cpu(hdr->type)) {
	case SAP_ME_MSG_START_OK:
		BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
			     sizeof(msg));

		iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
		break;
	case SAP_ME_MSG_CHECK_SHARED_AREA:
		iwl_mei_handle_check_shared_area(cldev);
		break;
	default:
		dev_err(&cldev->dev, "got an unexpected RX notification: %d\n",
			le32_to_cpu(hdr->type));
		break;
	}
}

static int iwl_mei_send_start(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_START),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
		.hdr.len = cpu_to_le32(sizeof(msg)),
		.supported_versions[0] = SAP_VERSION,
		.init_data_seq_num = cpu_to_le16(0x100),
		.init_notif_seq_num = cpu_to_le16(0x800),
	};
	int ret;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_START message %d\n",
			ret);
		return ret;
	}

	return 0;
}
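
/*
 * A bird's eye view of the start handshake driven by iwl_mei_send_start():
 *
 *	host				 CSME
 *	 |  SAP_ME_MSG_START --------->	  |
 *	 |  <------ SAP_ME_MSG_START_OK	  |
 *	 |  (iwl_mei_handle_rx_start_ok	  |
 *	 |   sets SAP_CONNECTED)	  |
 *	 |  <--- SAP_MSG_NOTIF_AMT_STATE  |
 *
 * Only once AMT_STATE reports that AMT is enabled does
 * iwl_mei_set_init_conf() push the cached configuration to CSME.
 */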

static int iwl_mei_enable(struct mei_cl_device *cldev)
{
	int ret;

	ret = mei_cldev_enable(cldev);
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
		return ret;
	}

	ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
	if (ret) {
		dev_err(&cldev->dev,
			"failed to register to the rx cb: %d\n", ret);
		mei_cldev_disable(cldev);
		return ret;
	}

	return 0;
}

struct iwl_mei_nvm *iwl_mei_get_nvm(void)
{
	struct iwl_mei_nvm *nvm = NULL;
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
				   SAP_MSG_NOTIF_GET_NVM);
	if (ret)
		goto out;

	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
	if (!ret)
		return NULL;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (mei->nvm)
		nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);

out:
	mutex_unlock(&iwl_mei_mutex);
	return nvm;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);
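
/*
 * Minimal usage sketch from the wifi driver side (illustrative only, error
 * handling elided, not taken from any real caller):
 *
 *	struct iwl_mei_nvm *nvm = iwl_mei_get_nvm();
 *
 *	if (nvm) {
 *		... use nvm->hw_addr, nvm->channels, etc. ...
 *		kfree(nvm);
 *	}
 *
 * The caller owns the returned copy; NULL means there is no SAP connection
 * or we hit the 2 second timeout waiting for the SAP_MSG_NOTIF_NVM reply.
 */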

#define IWL_MEI_PLDR_NUM_RETRIES	3

int iwl_mei_pldr_req(void)
{
	struct iwl_mei *mei;
	int ret;
	struct iwl_sap_pldr_data msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};
	int i;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	if (!mei->amt_enabled) {
		ret = 0;
		goto out;
	}

	for (i = 0; i < IWL_MEI_PLDR_NUM_RETRIES; i++) {
		ret = iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
		mutex_unlock(&iwl_mei_mutex);
		if (ret)
			return ret;

		ret = wait_event_timeout(mei->pldr_wq, mei->pldr_active, HZ / 2);
		if (ret)
			break;

		/* Take the mutex for the next iteration */
		mutex_lock(&iwl_mei_mutex);
	}

	if (ret)
		return 0;

	ret = -ETIMEDOUT;
out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_pldr_req);

int iwl_mei_get_ownership(void)
{
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	if (!mei->amt_enabled) {
		ret = 0;
		goto out;
	}

	if (mei->got_ownership) {
		ret = 0;
		goto out;
	}

	ret = iwl_mei_send_sap_msg(mei->cldev,
				   SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
	if (ret)
		goto out;

	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_ownership_wq,
				 mei->got_ownership, HZ / 2);
	return (!ret) ? -ETIMEDOUT : 0;
out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);
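
/*
 * The ownership round-trip behind iwl_mei_get_ownership():
 * SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP goes out to CSME, and
 * iwl_mei_handle_rx_host_own_req() later sets got_ownership and wakes
 * get_ownership_wq. A zero dword in the reply means the request was denied,
 * in which case we simply hit the HZ / 2 timeout above.
 */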

void iwl_mei_alive_notif(bool success)
{
	struct iwl_mei *mei;
	struct iwl_sap_pldr_end_data msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_PLDR_END),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.status = success ? cpu_to_le32(SAP_PLDR_STATUS_SUCCESS) :
			cpu_to_le32(SAP_PLDR_STATUS_FAILURE),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
	if (!mei || !mei->pldr_active)
		goto out;

	mei->pldr_active = false;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_alive_notif);

void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
			     const struct iwl_mei_colloc_info *colloc_info)
{
	struct iwl_sap_notif_host_link_up msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.conn_info = {
			.ssid_len = cpu_to_le32(conn_info->ssid_len),
			.channel = conn_info->channel,
			.band = conn_info->band,
			.pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher),
			.auth_mode = cpu_to_le32(conn_info->auth_mode),
		},
	};
	struct iwl_mei *mei;

	if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid))
		return;

	memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len);
	memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN);

	if (colloc_info) {
		msg.colloc_channel = colloc_info->channel;
		msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1;
		memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN);
	}

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info =
		kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_host_associated);

void iwl_mei_host_disassociated(void)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_host_link_down msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.type = HOST_LINK_DOWN_TYPE_TEMPORARY,
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info = NULL;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated);

void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill)
{
	struct iwl_mei *mei;
	u32 rfkill_state = 0;
	struct iwl_sap_msg_dw msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	if (!sw_rfkill)
		rfkill_state |= SAP_SW_RFKILL_DEASSERTED;

	if (!hw_rfkill)
		rfkill_state |= SAP_HW_RFKILL_DEASSERTED;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	msg.val = cpu_to_le32(rfkill_state);

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	iwl_mei_cache.rf_kill = rfkill_state;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state);

void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_host_nic_info msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	ether_addr_copy(msg.mac_address, mac_address);
	ether_addr_copy(msg.nvm_address, nvm_address);

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	ether_addr_copy(iwl_mei_cache.mac_address, mac_address);
	ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address);
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info);

void iwl_mei_set_country_code(u16 mcc)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_country_code msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
		.mcc = cpu_to_le16(mcc),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
		goto out;

	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);

out:
	iwl_mei_cache.mcc = mcc;
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_country_code);

void iwl_mei_set_power_limit(const __le16 *power_limit)
{
	struct iwl_mei *mei;
	struct iwl_sap_notif_sar_limits msg = {
		.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS),
		.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
	};

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei || !mei->amt_enabled)
1668 		goto out;
1669 
1670 	memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table));
1671 
1672 	iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
1673 
1674 out:
1675 	kfree(iwl_mei_cache.power_limit);
1676 	iwl_mei_cache.power_limit = kmemdup(power_limit,
1677 					    sizeof(msg.sar_chain_info_table), GFP_KERNEL);
1678 	mutex_unlock(&iwl_mei_mutex);
1679 }
1680 EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit);
1681 
1682 void iwl_mei_set_netdev(struct net_device *netdev)
1683 {
1684 	struct iwl_mei *mei;
1685 
1686 	mutex_lock(&iwl_mei_mutex);
1687 
1688 	if (!iwl_mei_is_connected()) {
1689 		rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1690 		goto out;
1691 	}
1692 
1693 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1694 
1695 	if (!mei)
1696 		goto out;
1697 
1698 	if (!netdev) {
1699 		struct net_device *dev =
1700 			rcu_dereference_protected(iwl_mei_cache.netdev,
1701 						  lockdep_is_held(&iwl_mei_mutex));
1702 
1703 		if (!dev)
1704 			goto out;
1705 
1706 		netdev_rx_handler_unregister(dev);
1707 	}
1708 
1709 	rcu_assign_pointer(iwl_mei_cache.netdev, netdev);
1710 
1711 	if (netdev && mei->amt_enabled)
1712 		netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);
1713 
1714 out:
1715 	mutex_unlock(&iwl_mei_mutex);
1716 }
1717 EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);
1718 
1719 void iwl_mei_device_state(bool up)
1720 {
1721 	struct iwl_mei *mei;
1722 
1723 	mutex_lock(&iwl_mei_mutex);
1724 
1725 	if (!iwl_mei_is_connected())
1726 		goto out;
1727 
1728 	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
1729 
1730 	if (!mei)
1731 		goto out;
1732 
1733 	mei->device_down = !up;
1734 
1735 	if (up || !mei->csme_taking_ownership)
1736 		goto out;
1737 
1738 	iwl_mei_send_sap_msg(mei->cldev,
1739 			     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
1740 	mei->csme_taking_ownership = false;
1741 out:
1742 	mutex_unlock(&iwl_mei_mutex);
1743 }
1744 EXPORT_SYMBOL_GPL(iwl_mei_device_state);
1745 
1746 int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
1747 {
1748 	int ret;
1749 
1750 	/*
1751 	 * We must have a non-NULL priv pointer to not crash when there are
1752 	 * multiple WiFi devices.
1753 	 */
1754 	if (!priv)
1755 		return -EINVAL;
1756 
1757 	mutex_lock(&iwl_mei_mutex);
1758 
1759 	/* do not allow registration if someone else already registered */
1760 	if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
1761 		ret = -EBUSY;
1762 		goto out;
1763 	}
1764 
1765 	iwl_mei_cache.priv = priv;
1766 	iwl_mei_cache.ops = ops;
1767 
1768 	if (iwl_mei_global_cldev) {
1769 		struct iwl_mei *mei =
1770 			mei_cldev_get_drvdata(iwl_mei_global_cldev);
1771 
1772 		/* we have already a SAP connection */
1773 		if (iwl_mei_is_connected()) {
1774 			if (mei->amt_enabled)
1775 				iwl_mei_send_sap_msg(mei->cldev,
1776 						     SAP_MSG_NOTIF_WIFIDR_UP);
1777 			ops->rfkill(priv, mei->link_prot_state, false);
1778 		}
1779 	}
1780 	ret = 0;
1781 
1782 out:
1783 	mutex_unlock(&iwl_mei_mutex);
1784 	return ret;
1785 }
1786 EXPORT_SYMBOL_GPL(iwl_mei_register);
1787 
1788 void iwl_mei_start_unregister(void)
1789 {
1790 	mutex_lock(&iwl_mei_mutex);
1791 
1792 	/* At this point, the wifi driver should have removed the netdev */
1793 	if (rcu_access_pointer(iwl_mei_cache.netdev))
1794 		pr_err("Still had a netdev pointer set upon unregister\n");
1795 
1796 	kfree(iwl_mei_cache.conn_info);
1797 	iwl_mei_cache.conn_info = NULL;
1798 	kfree(iwl_mei_cache.power_limit);
1799 	iwl_mei_cache.power_limit = NULL;
1800 	iwl_mei_cache.ops = NULL;
1801 	/* leave iwl_mei_cache.priv non-NULL to prevent any new registration */
1802 
1803 	mutex_unlock(&iwl_mei_mutex);
1804 }
1805 EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);
1806 
1807 void iwl_mei_unregister_complete(void)
1808 {
1809 	mutex_lock(&iwl_mei_mutex);
1810 
1811 	iwl_mei_cache.priv = NULL;
1812 
1813 	if (iwl_mei_global_cldev) {
1814 		struct iwl_mei *mei =
1815 			mei_cldev_get_drvdata(iwl_mei_global_cldev);
1816 
1817 		iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
1818 		mei->got_ownership = false;
1819 	}
1820 
1821 	mutex_unlock(&iwl_mei_mutex);
1822 }
1823 EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);
1824 
1825 #if IS_ENABLED(CONFIG_DEBUG_FS)
1826 
1827 static ssize_t
1828 iwl_mei_dbgfs_send_start_message_write(struct file *file,
1829 				       const char __user *user_buf,
1830 				       size_t count, loff_t *ppos)
1831 {
1832 	int ret;
1833 
1834 	mutex_lock(&iwl_mei_mutex);
1835 
1836 	if (!iwl_mei_global_cldev) {
1837 		ret = -ENODEV;
1838 		goto out;
1839 	}
1840 
1841 	ret = iwl_mei_send_start(iwl_mei_global_cldev);
1842 
1843 out:
1844 	mutex_unlock(&iwl_mei_mutex);
1845 	return ret ?: count;
1846 }
1847 
1848 static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
1849 	.write = iwl_mei_dbgfs_send_start_message_write,
1850 	.open = simple_open,
1851 	.llseek = default_llseek,
1852 };
1853 
1854 static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
1855 						 const char __user *user_buf,
1856 						 size_t count, loff_t *ppos)
1857 {
1858 	iwl_mei_get_ownership();
1859 
1860 	return count;
1861 }
1862 
1863 static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
1864 	.write = iwl_mei_dbgfs_req_ownership_write,
1865 	.open = simple_open,
1866 	.llseek = default_llseek,
1867 };
1868 
1869 static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
1870 {
1871 	mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1872 
1873 	if (!mei->dbgfs_dir)
1874 		return;
1875 
1876 	debugfs_create_ulong("status", S_IRUSR,
1877 			     mei->dbgfs_dir, &iwl_mei_status);
1878 	debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
1879 			    mei, &iwl_mei_dbgfs_send_start_message_ops);
1880 	debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
1881 			    mei, &iwl_mei_dbgfs_req_ownership_ops);
1882 }
1883 
1884 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
1885 {
1886 	debugfs_remove_recursive(mei->dbgfs_dir);
1887 	mei->dbgfs_dir = NULL;
1888 }
1889 
1890 #else
1891 
1892 static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
1893 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
1894 
1895 #endif /* CONFIG_DEBUG_FS */
1896 
1897 #define ALLOC_SHARED_MEM_RETRY_MAX_NUM	3
1898 
1899 /*
1900  * iwl_mei_probe - the probe function called by the mei bus enumeration
1901  *
 * This allocates the data needed by iwlmei and stores a pointer to it in the
 * mei_cl_device's drvdata.
 * It starts the SAP protocol by sending SAP_ME_MSG_START without waiting for
 * the answer; the answer will be caught later by the Rx callback.
1906  */
1907 static int iwl_mei_probe(struct mei_cl_device *cldev,
1908 			 const struct mei_cl_device_id *id)
1909 {
1910 	int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
1911 	struct iwl_mei *mei;
1912 	int ret;
1913 
1914 	mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
1915 	if (!mei)
1916 		return -ENOMEM;
1917 
1918 	init_waitqueue_head(&mei->get_nvm_wq);
1919 	INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
1920 	INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
1921 			  iwl_mei_csa_throttle_end_wk);
1922 	init_waitqueue_head(&mei->get_ownership_wq);
1923 	init_waitqueue_head(&mei->pldr_wq);
1924 	spin_lock_init(&mei->data_q_lock);
1925 	INIT_WORK(&mei->netdev_work, iwl_mei_netdev_work);
1926 
1927 	mei_cldev_set_drvdata(cldev, mei);
1928 	mei->cldev = cldev;
1929 	mei->device_down = true;
1930 
1931 	do {
1932 		ret = iwl_mei_alloc_shared_mem(cldev);
1933 		if (!ret)
1934 			break;
1935 		/*
1936 		 * The CSME firmware needs to boot the internal WLAN client.
1937 		 * This can take time in certain configurations (usually
1938 		 * upon resume and when the whole CSME firmware is shut down
1939 		 * during suspend).
1940 		 *
1941 		 * Wait a bit before retrying and hope we'll succeed next time.
1942 		 */
1943 
1944 		dev_dbg(&cldev->dev,
1945 			"Couldn't allocate the shared memory: %d, attempt %d / %d\n",
1946 			ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
1947 		msleep(100);
1948 		alloc_retry--;
1949 	} while (alloc_retry);
1950 
1951 	if (ret) {
1952 		dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
1953 			ret);
1954 		goto free;
1955 	}
1956 
1957 	iwl_mei_init_shared_mem(mei);
1958 
1959 	ret = iwl_mei_enable(cldev);
1960 	if (ret)
1961 		goto free_shared_mem;
1962 
1963 	iwl_mei_dbgfs_register(mei);
1964 
1965 	/*
	 * We now have a Rx function in place; start the SAP protocol.
	 * We expect to get the SAP_ME_MSG_START_OK response later on.
1968 	 */
1969 	mutex_lock(&iwl_mei_mutex);
1970 	ret = iwl_mei_send_start(cldev);
1971 	mutex_unlock(&iwl_mei_mutex);
1972 	if (ret)
1973 		goto debugfs_unregister;
1974 
1975 	/* must be last */
1976 	iwl_mei_global_cldev = cldev;
1977 
1978 	return 0;
1979 
1980 debugfs_unregister:
1981 	iwl_mei_dbgfs_unregister(mei);
1982 	mei_cldev_disable(cldev);
1983 free_shared_mem:
1984 	iwl_mei_free_shared_mem(cldev);
1985 free:
1986 	mei_cldev_set_drvdata(cldev, NULL);
1987 	devm_kfree(&cldev->dev, mei);
1988 
1989 	return ret;
1990 }
1991 
1992 #define SEND_SAP_MAX_WAIT_ITERATION 10
1993 #define IWLMEI_DEVICE_DOWN_WAIT_ITERATION 50
1994 
1995 static void iwl_mei_remove(struct mei_cl_device *cldev)
1996 {
1997 	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
1998 	int i;
1999 
2000 	/*
	 * We are being removed while the bus is active; this means we are
	 * going to suspend/shutdown, so the NIC will disappear.
2003 	 */
2004 	if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops) {
2005 		unsigned int iter = IWLMEI_DEVICE_DOWN_WAIT_ITERATION;
2006 		bool down = false;
2007 
2008 		/*
2009 		 * In case of suspend, wait for the mac to stop and don't remove
2010 		 * the interface. This will allow the interface to come back
2011 		 * on resume.
2012 		 */
2013 		while (!down && iter--) {
2014 			mdelay(1);
2015 
2016 			mutex_lock(&iwl_mei_mutex);
2017 			down = mei->device_down;
2018 			mutex_unlock(&iwl_mei_mutex);
2019 		}
2020 
2021 		if (!down)
2022 			iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
2023 	}
2024 
2025 	if (rcu_access_pointer(iwl_mei_cache.netdev)) {
2026 		struct net_device *dev;
2027 
2028 		/*
2029 		 * First take rtnl and only then the mutex to avoid an ABBA
2030 		 * with iwl_mei_set_netdev()
2031 		 */
2032 		rtnl_lock();
2033 		mutex_lock(&iwl_mei_mutex);
2034 
2035 		/*
		 * If we are suspending and the wifi driver hasn't removed its
		 * netdev yet, do it now. In any case, don't change the
		 * cache.netdev pointer.
2038 		 */
2039 		dev = rcu_dereference_protected(iwl_mei_cache.netdev,
2040 						lockdep_is_held(&iwl_mei_mutex));
2041 
2042 		netdev_rx_handler_unregister(dev);
2043 		mutex_unlock(&iwl_mei_mutex);
2044 		rtnl_unlock();
2045 	}
2046 
2047 	mutex_lock(&iwl_mei_mutex);
2048 
2049 	if (mei->amt_enabled) {
2050 		/*
		 * Tell CSME that we are going down so that it won't access the
		 * memory anymore; make sure this message goes through
		 * immediately.
2053 		 */
2054 		mei->csa_throttled = false;
2055 		iwl_mei_send_sap_msg(mei->cldev,
2056 				     SAP_MSG_NOTIF_HOST_GOES_DOWN);
2057 
2058 		for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
2059 			if (!iwl_mei_host_to_me_data_pending(mei))
2060 				break;
2061 
2062 			msleep(20);
2063 		}
2064 
2065 		/*
		 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN
		 * message, it will probably keep reading memory that we are
		 * going to unmap and free; expect IOMMU error messages.
2070 		 */
2071 		if (i == SEND_SAP_MAX_WAIT_ITERATION)
2072 			dev_err(&mei->cldev->dev,
2073 				"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
2074 	}
2075 
2076 	mutex_unlock(&iwl_mei_mutex);
2077 
2078 	/*
2079 	 * This looks strange, but this lock is taken here to make sure that
2080 	 * iwl_mei_add_data_to_ring called from the Tx path sees that we
2081 	 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
2082 	 * Rx isn't a problem because the rx_handler can't be called after
2083 	 * having been unregistered.
2084 	 */
2085 	spin_lock_bh(&mei->data_q_lock);
2086 	clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
2087 	spin_unlock_bh(&mei->data_q_lock);
2088 
2089 	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
2091 
2092 	/*
2093 	 * mei_cldev_disable will return only after all the MEI Rx is done.
2094 	 * It must be called when iwl_mei_mutex is *not* held, since it waits
2095 	 * for our Rx handler to complete.
2096 	 * After it returns, no new Rx will start.
2097 	 */
2098 	mei_cldev_disable(cldev);
2099 
2100 	/*
	 * The netdev was already removed, and the netdev's removal includes a
	 * call to synchronize_net(), so we know there won't be any new Rx
	 * that will trigger the following workers.
2104 	 */
2105 	cancel_work_sync(&mei->send_csa_msg_wk);
2106 	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
2107 	cancel_work_sync(&mei->netdev_work);
2108 
2109 	/*
	 * If anyone is waiting for ownership, let them know that we are going
	 * down and that we are not connected anymore; they'll then be able to
	 * take the device.
2113 	 */
2114 	wake_up_all(&mei->get_ownership_wq);
2115 	wake_up_all(&mei->pldr_wq);
2116 
2117 	mutex_lock(&iwl_mei_mutex);
2118 
2119 	iwl_mei_global_cldev = NULL;
2120 
2121 	wake_up_all(&mei->get_nvm_wq);
2122 
2123 	iwl_mei_free_shared_mem(cldev);
2124 
2125 	iwl_mei_dbgfs_unregister(mei);
2126 
2127 	mei_cldev_set_drvdata(cldev, NULL);
2128 
2129 	kfree(mei->nvm);
2130 
2131 	kfree(rcu_access_pointer(mei->filters));
2132 
2133 	devm_kfree(&cldev->dev, mei);
2134 
2135 	mutex_unlock(&iwl_mei_mutex);
2136 }
2137 
2138 static const struct mei_cl_device_id iwl_mei_tbl[] = {
2139 	{
2140 		.name = KBUILD_MODNAME,
2141 		.uuid = MEI_WLAN_UUID,
2142 		.version = MEI_CL_VERSION_ANY,
2143 	},
2144 
2145 	/* required last entry */
2146 	{ }
2147 };
2148 
2149 /*
 * Do not export the device table (no MODULE_DEVICE_TABLE): this module is
 * loaded as a dependency of iwlwifi, not autoloaded by the mei bus.
2152  */
2153 
2154 static struct mei_cl_driver iwl_mei_cl_driver = {
2155 	.id_table = iwl_mei_tbl,
2156 	.name = KBUILD_MODNAME,
2157 	.probe = iwl_mei_probe,
2158 	.remove = iwl_mei_remove,
2159 };
2160 
2161 module_mei_cl_driver(iwl_mei_cl_driver);
2162