1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/of.h>
10 #include <linux/of_device.h>
11 #include <linux/platform_device.h>
12 #include <linux/property.h>
13 #include <linux/regulator/consumer.h>
14 
15 #include "ce.h"
16 #include "coredump.h"
17 #include "debug.h"
18 #include "hif.h"
19 #include "htc.h"
20 #include "snoc.h"
21 
22 #define ATH10K_SNOC_RX_POST_RETRY_MS 50
23 #define CE_POLL_PIPE 4
24 #define ATH10K_SNOC_WAKE_IRQ 2
25 
/* Per-copy-engine resource names (CE 0-11) — presumably matched against
 * platform/DT resources when requesting per-CE IRQs; confirm against the
 * probe path (not visible in this chunk).
 */
static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
40 
/* Regulator supply names for the WCN3990 — presumably passed to the
 * regulator consumer API during probe; confirm against the probe path.
 */
static const char * const ath10k_regulators[] = {
	"vdd-0.8-cx-mx",
	"vdd-1.8-xo",
	"vdd-1.3-rfa",
	"vdd-3.3-ch0",
};
47 
/* Clock names consumed by the device (reference clock pin). */
static const char * const ath10k_clocks[] = {
	"cxo_ref_clk_pin",
};
51 
/* CE send/receive completion callbacks, referenced by the host CE config
 * table below and defined later in this file.
 */
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
58 
/* WCN3990 bus-level parameters: hw revision, 35-bit DMA addressing and a
 * 1 MiB (0x100000) MSA region size.
 */
static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(35),
	.msa_size = 0x100000,
};
64 
65 #define WCN3990_SRC_WR_IDX_OFFSET 0x3C
66 #define WCN3990_DST_WR_IDX_OFFSET 0x40
67 
68 static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
69 		{
70 			.ce_id = __cpu_to_le16(0),
71 			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
72 		},
73 
74 		{
75 			.ce_id = __cpu_to_le16(3),
76 			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
77 		},
78 
79 		{
80 			.ce_id = __cpu_to_le16(4),
81 			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
82 		},
83 
84 		{
85 			.ce_id = __cpu_to_le16(5),
86 			.reg_offset =  __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
87 		},
88 
89 		{
90 			.ce_id = __cpu_to_le16(7),
91 			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
92 		},
93 
94 		{
95 			.ce_id = __cpu_to_le16(1),
96 			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
97 		},
98 
99 		{
100 			.ce_id = __cpu_to_le16(2),
101 			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
102 		},
103 
104 		{
105 			.ce_id = __cpu_to_le16(7),
106 			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
107 		},
108 
109 		{
110 			.ce_id = __cpu_to_le16(8),
111 			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
112 		},
113 
114 		{
115 			.ce_id = __cpu_to_le16(9),
116 			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
117 		},
118 
119 		{
120 			.ce_id = __cpu_to_le16(10),
121 			.reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
122 		},
123 
124 		{
125 			.ce_id = __cpu_to_le16(11),
126 			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
127 		},
128 };
129 
130 static struct ce_attr host_ce_config_wlan[] = {
131 	/* CE0: host->target HTC control streams */
132 	{
133 		.flags = CE_ATTR_FLAGS,
134 		.src_nentries = 16,
135 		.src_sz_max = 2048,
136 		.dest_nentries = 0,
137 		.send_cb = ath10k_snoc_htc_tx_cb,
138 	},
139 
140 	/* CE1: target->host HTT + HTC control */
141 	{
142 		.flags = CE_ATTR_FLAGS,
143 		.src_nentries = 0,
144 		.src_sz_max = 2048,
145 		.dest_nentries = 512,
146 		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
147 	},
148 
149 	/* CE2: target->host WMI */
150 	{
151 		.flags = CE_ATTR_FLAGS,
152 		.src_nentries = 0,
153 		.src_sz_max = 2048,
154 		.dest_nentries = 64,
155 		.recv_cb = ath10k_snoc_htc_rx_cb,
156 	},
157 
158 	/* CE3: host->target WMI */
159 	{
160 		.flags = CE_ATTR_FLAGS,
161 		.src_nentries = 32,
162 		.src_sz_max = 2048,
163 		.dest_nentries = 0,
164 		.send_cb = ath10k_snoc_htc_tx_cb,
165 	},
166 
167 	/* CE4: host->target HTT */
168 	{
169 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
170 		.src_nentries = 2048,
171 		.src_sz_max = 256,
172 		.dest_nentries = 0,
173 		.send_cb = ath10k_snoc_htt_tx_cb,
174 	},
175 
176 	/* CE5: target->host HTT (ipa_uc->target ) */
177 	{
178 		.flags = CE_ATTR_FLAGS,
179 		.src_nentries = 0,
180 		.src_sz_max = 512,
181 		.dest_nentries = 512,
182 		.recv_cb = ath10k_snoc_htt_rx_cb,
183 	},
184 
185 	/* CE6: target autonomous hif_memcpy */
186 	{
187 		.flags = CE_ATTR_FLAGS,
188 		.src_nentries = 0,
189 		.src_sz_max = 0,
190 		.dest_nentries = 0,
191 	},
192 
193 	/* CE7: ce_diag, the Diagnostic Window */
194 	{
195 		.flags = CE_ATTR_FLAGS,
196 		.src_nentries = 2,
197 		.src_sz_max = 2048,
198 		.dest_nentries = 2,
199 	},
200 
201 	/* CE8: Target to uMC */
202 	{
203 		.flags = CE_ATTR_FLAGS,
204 		.src_nentries = 0,
205 		.src_sz_max = 2048,
206 		.dest_nentries = 128,
207 	},
208 
209 	/* CE9 target->host HTT */
210 	{
211 		.flags = CE_ATTR_FLAGS,
212 		.src_nentries = 0,
213 		.src_sz_max = 2048,
214 		.dest_nentries = 512,
215 		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
216 	},
217 
218 	/* CE10: target->host HTT */
219 	{
220 		.flags = CE_ATTR_FLAGS,
221 		.src_nentries = 0,
222 		.src_sz_max = 2048,
223 		.dest_nentries = 512,
224 		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
225 	},
226 
227 	/* CE11: target -> host PKTLOG */
228 	{
229 		.flags = CE_ATTR_FLAGS,
230 		.src_nentries = 0,
231 		.src_sz_max = 2048,
232 		.dest_nentries = 512,
233 		.recv_cb = ath10k_snoc_pktlog_rx_cb,
234 	},
235 };
236 
237 static struct ce_pipe_config target_ce_config_wlan[] = {
238 	/* CE0: host->target HTC control and raw streams */
239 	{
240 		.pipenum = __cpu_to_le32(0),
241 		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
242 		.nentries = __cpu_to_le32(32),
243 		.nbytes_max = __cpu_to_le32(2048),
244 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
245 		.reserved = __cpu_to_le32(0),
246 	},
247 
248 	/* CE1: target->host HTT + HTC control */
249 	{
250 		.pipenum = __cpu_to_le32(1),
251 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
252 		.nentries = __cpu_to_le32(32),
253 		.nbytes_max = __cpu_to_le32(2048),
254 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
255 		.reserved = __cpu_to_le32(0),
256 	},
257 
258 	/* CE2: target->host WMI */
259 	{
260 		.pipenum = __cpu_to_le32(2),
261 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
262 		.nentries = __cpu_to_le32(64),
263 		.nbytes_max = __cpu_to_le32(2048),
264 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
265 		.reserved = __cpu_to_le32(0),
266 	},
267 
268 	/* CE3: host->target WMI */
269 	{
270 		.pipenum = __cpu_to_le32(3),
271 		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
272 		.nentries = __cpu_to_le32(32),
273 		.nbytes_max = __cpu_to_le32(2048),
274 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
275 		.reserved = __cpu_to_le32(0),
276 	},
277 
278 	/* CE4: host->target HTT */
279 	{
280 		.pipenum = __cpu_to_le32(4),
281 		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
282 		.nentries = __cpu_to_le32(256),
283 		.nbytes_max = __cpu_to_le32(256),
284 		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
285 		.reserved = __cpu_to_le32(0),
286 	},
287 
288 	/* CE5: target->host HTT (HIF->HTT) */
289 	{
290 		.pipenum = __cpu_to_le32(5),
291 		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
292 		.nentries = __cpu_to_le32(1024),
293 		.nbytes_max = __cpu_to_le32(64),
294 		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
295 		.reserved = __cpu_to_le32(0),
296 	},
297 
298 	/* CE6: Reserved for target autonomous hif_memcpy */
299 	{
300 		.pipenum = __cpu_to_le32(6),
301 		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
302 		.nentries = __cpu_to_le32(32),
303 		.nbytes_max = __cpu_to_le32(16384),
304 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
305 		.reserved = __cpu_to_le32(0),
306 	},
307 
308 	/* CE7 used only by Host */
309 	{
310 		.pipenum = __cpu_to_le32(7),
311 		.pipedir = __cpu_to_le32(4),
312 		.nentries = __cpu_to_le32(0),
313 		.nbytes_max = __cpu_to_le32(0),
314 		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
315 		.reserved = __cpu_to_le32(0),
316 	},
317 
318 	/* CE8 Target to uMC */
319 	{
320 		.pipenum = __cpu_to_le32(8),
321 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
322 		.nentries = __cpu_to_le32(32),
323 		.nbytes_max = __cpu_to_le32(2048),
324 		.flags = __cpu_to_le32(0),
325 		.reserved = __cpu_to_le32(0),
326 	},
327 
328 	/* CE9 target->host HTT */
329 	{
330 		.pipenum = __cpu_to_le32(9),
331 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
332 		.nentries = __cpu_to_le32(32),
333 		.nbytes_max = __cpu_to_le32(2048),
334 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
335 		.reserved = __cpu_to_le32(0),
336 	},
337 
338 	/* CE10 target->host HTT */
339 	{
340 		.pipenum = __cpu_to_le32(10),
341 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
342 		.nentries = __cpu_to_le32(32),
343 		.nbytes_max = __cpu_to_le32(2048),
344 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
345 		.reserved = __cpu_to_le32(0),
346 	},
347 
348 	/* CE11 target autonomous qcache memcpy */
349 	{
350 		.pipenum = __cpu_to_le32(11),
351 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
352 		.nentries = __cpu_to_le32(32),
353 		.nbytes_max = __cpu_to_le32(2048),
354 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
355 		.reserved = __cpu_to_le32(0),
356 	},
357 };
358 
359 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
360 	{
361 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
362 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
363 		__cpu_to_le32(3),
364 	},
365 	{
366 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
367 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
368 		__cpu_to_le32(2),
369 	},
370 	{
371 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
372 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
373 		__cpu_to_le32(3),
374 	},
375 	{
376 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
377 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
378 		__cpu_to_le32(2),
379 	},
380 	{
381 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
382 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
383 		__cpu_to_le32(3),
384 	},
385 	{
386 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
387 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
388 		__cpu_to_le32(2),
389 	},
390 	{
391 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
392 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
393 		__cpu_to_le32(3),
394 	},
395 	{
396 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
397 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
398 		__cpu_to_le32(2),
399 	},
400 	{
401 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
402 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
403 		__cpu_to_le32(3),
404 	},
405 	{
406 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
407 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
408 		__cpu_to_le32(2),
409 	},
410 	{
411 		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
412 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
413 		__cpu_to_le32(0),
414 	},
415 	{
416 		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
417 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
418 		__cpu_to_le32(2),
419 	},
420 	{ /* not used */
421 		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
422 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
423 		__cpu_to_le32(0),
424 	},
425 	{ /* not used */
426 		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
427 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
428 		__cpu_to_le32(2),
429 	},
430 	{
431 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
432 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
433 		__cpu_to_le32(4),
434 	},
435 	{
436 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
437 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
438 		__cpu_to_le32(1),
439 	},
440 	{ /* not used */
441 		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
442 		__cpu_to_le32(PIPEDIR_OUT),
443 		__cpu_to_le32(5),
444 	},
445 	{ /* in = DL = target -> host */
446 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
447 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
448 		__cpu_to_le32(9),
449 	},
450 	{ /* in = DL = target -> host */
451 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
452 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
453 		__cpu_to_le32(10),
454 	},
455 	{ /* in = DL = target -> host pktlog */
456 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
457 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
458 		__cpu_to_le32(11),
459 	},
460 	/* (Additions here) */
461 
462 	{ /* must be last */
463 		__cpu_to_le32(0),
464 		__cpu_to_le32(0),
465 		__cpu_to_le32(0),
466 	},
467 };
468 
469 static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
470 {
471 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
472 
473 	iowrite32(value, ar_snoc->mem + offset);
474 }
475 
476 static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
477 {
478 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
479 	u32 val;
480 
481 	val = ioread32(ar_snoc->mem + offset);
482 
483 	return val;
484 }
485 
/* Allocate, DMA-map and post a single rx buffer on @pipe's copy engine.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on a DMA
 * mapping failure, or the error from the CE post op (e.g. ring full).
 * On any failure after allocation the skb is unmapped/freed here.
 */
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* Buffer is expected to be 4-byte aligned for DMA; warn once if not. */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	/* Map the whole usable buffer (current data + tailroom). */
	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* Stash the DMA address so the completion path can unmap it. */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	/* CE ring accesses are serialized by ce_lock. */
	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		/* Undo the mapping before dropping the skb. */
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
524 
525 static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
526 {
527 	struct ath10k *ar = pipe->hif_ce_state;
528 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
529 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
530 	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
531 	int ret, num;
532 
533 	if (pipe->buf_sz == 0)
534 		return;
535 
536 	if (!ce_pipe->dest_ring)
537 		return;
538 
539 	spin_lock_bh(&ce->ce_lock);
540 	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
541 	spin_unlock_bh(&ce->ce_lock);
542 	while (num--) {
543 		ret = __ath10k_snoc_rx_post_buf(pipe);
544 		if (ret) {
545 			if (ret == -ENOSPC)
546 				break;
547 			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
548 			mod_timer(&ar_snoc->rx_post_retry, jiffies +
549 				  ATH10K_SNOC_RX_POST_RETRY_MS);
550 			break;
551 		}
552 	}
553 }
554 
555 static void ath10k_snoc_rx_post(struct ath10k *ar)
556 {
557 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
558 	int i;
559 
560 	for (i = 0; i < CE_COUNT; i++)
561 		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
562 }
563 
/* Drain completed rx buffers from @ce_state, unmap them, and deliver each
 * to @callback. Buffers are first collected on a local list so delivery
 * happens outside the CE drain loop; afterwards the pipe is replenished.
 */
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info =  &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		/* The rx path mapped data + tailroom; unmap the same span. */
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* Drop frames the target claims are larger than the buffer. */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	/* Re-post rx buffers for the slots just consumed. */
	ath10k_snoc_rx_post_pipe(pipe_info);
}
604 
/* CE recv callback: hand received buffers to the HTC layer. */
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
609 
/* CE recv callback for pipes carrying both HTT and HTC traffic. */
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
619 
/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
627 
/* Strip the HTC header and pass the remaining HTT message to the HTT
 * t2h handler.
 */
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
633 
/* CE recv callback for the HTT rx pipe; also polls CE4 (see CE_POLL_PIPE)
 * before delivering the buffers via ath10k_snoc_htt_rx_deliver().
 */
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}
639 
/* Timer callback (armed by ath10k_snoc_rx_post_pipe on post failure):
 * retry replenishing rx buffers on all pipes.
 */
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}
647 
648 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
649 {
650 	struct ath10k *ar = ce_state->ar;
651 	struct sk_buff_head list;
652 	struct sk_buff *skb;
653 
654 	__skb_queue_head_init(&list);
655 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
656 		if (!skb)
657 			continue;
658 
659 		__skb_queue_tail(&list, skb);
660 	}
661 
662 	while ((skb = __skb_dequeue(&list)))
663 		ath10k_htc_tx_completion_handler(ar, skb);
664 }
665 
666 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
667 {
668 	struct ath10k *ar = ce_state->ar;
669 	struct sk_buff *skb;
670 
671 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
672 		if (!skb)
673 			continue;
674 
675 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
676 				 skb->len, DMA_TO_DEVICE);
677 		ath10k_htt_hif_tx_complete(ar, skb);
678 	}
679 }
680 
/* Queue a scatter-gather list of @n_items buffers on @pipe_id. All but the
 * last item are sent with CE_SEND_FLAG_GATHER so the CE treats them as one
 * transfer; the last item (flags 0) completes the gather. On any failure
 * the already-queued descriptors are reverted before returning the error.
 */
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	/* The whole gather sequence must be queued atomically wrt the ring. */
	spin_lock_bh(&ce->ce_lock);

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* Final item carries no gather flag, closing the transfer. */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	/* Revert the i descriptors queued before the failure. */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
733 
/* Report fixed target info: this bus only drives WCN3990, so both the
 * version and type are hard-coded. Always succeeds.
 */
static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}
742 
/* Return the number of free source-ring entries on @pipe's copy engine. */
static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}
751 
752 static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
753 						int force)
754 {
755 	int resources;
756 
757 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
758 
759 	if (!force) {
760 		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
761 
762 		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
763 			return;
764 	}
765 	ath10k_ce_per_engine_service(ar, pipe);
766 }
767 
768 static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
769 					       u16 service_id,
770 					       u8 *ul_pipe, u8 *dl_pipe)
771 {
772 	const struct service_to_pipe *entry;
773 	bool ul_set = false, dl_set = false;
774 	int i;
775 
776 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
777 
778 	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
779 		entry = &target_service_to_ce_map_wlan[i];
780 
781 		if (__le32_to_cpu(entry->service_id) != service_id)
782 			continue;
783 
784 		switch (__le32_to_cpu(entry->pipedir)) {
785 		case PIPEDIR_NONE:
786 			break;
787 		case PIPEDIR_IN:
788 			WARN_ON(dl_set);
789 			*dl_pipe = __le32_to_cpu(entry->pipenum);
790 			dl_set = true;
791 			break;
792 		case PIPEDIR_OUT:
793 			WARN_ON(ul_set);
794 			*ul_pipe = __le32_to_cpu(entry->pipenum);
795 			ul_set = true;
796 			break;
797 		case PIPEDIR_INOUT:
798 			WARN_ON(dl_set);
799 			WARN_ON(ul_set);
800 			*dl_pipe = __le32_to_cpu(entry->pipenum);
801 			*ul_pipe = __le32_to_cpu(entry->pipenum);
802 			dl_set = true;
803 			ul_set = true;
804 			break;
805 		}
806 	}
807 
808 	if (!ul_set || !dl_set)
809 		return -ENOENT;
810 
811 	return 0;
812 }
813 
/* Default pipes are those of the HTC control (RSVD_CTRL) service; the
 * lookup cannot fail for this table, so the return value is ignored.
 */
static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
823 
/* Mask all copy engine interrupts. */
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}
828 
/* Unmask all copy engine interrupts. */
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}
833 
834 static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
835 {
836 	struct ath10k_ce_pipe *ce_pipe;
837 	struct ath10k_ce_ring *ce_ring;
838 	struct sk_buff *skb;
839 	struct ath10k *ar;
840 	int i;
841 
842 	ar = snoc_pipe->hif_ce_state;
843 	ce_pipe = snoc_pipe->ce_hdl;
844 	ce_ring = ce_pipe->dest_ring;
845 
846 	if (!ce_ring)
847 		return;
848 
849 	if (!snoc_pipe->buf_sz)
850 		return;
851 
852 	for (i = 0; i < ce_ring->nentries; i++) {
853 		skb = ce_ring->per_transfer_context[i];
854 		if (!skb)
855 			continue;
856 
857 		ce_ring->per_transfer_context[i] = NULL;
858 
859 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
860 				 skb->len + skb_tailroom(skb),
861 				 DMA_FROM_DEVICE);
862 		dev_kfree_skb_any(skb);
863 	}
864 }
865 
866 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
867 {
868 	struct ath10k_ce_pipe *ce_pipe;
869 	struct ath10k_ce_ring *ce_ring;
870 	struct sk_buff *skb;
871 	struct ath10k *ar;
872 	int i;
873 
874 	ar = snoc_pipe->hif_ce_state;
875 	ce_pipe = snoc_pipe->ce_hdl;
876 	ce_ring = ce_pipe->src_ring;
877 
878 	if (!ce_ring)
879 		return;
880 
881 	if (!snoc_pipe->buf_sz)
882 		return;
883 
884 	for (i = 0; i < ce_ring->nentries; i++) {
885 		skb = ce_ring->per_transfer_context[i];
886 		if (!skb)
887 			continue;
888 
889 		ce_ring->per_transfer_context[i] = NULL;
890 
891 		ath10k_htc_tx_completion_handler(ar, skb);
892 	}
893 }
894 
895 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
896 {
897 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
898 	struct ath10k_snoc_pipe *pipe_info;
899 	int pipe_num;
900 
901 	del_timer_sync(&ar_snoc->rx_post_retry);
902 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
903 		pipe_info = &ar_snoc->pipe_info[pipe_num];
904 		ath10k_snoc_rx_pipe_cleanup(pipe_info);
905 		ath10k_snoc_tx_pipe_cleanup(pipe_info);
906 	}
907 }
908 
/* HIF stop: quiesce interrupts and NAPI, then release all pipe buffers.
 * Interrupts are left enabled while a crash flush is in progress —
 * presumably so outstanding crash/coredump processing can complete;
 * confirm against the recovery path.
 */
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_snoc_irq_disable(ar);

	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
919 
/* HIF start: enable NAPI and CE interrupts, prime the rx rings, and clear
 * the recovery flag now that the restart has completed. Always returns 0.
 */
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	napi_enable(&ar->napi);
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}
934 
935 static int ath10k_snoc_init_pipes(struct ath10k *ar)
936 {
937 	int i, ret;
938 
939 	for (i = 0; i < CE_COUNT; i++) {
940 		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
941 		if (ret) {
942 			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
943 				   i, ret);
944 			return ret;
945 		}
946 	}
947 
948 	return 0;
949 }
950 
951 static int ath10k_snoc_wlan_enable(struct ath10k *ar,
952 				   enum ath10k_firmware_mode fw_mode)
953 {
954 	struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
955 	struct ath10k_qmi_wlan_enable_cfg cfg;
956 	enum wlfw_driver_mode_enum_v01 mode;
957 	int pipe_num;
958 
959 	for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
960 		tgt_cfg[pipe_num].pipe_num =
961 				target_ce_config_wlan[pipe_num].pipenum;
962 		tgt_cfg[pipe_num].pipe_dir =
963 				target_ce_config_wlan[pipe_num].pipedir;
964 		tgt_cfg[pipe_num].nentries =
965 				target_ce_config_wlan[pipe_num].nentries;
966 		tgt_cfg[pipe_num].nbytes_max =
967 				target_ce_config_wlan[pipe_num].nbytes_max;
968 		tgt_cfg[pipe_num].flags =
969 				target_ce_config_wlan[pipe_num].flags;
970 		tgt_cfg[pipe_num].reserved = 0;
971 	}
972 
973 	cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
974 				sizeof(struct ath10k_tgt_pipe_cfg);
975 	cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
976 		&tgt_cfg;
977 	cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
978 				  sizeof(struct ath10k_svc_pipe_cfg);
979 	cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
980 		&target_service_to_ce_map_wlan;
981 	cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
982 	cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
983 		&target_shadow_reg_cfg_map;
984 
985 	switch (fw_mode) {
986 	case ATH10K_FIRMWARE_MODE_NORMAL:
987 		mode = QMI_WLFW_MISSION_V01;
988 		break;
989 	case ATH10K_FIRMWARE_MODE_UTF:
990 		mode = QMI_WLFW_FTM_V01;
991 		break;
992 	default:
993 		ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
994 		return -EINVAL;
995 	}
996 
997 	return ath10k_qmi_wlan_enable(ar, &cfg, mode,
998 				       NULL);
999 }
1000 
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	/* If both ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY
	 * flags are not set, it means that the driver has restarted
	 * due to a crash inject via debugfs. In this case, the driver
	 * needs to restart the firmware and hence send qmi wlan disable,
	 * during the driver restart sequence.
	 */
	/* NOTE: the condition below sends qmi wlan disable unless BOTH
	 * flags are set, i.e. it is skipped only for a genuine
	 * crash-flush + recovery restart.
	 */
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
	    !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		ath10k_qmi_wlan_disable(ar);
}
1015 
/* HIF power down: disable wlan via QMI, then free the CE RRI (ring read
 * index) allocation made in power up.
 */
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
}
1023 
/* HIF power up: enable wlan firmware via QMI in @fw_mode, allocate the
 * CE RRI area and initialize all CE pipes. On pipe-init failure the wlan
 * enable is rolled back.
 */
static int ath10k_snoc_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_snoc_wlan_enable(ar, fw_mode);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		return ret;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_wlan_enable;
	}

	return 0;

err_wlan_enable:
	ath10k_snoc_wlan_disable(ar);

	return ret;
}
1053 
1054 static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
1055 					       u8 fw_log_mode)
1056 {
1057 	u8 fw_dbg_mode;
1058 
1059 	if (fw_log_mode)
1060 		fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
1061 	else
1062 		fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
1063 
1064 	return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
1065 }
1066 
1067 #ifdef CONFIG_PM
1068 static int ath10k_snoc_hif_suspend(struct ath10k *ar)
1069 {
1070 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1071 	int ret;
1072 
1073 	if (!device_may_wakeup(ar->dev))
1074 		return -EPERM;
1075 
1076 	ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
1077 	if (ret) {
1078 		ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret);
1079 		return ret;
1080 	}
1081 
1082 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");
1083 
1084 	return ret;
1085 }
1086 
1087 static int ath10k_snoc_hif_resume(struct ath10k *ar)
1088 {
1089 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1090 	int ret;
1091 
1092 	if (!device_may_wakeup(ar->dev))
1093 		return -EPERM;
1094 
1095 	ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
1096 	if (ret) {
1097 		ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
1098 		return ret;
1099 	}
1100 
1101 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");
1102 
1103 	return ret;
1104 }
1105 #endif
1106 
/* HIF operations exported to the ath10k core for the SNOC bus. */
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
	.start		= ath10k_snoc_hif_start,
	.stop		= ath10k_snoc_hif_stop,
	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
	.power_up		= ath10k_snoc_hif_power_up,
	.power_down		= ath10k_snoc_hif_power_down,
	.tx_sg			= ath10k_snoc_hif_tx_sg,
	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
	.get_target_info	= ath10k_snoc_hif_get_target_info,
	.set_target_log_mode    = ath10k_snoc_hif_set_target_log_mode,

#ifdef CONFIG_PM
	.suspend                = ath10k_snoc_hif_suspend,
	.resume                 = ath10k_snoc_hif_resume,
#endif
};
1127 
/* Register accessors handed to the shared copy engine code. */
static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
};
1132 
1133 static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
1134 {
1135 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1136 	int i;
1137 
1138 	for (i = 0; i < CE_COUNT_MAX; i++) {
1139 		if (ar_snoc->ce_irqs[i].irq_line == irq)
1140 			return i;
1141 	}
1142 	ath10k_err(ar, "No matching CE id for irq %d\n", irq);
1143 
1144 	return -EINVAL;
1145 }
1146 
1147 static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
1148 {
1149 	struct ath10k *ar = arg;
1150 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1151 	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
1152 
1153 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
1154 		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
1155 			    ce_id);
1156 		return IRQ_HANDLED;
1157 	}
1158 
1159 	ath10k_snoc_irq_disable(ar);
1160 	napi_schedule(&ar->napi);
1161 
1162 	return IRQ_HANDLED;
1163 }
1164 
1165 static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
1166 {
1167 	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
1168 	int done = 0;
1169 
1170 	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
1171 		napi_complete(ctx);
1172 		return done;
1173 	}
1174 
1175 	ath10k_ce_per_engine_service_any(ar);
1176 	done = ath10k_htt_txrx_compl_task(ar, budget);
1177 
1178 	if (done < budget) {
1179 		napi_complete(ctx);
1180 		ath10k_snoc_irq_enable(ar);
1181 	}
1182 
1183 	return done;
1184 }
1185 
/* Register the NAPI poll handler that services CE completions. */
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
		       ATH10K_NAPI_BUDGET);
}
1191 
1192 static int ath10k_snoc_request_irq(struct ath10k *ar)
1193 {
1194 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1195 	int irqflags = IRQF_TRIGGER_RISING;
1196 	int ret, id;
1197 
1198 	for (id = 0; id < CE_COUNT_MAX; id++) {
1199 		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
1200 				  ath10k_snoc_per_engine_handler,
1201 				  irqflags, ce_name[id], ar);
1202 		if (ret) {
1203 			ath10k_err(ar,
1204 				   "failed to register IRQ handler for CE %d: %d",
1205 				   id, ret);
1206 			goto err_irq;
1207 		}
1208 	}
1209 
1210 	return 0;
1211 
1212 err_irq:
1213 	for (id -= 1; id >= 0; id--)
1214 		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1215 
1216 	return ret;
1217 }
1218 
1219 static void ath10k_snoc_free_irq(struct ath10k *ar)
1220 {
1221 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1222 	int id;
1223 
1224 	for (id = 0; id < CE_COUNT_MAX; id++)
1225 		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1226 }
1227 
/* Discover platform resources: map the "membase" register window, read
 * one interrupt line per copy engine, and pick up the optional
 * "qcom,xo-cal-data" property. Returns 0 on success or a negative
 * errno; missing XO cal data is not an error.
 */
static int ath10k_snoc_resource_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct platform_device *pdev;
	struct resource *res;
	int i, ret = 0;

	pdev = ar_snoc->dev;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
	if (!res) {
		ath10k_err(ar, "Memory base not found in DT\n");
		return -EINVAL;
	}

	/* devm-managed mapping: released automatically on driver detach */
	ar_snoc->mem_pa = res->start;
	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
				    resource_size(res));
	if (!ar_snoc->mem) {
		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
			   &ar_snoc->mem_pa);
		return -EINVAL;
	}

	/* One dedicated interrupt line per copy engine. */
	for (i = 0; i < CE_COUNT; i++) {
		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
		if (!res) {
			ath10k_err(ar, "failed to get IRQ%d\n", i);
			ret = -ENODEV;
			goto out;
		}
		ar_snoc->ce_irqs[i].irq_line = res->start;
	}

	/* XO calibration data is optional; only record it if present. */
	ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
				       &ar_snoc->xo_cal_data);
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
	if (ret == 0) {
		ar_snoc->xo_cal_supported = true;
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
			   ar_snoc->xo_cal_data);
	}
	/* A failed optional-property read must not fail the probe. */
	ret = 0;

out:
	return ret;
}
1274 
1275 static void ath10k_snoc_quirks_init(struct ath10k *ar)
1276 {
1277 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1278 	struct device *dev = &ar_snoc->dev->dev;
1279 
1280 	if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
1281 		set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
1282 }
1283 
/* Handle firmware lifecycle indications delivered via QMI.
 *
 * FW_READY: register the ath10k core on first boot; on subsequent
 * boots (i.e. after a firmware restart) schedule the restart worker
 * instead. FW_DOWN: mark the device as crashing so in-flight work is
 * flushed. Indications are ignored while the driver is unregistering.
 *
 * Returns 0 on success or a negative errno.
 */
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_bus_params bus_params = {};
	int ret;

	if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
		return 0;

	switch (type) {
	case ATH10K_QMI_EVENT_FW_READY_IND:
		/* Already registered: this FW_READY follows a firmware
		 * restart, so recover instead of registering again.
		 */
		if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
			queue_work(ar->workqueue, &ar->restart_work);
			break;
		}

		bus_params.dev_type = ATH10K_DEV_TYPE_LL;
		bus_params.chip_id = ar_snoc->target_info.soc_version;
		ret = ath10k_core_register(ar, &bus_params);
		if (ret) {
			ath10k_err(ar, "Failed to register driver core: %d\n",
				   ret);
			return ret;
		}
		set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
		break;
	case ATH10K_QMI_EVENT_FW_DOWN_IND:
		set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
		set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
		break;
	default:
		ath10k_err(ar, "invalid fw indication: %llx\n", type);
		return -EINVAL;
	}

	return 0;
}
1321 
1322 static int ath10k_snoc_setup_resource(struct ath10k *ar)
1323 {
1324 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1325 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
1326 	struct ath10k_snoc_pipe *pipe;
1327 	int i, ret;
1328 
1329 	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
1330 	spin_lock_init(&ce->ce_lock);
1331 	for (i = 0; i < CE_COUNT; i++) {
1332 		pipe = &ar_snoc->pipe_info[i];
1333 		pipe->ce_hdl = &ce->ce_states[i];
1334 		pipe->pipe_num = i;
1335 		pipe->hif_ce_state = ar;
1336 
1337 		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1338 		if (ret) {
1339 			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1340 				   i, ret);
1341 			return ret;
1342 		}
1343 
1344 		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
1345 	}
1346 	ath10k_snoc_init_napi(ar);
1347 
1348 	return 0;
1349 }
1350 
1351 static void ath10k_snoc_release_resource(struct ath10k *ar)
1352 {
1353 	int i;
1354 
1355 	netif_napi_del(&ar->napi);
1356 	for (i = 0; i < CE_COUNT; i++)
1357 		ath10k_ce_free_pipe(ar, i);
1358 }
1359 
1360 static int ath10k_hw_power_on(struct ath10k *ar)
1361 {
1362 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1363 	int ret;
1364 
1365 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1366 
1367 	ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
1368 	if (ret)
1369 		return ret;
1370 
1371 	ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
1372 	if (ret)
1373 		goto vreg_off;
1374 
1375 	return ret;
1376 
1377 vreg_off:
1378 	regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1379 	return ret;
1380 }
1381 
1382 static int ath10k_hw_power_off(struct ath10k *ar)
1383 {
1384 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1385 
1386 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1387 
1388 	clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
1389 
1390 	return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1391 }
1392 
/* Copy the MSA (modem shared assist) firmware memory region into the
 * coredump ramdump buffer, prefixed with a dump header describing the
 * region. Silently returns if no coredump buffer or memory layout is
 * available.
 */
static void ath10k_msa_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	size_t buf_len;
	u8 *buf;

	if (!crash_data || !crash_data->ramdump_buf)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	/* Only the first region of the layout is dumped here. */
	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;
	memset(buf, 0, buf_len);

	/* Reserve space for the header. */
	hdr = (void *)buf;
	buf += sizeof(*hdr);
	buf_len -= sizeof(*hdr);

	hdr->region_type = cpu_to_le32(current_region->type);
	hdr->start = cpu_to_le32((unsigned long)ar_snoc->qmi->msa_va);
	hdr->length = cpu_to_le32(ar_snoc->qmi->msa_mem_size);

	/* Never copy more than the layout says the region holds. */
	if (current_region->len < ar_snoc->qmi->msa_mem_size) {
		memcpy(buf, ar_snoc->qmi->msa_va, current_region->len);
		ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
			    current_region->len, ar_snoc->qmi->msa_mem_size);
	} else {
		memcpy(buf, ar_snoc->qmi->msa_va, ar_snoc->qmi->msa_mem_size);
	}
}
1433 
/* Firmware crash handler: bump the crash counter, allocate a coredump
 * record and dump the MSA region into it. Serialized against other
 * dumpers by ar->dump_mutex.
 */
void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];

	mutex_lock(&ar->dump_mutex);

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_crash_counter++;
	spin_unlock_bh(&ar->data_lock);

	/* May return NULL if coredump collection is disabled. */
	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_msa_dump_memory(ar, crash_data);
	mutex_unlock(&ar->dump_mutex);
}
1457 
1458 static const struct of_device_id ath10k_snoc_dt_match[] = {
1459 	{ .compatible = "qcom,wcn3990-wifi",
1460 	 .data = &drv_priv,
1461 	},
1462 	{ }
1463 };
1464 MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1465 
1466 static int ath10k_snoc_probe(struct platform_device *pdev)
1467 {
1468 	const struct ath10k_snoc_drv_priv *drv_data;
1469 	const struct of_device_id *of_id;
1470 	struct ath10k_snoc *ar_snoc;
1471 	struct device *dev;
1472 	struct ath10k *ar;
1473 	u32 msa_size;
1474 	int ret;
1475 	u32 i;
1476 
1477 	of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
1478 	if (!of_id) {
1479 		dev_err(&pdev->dev, "failed to find matching device tree id\n");
1480 		return -EINVAL;
1481 	}
1482 
1483 	drv_data = of_id->data;
1484 	dev = &pdev->dev;
1485 
1486 	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1487 	if (ret) {
1488 		dev_err(dev, "failed to set dma mask: %d", ret);
1489 		return ret;
1490 	}
1491 
1492 	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1493 				drv_data->hw_rev, &ath10k_snoc_hif_ops);
1494 	if (!ar) {
1495 		dev_err(dev, "failed to allocate core\n");
1496 		return -ENOMEM;
1497 	}
1498 
1499 	ar_snoc = ath10k_snoc_priv(ar);
1500 	ar_snoc->dev = pdev;
1501 	platform_set_drvdata(pdev, ar);
1502 	ar_snoc->ar = ar;
1503 	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1504 	ar->ce_priv = &ar_snoc->ce;
1505 	msa_size = drv_data->msa_size;
1506 
1507 	ath10k_snoc_quirks_init(ar);
1508 
1509 	ret = ath10k_snoc_resource_init(ar);
1510 	if (ret) {
1511 		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1512 		goto err_core_destroy;
1513 	}
1514 
1515 	ret = ath10k_snoc_setup_resource(ar);
1516 	if (ret) {
1517 		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1518 		goto err_core_destroy;
1519 	}
1520 	ret = ath10k_snoc_request_irq(ar);
1521 	if (ret) {
1522 		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1523 		goto err_release_resource;
1524 	}
1525 
1526 	ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
1527 	ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
1528 				      sizeof(*ar_snoc->vregs), GFP_KERNEL);
1529 	if (!ar_snoc->vregs) {
1530 		ret = -ENOMEM;
1531 		goto err_free_irq;
1532 	}
1533 	for (i = 0; i < ar_snoc->num_vregs; i++)
1534 		ar_snoc->vregs[i].supply = ath10k_regulators[i];
1535 
1536 	ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
1537 				      ar_snoc->vregs);
1538 	if (ret < 0)
1539 		goto err_free_irq;
1540 
1541 	ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
1542 	ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
1543 				     sizeof(*ar_snoc->clks), GFP_KERNEL);
1544 	if (!ar_snoc->clks) {
1545 		ret = -ENOMEM;
1546 		goto err_free_irq;
1547 	}
1548 
1549 	for (i = 0; i < ar_snoc->num_clks; i++)
1550 		ar_snoc->clks[i].id = ath10k_clocks[i];
1551 
1552 	ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
1553 					 ar_snoc->clks);
1554 	if (ret)
1555 		goto err_free_irq;
1556 
1557 	ret = ath10k_hw_power_on(ar);
1558 	if (ret) {
1559 		ath10k_err(ar, "failed to power on device: %d\n", ret);
1560 		goto err_free_irq;
1561 	}
1562 
1563 	ret = ath10k_qmi_init(ar, msa_size);
1564 	if (ret) {
1565 		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
1566 		goto err_core_destroy;
1567 	}
1568 
1569 	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
1570 
1571 	return 0;
1572 
1573 err_free_irq:
1574 	ath10k_snoc_free_irq(ar);
1575 
1576 err_release_resource:
1577 	ath10k_snoc_release_resource(ar);
1578 
1579 err_core_destroy:
1580 	ath10k_core_destroy(ar);
1581 
1582 	return ret;
1583 }
1584 
/* Detach the driver: wait for any in-progress firmware recovery to
 * settle, then unregister and release all resources in reverse order
 * of ath10k_snoc_probe().
 */
static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");

	reinit_completion(&ar->driver_recovery);

	/* Give an ongoing recovery up to 3 seconds to complete. */
	if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);

	/* From this point on, fw indications are ignored
	 * (see ath10k_snoc_fw_indication()).
	 */
	set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);

	ath10k_core_unregister(ar);
	ath10k_hw_power_off(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}
1608 
/* Platform driver glue; probe/remove bound via device tree matching. */
static struct platform_driver ath10k_snoc_driver = {
	.probe  = ath10k_snoc_probe,
	.remove = ath10k_snoc_remove,
	.driver = {
		.name   = "ath10k_snoc",
		.of_match_table = ath10k_snoc_dt_match,
	},
};
module_platform_driver(ath10k_snoc_driver);

MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
1622