// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "ce.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "snoc.h"

#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
#define ATH10K_SNOC_WAKE_IRQ 2

static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

static struct ath10k_vreg_info vreg_cfg[] = {
	{NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false},
	{NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false},
	{NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false},
	{NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false},
};

static struct ath10k_clk_info clk_cfg[] = {
	{NULL, "cxo_ref_clk_pin", 0, false},
};

static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(35),
	.msa_size = 0x100000,
};

#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
#define WCN3990_DST_WR_IDX_OFFSET 0x40

static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
		{
			.ce_id = __cpu_to_le16(0),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(3),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(4),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(5),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(7),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(1),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(2),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(7),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(8),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(9),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(10),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(11),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},
};

static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 256,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9 target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target -> host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_pktlog_rx_cb,
	},
};

static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(1024),
		.nbytes_max = __cpu_to_le32(64),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(16384),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(4),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 Target to uMC */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target->host HTT */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = __cpu_to_le32(10),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE11 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(11),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},
};

static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(11),
	},
	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

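/* Register accessors for the WCN3990 WLAN block; offsets are relative to
 * the "membase" region mapped in ath10k_snoc_resource_init().
 */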
static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	iowrite32(value, ar_snoc->mem + offset);
}

static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	u32 val;

	val = ioread32(ar_snoc->mem + offset);

	return val;
}

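/* Allocate a single rx skb, DMA-map it for the device and post it to the
 * copy engine destination ring under ce_lock. On failure the skb is
 * unmapped and freed here.
 */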
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);
	while (num--) {
		ret = __ath10k_snoc_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
			mod_timer(&ar_snoc->rx_post_retry, jiffies +
				  ATH10K_SNOC_RX_POST_RETRY_MS);
			break;
		}
	}
}

static void ath10k_snoc_rx_post(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
}

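/* Common rx completion path: drain completed CE buffers, unmap them, set
 * the received length, hand them to @callback (HTC or HTT), then
 * replenish the pipe.
 */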
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	ath10k_snoc_rx_post_pipe(pipe_info);
}

static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}

static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}

static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

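/* Send a scatter-gather list on a CE pipe: all but the last item are
 * queued with CE_SEND_FLAG_GATHER, and partially queued items are
 * reverted if any send fails.
 */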
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	spin_lock_bh(&ce->ce_lock);

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}

static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}

static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}

static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}

static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}

static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}

static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}

static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}

static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info;
	int pipe_num;

	del_timer_sync(&ar_snoc->rx_post_retry);
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_snoc->pipe_info[pipe_num];
		ath10k_snoc_rx_pipe_cleanup(pipe_info);
		ath10k_snoc_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		ath10k_snoc_irq_disable(ar);

	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}

static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	napi_enable(&ar->napi);
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}

static int ath10k_snoc_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

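/* Build the target pipe, service-map and shadow register configuration
 * from the static tables above and pass it to firmware through a QMI
 * wlan enable request for the requested firmware mode.
 */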
static int ath10k_snoc_wlan_enable(struct ath10k *ar,
				   enum ath10k_firmware_mode fw_mode)
{
	struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
	struct ath10k_qmi_wlan_enable_cfg cfg;
	enum wlfw_driver_mode_enum_v01 mode;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
		tgt_cfg[pipe_num].pipe_num =
				target_ce_config_wlan[pipe_num].pipenum;
		tgt_cfg[pipe_num].pipe_dir =
				target_ce_config_wlan[pipe_num].pipedir;
		tgt_cfg[pipe_num].nentries =
				target_ce_config_wlan[pipe_num].nentries;
		tgt_cfg[pipe_num].nbytes_max =
				target_ce_config_wlan[pipe_num].nbytes_max;
		tgt_cfg[pipe_num].flags =
				target_ce_config_wlan[pipe_num].flags;
		tgt_cfg[pipe_num].reserved = 0;
	}

	cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
				sizeof(struct ath10k_tgt_pipe_cfg);
	cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
		&tgt_cfg;
	cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
				  sizeof(struct ath10k_svc_pipe_cfg);
	cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
		&target_service_to_ce_map_wlan;
	cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
					sizeof(struct ath10k_shadow_reg_cfg);
	cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
		&target_shadow_reg_cfg_map;

	switch (fw_mode) {
	case ATH10K_FIRMWARE_MODE_NORMAL:
		mode = QMI_WLFW_MISSION_V01;
		break;
	case ATH10K_FIRMWARE_MODE_UTF:
		mode = QMI_WLFW_FTM_V01;
		break;
	default:
		ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
		return -EINVAL;
	}

	return ath10k_qmi_wlan_enable(ar, &cfg, mode, NULL);
}

static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	/* Skip the QMI wlan disable only while recovering from a firmware
	 * crash, i.e. when both ATH10K_FLAG_CRASH_FLUSH and
	 * ATH10K_SNOC_FLAG_RECOVERY are set. In every other case, e.g. a
	 * restart due to a crash injected via debugfs, the firmware must be
	 * restarted and hence qmi wlan disable is sent as part of the
	 * driver restart sequence.
	 */
	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
	    !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		ath10k_qmi_wlan_disable(ar);
}

static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
}

static int ath10k_snoc_hif_power_up(struct ath10k *ar,
				    enum ath10k_firmware_mode fw_mode)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_snoc_wlan_enable(ar, fw_mode);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		return ret;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_wlan_enable;
	}

	return 0;

err_wlan_enable:
	ath10k_snoc_wlan_disable(ar);

	return ret;
}

#ifdef CONFIG_PM
static int ath10k_snoc_hif_suspend(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	if (!device_may_wakeup(ar->dev))
		return -EPERM;

	ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
	if (ret) {
		ath10k_err(ar, "failed to enable wakeup irq: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");

	return ret;
}

static int ath10k_snoc_hif_resume(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ret;

	if (!device_may_wakeup(ar->dev))
		return -EPERM;

	ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
	if (ret) {
		ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");

	return ret;
}
#endif

static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
	.start		= ath10k_snoc_hif_start,
	.stop		= ath10k_snoc_hif_stop,
	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
	.power_up		= ath10k_snoc_hif_power_up,
	.power_down		= ath10k_snoc_hif_power_down,
	.tx_sg			= ath10k_snoc_hif_tx_sg,
	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
	.get_target_info	= ath10k_snoc_hif_get_target_info,
#ifdef CONFIG_PM
	.suspend		= ath10k_snoc_hif_suspend,
	.resume			= ath10k_snoc_hif_resume,
#endif
};

static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
};

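/* Per-CE interrupt handling: each copy engine has its own IRQ line, which
 * is mapped back to a CE id here and then serviced from NAPI context.
 */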
static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (ar_snoc->ce_irqs[i].irq_line == irq)
			return i;
	}
	ath10k_err(ar, "No matching CE id for irq %d\n", irq);

	return -EINVAL;
}

static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	ath10k_snoc_irq_disable(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}

static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);
	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete(ctx);
		ath10k_snoc_irq_enable(ar);
	}

	return done;
}

static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
		       ATH10K_NAPI_BUDGET);
}

static int ath10k_snoc_request_irq(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int irqflags = IRQF_TRIGGER_RISING;
	int ret, id;

	for (id = 0; id < CE_COUNT_MAX; id++) {
		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
				  ath10k_snoc_per_engine_handler,
				  irqflags, ce_name[id], ar);
		if (ret) {
			ath10k_err(ar,
				   "failed to register IRQ handler for CE %d: %d\n",
				   id, ret);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	for (id -= 1; id >= 0; id--)
		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);

	return ret;
}

static void ath10k_snoc_free_irq(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int id;

	for (id = 0; id < CE_COUNT_MAX; id++)
		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
}

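/* Map the "membase" register space and fetch the per-CE interrupt lines
 * from the platform device resources.
 */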
static int ath10k_snoc_resource_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct platform_device *pdev;
	struct resource *res;
	int i, ret = 0;

	pdev = ar_snoc->dev;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
	if (!res) {
		ath10k_err(ar, "Memory base not found in DT\n");
		return -EINVAL;
	}

	ar_snoc->mem_pa = res->start;
	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
				    resource_size(res));
	if (!ar_snoc->mem) {
		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
			   &ar_snoc->mem_pa);
		return -EINVAL;
	}

	for (i = 0; i < CE_COUNT; i++) {
		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
		if (!res) {
			ath10k_err(ar, "failed to get IRQ%d\n", i);
			ret = -ENODEV;
			goto out;
		}
		ar_snoc->ce_irqs[i].irq_line = res->start;
	}

out:
	return ret;
}

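/* Handle firmware ready/down indications from the QMI layer: register the
 * core on the first FW_READY, restart it on subsequent ones, and mark
 * recovery/crash state on FW_DOWN.
 */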
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_bus_params bus_params;
	int ret;

	if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
		return 0;

	switch (type) {
	case ATH10K_QMI_EVENT_FW_READY_IND:
		if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
			queue_work(ar->workqueue, &ar->restart_work);
			break;
		}

		bus_params.dev_type = ATH10K_DEV_TYPE_LL;
		bus_params.chip_id = ar_snoc->target_info.soc_version;
		ret = ath10k_core_register(ar, &bus_params);
		if (ret) {
			ath10k_err(ar, "Failed to register driver core: %d\n",
				   ret);
			return ret;
		}
		set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
		break;
	case ATH10K_QMI_EVENT_FW_DOWN_IND:
		set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
		set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
		break;
	default:
		ath10k_err(ar, "invalid fw indication: %llx\n", type);
		return -EINVAL;
	}

	return 0;
}

static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *pipe;
	int i, ret;

	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
	spin_lock_init(&ce->ce_lock);
	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_snoc->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
	}
	ath10k_snoc_init_napi(ar);

	return 0;
}

static void ath10k_snoc_release_resource(struct ath10k *ar)
{
	int i;

	netif_napi_del(&ar->napi);
	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
				struct ath10k_vreg_info *vreg_info)
{
	struct regulator *reg;
	int ret = 0;

	reg = devm_regulator_get_optional(dev, vreg_info->name);

	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);

		if (ret == -EPROBE_DEFER) {
			ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
				   vreg_info->name);
			return ret;
		}
		if (vreg_info->required) {
			ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
				   vreg_info->name, ret);
			return ret;
		}
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "Optional regulator %s doesn't exist: %d\n",
			   vreg_info->name, ret);
		goto done;
	}

	vreg_info->reg = reg;

done:
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
		   vreg_info->name, vreg_info->min_v, vreg_info->max_v,
		   vreg_info->load_ua, vreg_info->settle_delay);

	return 0;
}

static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
			       struct ath10k_clk_info *clk_info)
{
	struct clk *handle;
	int ret = 0;

	handle = devm_clk_get(dev, clk_info->name);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		if (clk_info->required) {
			ath10k_err(ar, "snoc clock %s isn't available: %d\n",
				   clk_info->name, ret);
			return ret;
		}
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
			   clk_info->name,
			   ret);
		return 0;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
		   clk_info->name, clk_info->freq);

	clk_info->handle = handle;

	return ret;
}

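/* Enable a single regulator: program the voltage range, apply the load
 * (if any), enable it and wait for the configured settle delay.
 */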
static int __ath10k_snoc_vreg_on(struct ath10k *ar,
				 struct ath10k_vreg_info *vreg_info)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
		   vreg_info->name);

	ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
				    vreg_info->max_v);
	if (ret) {
		ath10k_err(ar,
			   "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
			   vreg_info->name, vreg_info->min_v, vreg_info->max_v);
		return ret;
	}

	if (vreg_info->load_ua) {
		ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua);
		if (ret < 0) {
			ath10k_err(ar, "failed to set regulator %s load: %d\n",
				   vreg_info->name, vreg_info->load_ua);
			goto err_set_load;
		}
	}

	ret = regulator_enable(vreg_info->reg);
	if (ret) {
		ath10k_err(ar, "failed to enable regulator %s\n",
			   vreg_info->name);
		goto err_enable;
	}

	if (vreg_info->settle_delay)
		udelay(vreg_info->settle_delay);

	return 0;

err_enable:
	regulator_set_load(vreg_info->reg, 0);
err_set_load:
	regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);

	return ret;
}

static int __ath10k_snoc_vreg_off(struct ath10k *ar,
				  struct ath10k_vreg_info *vreg_info)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
		   vreg_info->name);

	ret = regulator_disable(vreg_info->reg);
	if (ret)
		ath10k_err(ar, "failed to disable regulator %s\n",
			   vreg_info->name);

	ret = regulator_set_load(vreg_info->reg, 0);
	if (ret < 0)
		ath10k_err(ar, "failed to set load %s\n", vreg_info->name);

	ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
	if (ret)
		ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name);

	return ret;
}

static int ath10k_snoc_vreg_on(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_vreg_info *vreg_info;
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		ret = __ath10k_snoc_vreg_on(ar, vreg_info);
		if (ret)
			goto err_reg_config;
	}

	return 0;

err_reg_config:
	for (i = i - 1; i >= 0; i--) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		__ath10k_snoc_vreg_off(ar, vreg_info);
	}

	return ret;
}

static int ath10k_snoc_vreg_off(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_vreg_info *vreg_info;
	int ret = 0;
	int i;

	for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		ret = __ath10k_snoc_vreg_off(ar, vreg_info);
	}

	return ret;
}

static int ath10k_snoc_clk_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_clk_info *clk_info;
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
			   clk_info->name);

		if (clk_info->freq) {
			ret = clk_set_rate(clk_info->handle, clk_info->freq);

			if (ret) {
				ath10k_err(ar, "failed to set clock %s freq %u\n",
					   clk_info->name, clk_info->freq);
				goto err_clock_config;
			}
		}

		ret = clk_prepare_enable(clk_info->handle);
		if (ret) {
			ath10k_err(ar, "failed to enable clock %s\n",
				   clk_info->name);
			goto err_clock_config;
		}
	}

	return 0;

err_clock_config:
	for (i = i - 1; i >= 0; i--) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		clk_disable_unprepare(clk_info->handle);
	}

	return ret;
}

static int ath10k_snoc_clk_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_clk_info *clk_info;
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
			   clk_info->name);

		clk_disable_unprepare(clk_info->handle);
	}

	return 0;
}

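/* Power sequencing: regulators are brought up before clocks, and torn
 * down in the reverse order.
 */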
static int ath10k_hw_power_on(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");

	ret = ath10k_snoc_vreg_on(ar);
	if (ret)
		return ret;

	ret = ath10k_snoc_clk_init(ar);
	if (ret)
		goto vreg_off;

	return ret;

vreg_off:
	ath10k_snoc_vreg_off(ar);
	return ret;
}

static int ath10k_hw_power_off(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");

	ath10k_snoc_clk_deinit(ar);

	ret = ath10k_snoc_vreg_off(ar);

	return ret;
}

static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	 .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);

static int ath10k_snoc_probe(struct platform_device *pdev)
{
	const struct ath10k_snoc_drv_priv *drv_data;
	const struct of_device_id *of_id;
	struct ath10k_snoc *ar_snoc;
	struct device *dev;
	struct ath10k *ar;
	u32 msa_size;
	int ret;
	u32 i;

	of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	drv_data = of_id->data;
	dev = &pdev->dev;

	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
	if (ret) {
		dev_err(dev, "failed to set dma mask: %d\n", ret);
		return ret;
	}

	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
				drv_data->hw_rev, &ath10k_snoc_hif_ops);
	if (!ar) {
		dev_err(dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ar_snoc = ath10k_snoc_priv(ar);
	ar_snoc->dev = pdev;
	platform_set_drvdata(pdev, ar);
	ar_snoc->ar = ar;
	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
	ar->ce_priv = &ar_snoc->ce;
	msa_size = drv_data->msa_size;

	ret = ath10k_snoc_resource_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_snoc_setup_resource(ar);
	if (ret) {
		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}
	ret = ath10k_snoc_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_release_resource;
	}

	ar_snoc->vreg = vreg_cfg;
	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
		ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
		if (ret)
			goto err_free_irq;
	}

	ar_snoc->clk = clk_cfg;
	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
		if (ret)
			goto err_free_irq;
	}

	ret = ath10k_hw_power_on(ar);
	if (ret) {
		ath10k_err(ar, "failed to power on device: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_qmi_init(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
		goto err_power_off;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");

	return 0;

err_power_off:
	ath10k_hw_power_off(ar);

err_free_irq:
	ath10k_snoc_free_irq(ar);

err_release_resource:
	ath10k_snoc_release_resource(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");

	reinit_completion(&ar->driver_recovery);

	if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
		wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);

	set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);

	ath10k_core_unregister(ar);
	ath10k_hw_power_off(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}

static struct platform_driver ath10k_snoc_driver = {
	.probe  = ath10k_snoc_probe,
	.remove = ath10k_snoc_remove,
	.driver = {
		.name   = "ath10k_snoc",
		.of_match_table = ath10k_snoc_dt_match,
	},
};
module_platform_driver(ath10k_snoc_driver);

MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");