/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "ce.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "snoc.h"

#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4

static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
	{NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
	{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
	{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
	{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
};

static struct ath10k_wcn3990_clk_info clk_cfg[] = {
	{NULL, "cxo_ref_clk_pin", 0, false},
};

static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static const struct ath10k_snoc_drv_priv drv_priv = {
	.hw_rev = ATH10K_HW_WCN3990,
	.dma_mask = DMA_BIT_MASK(37),
	.msa_size = 0x100000,
};

#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
#define WCN3990_DST_WR_IDX_OFFSET 0x40

static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
		{
			.ce_id = __cpu_to_le16(0),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(3),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(4),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(5),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(7),
			.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(1),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(2),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(7),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(8),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(9),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(10),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},

		{
			.ce_id = __cpu_to_le16(11),
			.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
		},
};

static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 64,
		.recv_cb = ath10k_snoc_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 256,
		.src_sz_max = 256,
		.dest_nentries = 0,
		.send_cb = ath10k_snoc_htt_tx_cb,
	},

	/* CE5: target->host HTT (ipa_uc->target) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = 2048,
		.dest_nentries = 2,
	},

	/* CE8: Target to uMC */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_htt_htc_rx_cb,
	},

	/* CE11: target -> host PKTLOG */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath10k_snoc_pktlog_rx_cb,
	},
};

static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(1024),
		.nbytes_max = __cpu_to_le32(64),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(16384),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(4),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 Target to uMC */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target->host HTT */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE10 target->host HTT */
	{
		.pipenum = __cpu_to_le32(10),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE11 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(11),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},
};

static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),
		__cpu_to_le32(5),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(9),
	},
	{ /* in = DL = target -> host */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(10),
	},
	{ /* in = DL = target -> host pktlog */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(11),
	},
	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	iowrite32(value, ar_snoc->mem + offset);
}

u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	u32 val;

	val = ioread32(ar_snoc->mem + offset);

	return val;
}

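/* Allocate an skb, DMA-map it for the device and post it to the copy
 * engine destination ring under ce_lock so the target can fill it.
 */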
static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map snoc rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);
	while (num--) {
		ret = __ath10k_snoc_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
			mod_timer(&ar_snoc->rx_post_retry, jiffies +
				  ATH10K_SNOC_RX_POST_RETRY_MS);
			break;
		}
	}
}

static void ath10k_snoc_rx_post(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
}

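/* Drain all completed receive descriptors on this pipe: unmap each skb,
 * set its length to the number of bytes received, hand it to @callback
 * and finally replenish the pipe with fresh rx buffers.
 */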
static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				      void (*callback)(struct ath10k *ar,
						       struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);

		callback(ar, skb);
	}

	ath10k_snoc_rx_post_pipe(pipe_info);
}

static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);

	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * WCN3990 firmware uses separate CE(CE11) to transfer pktlog data.
 */
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}

static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
	struct ath10k *ar = ar_snoc->ar;

	ath10k_snoc_rx_post(ar);
}

static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

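/* Send a scatter-gather list on the given pipe. All but the last item are
 * queued with CE_SEND_FLAG_GATHER; on failure the already queued items are
 * reverted before returning.
 */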
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	int err, i = 0;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	spin_lock_bh(&ce->ce_lock);

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);

	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}

static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
					   struct bmi_target_info *target_info)
{
	target_info->version = ATH10K_HW_WCN3990;
	target_info->type = ATH10K_HW_WCN3990;

	return 0;
}

static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
}

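/* Reap send completions on a pipe. Unless forced, servicing is skipped
 * while more than half of the source ring entries are still free.
 */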
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}

static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	(void)ath10k_snoc_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}

static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}

static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}

static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct ath10k_snoc *ar_snoc;
	struct sk_buff *skb;
	struct ath10k *ar;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ar_snoc = ath10k_snoc_priv(ar);
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}

static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *pipe_info;
	int pipe_num;

	del_timer_sync(&ar_snoc->rx_post_retry);
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_snoc->pipe_info[pipe_num];
		ath10k_snoc_rx_pipe_cleanup(pipe_info);
		ath10k_snoc_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	ath10k_snoc_irq_disable(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_snoc_buffer_cleanup(ar);
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}

static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	napi_enable(&ar->napi);
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	return 0;
}

static int ath10k_snoc_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

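/* Translate the target CE pipe, service-to-pipe and shadow register tables
 * into a QMI wlan enable request and ask the firmware for mission mode.
 */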
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
	struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
	struct ath10k_qmi_wlan_enable_cfg cfg;
	enum wlfw_driver_mode_enum_v01 mode;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
		tgt_cfg[pipe_num].pipe_num =
				target_ce_config_wlan[pipe_num].pipenum;
		tgt_cfg[pipe_num].pipe_dir =
				target_ce_config_wlan[pipe_num].pipedir;
		tgt_cfg[pipe_num].nentries =
				target_ce_config_wlan[pipe_num].nentries;
		tgt_cfg[pipe_num].nbytes_max =
				target_ce_config_wlan[pipe_num].nbytes_max;
		tgt_cfg[pipe_num].flags =
				target_ce_config_wlan[pipe_num].flags;
		tgt_cfg[pipe_num].reserved = 0;
	}

	cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
				sizeof(struct ath10k_tgt_pipe_cfg);
	cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
		&tgt_cfg;
	cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
				  sizeof(struct ath10k_svc_pipe_cfg);
	cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
		&target_service_to_ce_map_wlan;
	cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
					sizeof(struct ath10k_shadow_reg_cfg);
	cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
		&target_shadow_reg_cfg_map;

	mode = QMI_WLFW_MISSION_V01;

	return ath10k_qmi_wlan_enable(ar, &cfg, mode, NULL);
}

static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
	ath10k_qmi_wlan_disable(ar);
}

static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_snoc_wlan_disable(ar);
	ath10k_ce_free_rri(ar);
}

static int ath10k_snoc_hif_power_up(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	ret = ath10k_snoc_wlan_enable(ar);
	if (ret) {
		ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
		return ret;
	}

	ath10k_ce_alloc_rri(ar);

	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_wlan_enable;
	}

	return 0;

err_wlan_enable:
	ath10k_snoc_wlan_disable(ar);

	return ret;
}

static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
	.start		= ath10k_snoc_hif_start,
	.stop		= ath10k_snoc_hif_stop,
	.map_service_to_pipe	= ath10k_snoc_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_snoc_hif_get_default_pipe,
	.power_up		= ath10k_snoc_hif_power_up,
	.power_down		= ath10k_snoc_hif_power_down,
	.tx_sg			= ath10k_snoc_hif_tx_sg,
	.send_complete_check	= ath10k_snoc_hif_send_complete_check,
	.get_free_queue_number	= ath10k_snoc_hif_get_free_queue_number,
	.get_target_info	= ath10k_snoc_hif_get_target_info,
};

static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
	.read32		= ath10k_snoc_read32,
	.write32	= ath10k_snoc_write32,
};

static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (ar_snoc->ce_irqs[i].irq_line == irq)
			return i;
	}
	ath10k_err(ar, "No matching CE id for irq %d\n", irq);

	return -EINVAL;
}

static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	ath10k_snoc_irq_disable(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}

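/* NAPI poll handler: service the copy engines, process HTT tx/rx
 * completions and re-enable CE interrupts once the budget is not consumed.
 */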
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	ath10k_ce_per_engine_service_any(ar);
	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete(ctx);
		ath10k_snoc_irq_enable(ar);
	}

	return done;
}

static void ath10k_snoc_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
		       ATH10K_NAPI_BUDGET);
}

static int ath10k_snoc_request_irq(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int irqflags = IRQF_TRIGGER_RISING;
	int ret, id;

	for (id = 0; id < CE_COUNT_MAX; id++) {
		ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
				  ath10k_snoc_per_engine_handler,
				  irqflags, ce_name[id], ar);
		if (ret) {
			ath10k_err(ar,
				   "failed to register IRQ handler for CE %d: %d",
				   id, ret);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	for (id -= 1; id >= 0; id--)
		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);

	return ret;
}

static void ath10k_snoc_free_irq(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int id;

	for (id = 0; id < CE_COUNT_MAX; id++)
		free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
}

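/* Map the "membase" register space and fetch the per-CE interrupt lines
 * from the platform device resources.
 */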
static int ath10k_snoc_resource_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct platform_device *pdev;
	struct resource *res;
	int i, ret = 0;

	pdev = ar_snoc->dev;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
	if (!res) {
		ath10k_err(ar, "Memory base not found in DT\n");
		return -EINVAL;
	}

	ar_snoc->mem_pa = res->start;
	ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
				    resource_size(res));
	if (!ar_snoc->mem) {
		ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
			   &ar_snoc->mem_pa);
		return -EINVAL;
	}

	for (i = 0; i < CE_COUNT; i++) {
		res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
		if (!res) {
			ath10k_err(ar, "failed to get IRQ%d\n", i);
			ret = -ENODEV;
			goto out;
		}
		ar_snoc->ce_irqs[i].irq_line = res->start;
	}

out:
	return ret;
}

int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_bus_params bus_params;
	int ret;

	switch (type) {
	case ATH10K_QMI_EVENT_FW_READY_IND:
		bus_params.dev_type = ATH10K_DEV_TYPE_LL;
		bus_params.chip_id = ar_snoc->target_info.soc_version;
		ret = ath10k_core_register(ar, &bus_params);
		if (ret) {
			ath10k_err(ar, "failed to register driver core: %d\n",
				   ret);
		}
		break;
	case ATH10K_QMI_EVENT_FW_DOWN_IND:
		break;
	default:
		ath10k_err(ar, "invalid fw indication: %llx\n", type);
		return -EINVAL;
	}

	return 0;
}

static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_snoc_pipe *pipe;
	int i, ret;

	timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
	spin_lock_init(&ce->ce_lock);
	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_snoc->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
	}
	ath10k_snoc_init_napi(ar);

	return 0;
}

static void ath10k_snoc_release_resource(struct ath10k *ar)
{
	int i;

	netif_napi_del(&ar->napi);
	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
				struct ath10k_wcn3990_vreg_info *vreg_info)
{
	struct regulator *reg;
	int ret = 0;

	reg = devm_regulator_get_optional(dev, vreg_info->name);

	if (IS_ERR(reg)) {
		ret = PTR_ERR(reg);

		if (ret == -EPROBE_DEFER) {
			ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
				   vreg_info->name);
			return ret;
		}
		if (vreg_info->required) {
			ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
				   vreg_info->name, ret);
			return ret;
		}
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "Optional regulator %s doesn't exist: %d\n",
			   vreg_info->name, ret);
		goto done;
	}

	vreg_info->reg = reg;

done:
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
		   vreg_info->name, vreg_info->min_v, vreg_info->max_v,
		   vreg_info->load_ua, vreg_info->settle_delay);

	return 0;
}

static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
			       struct ath10k_wcn3990_clk_info *clk_info)
{
	struct clk *handle;
	int ret = 0;

	handle = devm_clk_get(dev, clk_info->name);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		if (clk_info->required) {
			ath10k_err(ar, "snoc clock %s isn't available: %d\n",
				   clk_info->name, ret);
			return ret;
		}
		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
			   clk_info->name,
			   ret);
		return 0;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
		   clk_info->name, clk_info->freq);

	clk_info->handle = handle;

	return ret;
}

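/* Program voltage and load for each regulator described in vreg_cfg and
 * enable it, unwinding previously configured regulators on failure.
 */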
static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_wcn3990_vreg_info *vreg_info;
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
			   vreg_info->name);

		ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
					    vreg_info->max_v);
		if (ret) {
			ath10k_err(ar,
				   "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
				   vreg_info->name, vreg_info->min_v, vreg_info->max_v);
			goto err_reg_config;
		}

		if (vreg_info->load_ua) {
			ret = regulator_set_load(vreg_info->reg,
						 vreg_info->load_ua);
			if (ret < 0) {
				ath10k_err(ar,
					   "failed to set regulator %s load: %d\n",
					   vreg_info->name,
					   vreg_info->load_ua);
				goto err_reg_config;
			}
		}

		ret = regulator_enable(vreg_info->reg);
		if (ret) {
			ath10k_err(ar, "failed to enable regulator %s\n",
				   vreg_info->name);
			goto err_reg_config;
		}

		if (vreg_info->settle_delay)
			udelay(vreg_info->settle_delay);
	}

	return 0;

err_reg_config:
	for (; i >= 0; i--) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		regulator_disable(vreg_info->reg);
		regulator_set_load(vreg_info->reg, 0);
		regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
	}

	return ret;
}

static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_wcn3990_vreg_info *vreg_info;
	int ret = 0;
	int i;

	for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
		vreg_info = &ar_snoc->vreg[i];

		if (!vreg_info->reg)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
			   vreg_info->name);

		ret = regulator_disable(vreg_info->reg);
		if (ret)
			ath10k_err(ar, "failed to disable regulator %s\n",
				   vreg_info->name);

		ret = regulator_set_load(vreg_info->reg, 0);
		if (ret < 0)
			ath10k_err(ar, "failed to set load %s\n",
				   vreg_info->name);

		ret = regulator_set_voltage(vreg_info->reg, 0,
					    vreg_info->max_v);
		if (ret)
			ath10k_err(ar, "failed to set voltage %s\n",
				   vreg_info->name);
	}

	return ret;
}

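/* Set the requested rate (if any) and enable each clock described in
 * clk_cfg, unwinding previously enabled clocks on failure.
 */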
static int ath10k_wcn3990_clk_init(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_wcn3990_clk_info *clk_info;
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
			   clk_info->name);

		if (clk_info->freq) {
			ret = clk_set_rate(clk_info->handle, clk_info->freq);

			if (ret) {
				ath10k_err(ar, "failed to set clock %s freq %u\n",
					   clk_info->name, clk_info->freq);
				goto err_clock_config;
			}
		}

		ret = clk_prepare_enable(clk_info->handle);
		if (ret) {
			ath10k_err(ar, "failed to enable clock %s\n",
				   clk_info->name);
			goto err_clock_config;
		}
	}

	return 0;

err_clock_config:
	for (; i >= 0; i--) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		clk_disable_unprepare(clk_info->handle);
	}

	return ret;
}

static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_wcn3990_clk_info *clk_info;
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		clk_info = &ar_snoc->clk[i];

		if (!clk_info->handle)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
			   clk_info->name);

		clk_disable_unprepare(clk_info->handle);
	}

	return 0;
}

static int ath10k_hw_power_on(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");

	ret = ath10k_wcn3990_vreg_on(ar);
	if (ret)
		return ret;

	ret = ath10k_wcn3990_clk_init(ar);
	if (ret)
		goto vreg_off;

	return ret;

vreg_off:
	ath10k_wcn3990_vreg_off(ar);
	return ret;
}

static int ath10k_hw_power_off(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");

	ath10k_wcn3990_clk_deinit(ar);

	ret = ath10k_wcn3990_vreg_off(ar);

	return ret;
}

static const struct of_device_id ath10k_snoc_dt_match[] = {
	{ .compatible = "qcom,wcn3990-wifi",
	 .data = &drv_priv,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);

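/* Probe: create the core device, map resources, request the per-CE IRQs,
 * look up regulators and clocks, power the hardware on and register the
 * QMI client.
 */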
static int ath10k_snoc_probe(struct platform_device *pdev)
{
	const struct ath10k_snoc_drv_priv *drv_data;
	const struct of_device_id *of_id;
	struct ath10k_snoc *ar_snoc;
	struct device *dev;
	struct ath10k *ar;
	u32 msa_size;
	int ret;
	u32 i;

	of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	drv_data = of_id->data;
	dev = &pdev->dev;

	ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
	if (ret) {
		dev_err(dev, "failed to set dma mask: %d", ret);
		return ret;
	}

	ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
				drv_data->hw_rev, &ath10k_snoc_hif_ops);
	if (!ar) {
		dev_err(dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ar_snoc = ath10k_snoc_priv(ar);
	ar_snoc->dev = pdev;
	platform_set_drvdata(pdev, ar);
	ar_snoc->ar = ar;
	ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
	ar->ce_priv = &ar_snoc->ce;
	msa_size = drv_data->msa_size;

	ret = ath10k_snoc_resource_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_snoc_setup_resource(ar);
	if (ret) {
		ath10k_warn(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}
	ret = ath10k_snoc_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_release_resource;
	}

	ar_snoc->vreg = vreg_cfg;
	for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
		ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
		if (ret)
			goto err_free_irq;
	}

	ar_snoc->clk = clk_cfg;
	for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
		ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
		if (ret)
			goto err_free_irq;
	}

	ret = ath10k_hw_power_on(ar);
	if (ret) {
		ath10k_err(ar, "failed to power on device: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath10k_qmi_init(ar, msa_size);
	if (ret) {
		ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
		goto err_power_off;
	}

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
	ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");

	return 0;

err_power_off:
	ath10k_hw_power_off(ar);

err_free_irq:
	ath10k_snoc_free_irq(ar);

err_release_resource:
	ath10k_snoc_release_resource(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

static int ath10k_snoc_remove(struct platform_device *pdev)
{
	struct ath10k *ar = platform_get_drvdata(pdev);

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
	ath10k_core_unregister(ar);
	ath10k_hw_power_off(ar);
	ath10k_snoc_free_irq(ar);
	ath10k_snoc_release_resource(ar);
	ath10k_qmi_deinit(ar);
	ath10k_core_destroy(ar);

	return 0;
}

static struct platform_driver ath10k_snoc_driver = {
		.probe  = ath10k_snoc_probe,
		.remove = ath10k_snoc_remove,
		.driver = {
			.name   = "ath10k_snoc",
			.of_match_table = ath10k_snoc_dt_match,
		},
};
module_platform_driver(ath10k_snoc_driver);

MODULE_AUTHOR("Qualcomm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");