1 /*
2  * Copyright (c) 2007-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include "core.h"
18 #include "debug.h"
19 #include "hif-ops.h"
20 
21 #define HTC_PACKET_CONTAINER_ALLOCATION 32
22 #define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
23 
24 static int ath6kl_htc_pipe_tx(struct htc_target *handle,
25 			      struct htc_packet *packet);
26 static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
27 
28 /* htc pipe tx path */
29 static inline void restore_tx_packet(struct htc_packet *packet)
30 {
31 	if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
32 		skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
33 		packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
34 	}
35 }
36 
37 static void do_send_completion(struct htc_endpoint *ep,
38 			       struct list_head *queue_to_indicate)
39 {
40 	struct htc_packet *packet;
41 
42 	if (list_empty(queue_to_indicate)) {
43 		/* nothing to indicate */
44 		return;
45 	}
46 
47 	if (ep->ep_cb.tx_comp_multi != NULL) {
48 		ath6kl_dbg(ATH6KL_DBG_HTC,
49 			   "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
50 			   __func__, ep->eid,
51 			   get_queue_depth(queue_to_indicate));
52 		/*
53 		 * a multiple send complete handler is being used,
54 		 * pass the queue to the handler
55 		 */
56 		ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
57 		/*
58 		 * all packets are now owned by the callback,
59 		 * reset queue to be safe
60 		 */
61 		INIT_LIST_HEAD(queue_to_indicate);
62 	} else {
63 		/* using legacy EpTxComplete */
64 		do {
65 			packet = list_first_entry(queue_to_indicate,
66 						  struct htc_packet, list);
67 
68 			list_del(&packet->list);
69 			ath6kl_dbg(ATH6KL_DBG_HTC,
70 				   "%s: calling ep %d send complete callback on packet 0x%p\n",
71 				   __func__, ep->eid, packet);
72 			ep->ep_cb.tx_complete(ep->target, packet);
73 		} while (!list_empty(queue_to_indicate));
74 	}
75 }
76 
77 static void send_packet_completion(struct htc_target *target,
78 				   struct htc_packet *packet)
79 {
80 	struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
81 	struct list_head container;
82 
83 	restore_tx_packet(packet);
84 	INIT_LIST_HEAD(&container);
85 	list_add_tail(&packet->list, &container);
86 
87 	/* do completion */
88 	do_send_completion(ep, &container);
89 }
90 
91 static void get_htc_packet_credit_based(struct htc_target *target,
92 					struct htc_endpoint *ep,
93 					struct list_head *queue)
94 {
95 	int credits_required;
96 	int remainder;
97 	u8 send_flags;
98 	struct htc_packet *packet;
99 	unsigned int transfer_len;
100 
101 	/* NOTE : the TX lock is held when this function is called */
102 
103 	/* loop until we can grab as many packets out of the queue as we can */
104 	while (true) {
105 		send_flags = 0;
106 		if (list_empty(&ep->txq))
107 			break;
108 
109 		/* get packet at head, but don't remove it */
110 		packet = list_first_entry(&ep->txq, struct htc_packet, list);
111 
112 		ath6kl_dbg(ATH6KL_DBG_HTC,
113 			   "%s: got head packet:0x%p , queue depth: %d\n",
114 			   __func__, packet, get_queue_depth(&ep->txq));
115 
116 		transfer_len = packet->act_len + HTC_HDR_LENGTH;
117 
118 		if (transfer_len <= target->tgt_cred_sz) {
119 			credits_required = 1;
120 		} else {
121 			/* figure out how many credits this message requires */
122 			credits_required = transfer_len / target->tgt_cred_sz;
123 			remainder = transfer_len % target->tgt_cred_sz;
124 
125 			if (remainder)
126 				credits_required++;
127 		}
128 
129 		ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
130 			   __func__, credits_required, ep->cred_dist.credits);
131 
132 		if (ep->eid == ENDPOINT_0) {
133 			/*
134 			 * endpoint 0 is special, it always has a credit and
135 			 * does not require credit based flow control
136 			 */
137 			credits_required = 0;
138 
139 		} else {
140 			if (ep->cred_dist.credits < credits_required)
141 				break;
142 
143 			ep->cred_dist.credits -= credits_required;
144 			ep->ep_st.cred_cosumd += credits_required;
145 
146 			/* check if we need credits back from the target */
147 			if (ep->cred_dist.credits <
148 					ep->cred_dist.cred_per_msg) {
149 				/* tell the target we need credits ASAP! */
150 				send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
151 				ep->ep_st.cred_low_indicate += 1;
152 				ath6kl_dbg(ATH6KL_DBG_HTC,
153 					   "%s: host needs credits\n",
154 					   __func__);
155 			}
156 		}
157 
158 		/* now we can fully dequeue */
159 		packet = list_first_entry(&ep->txq, struct htc_packet, list);
160 
161 		list_del(&packet->list);
162 		/* save the number of credits this packet consumed */
163 		packet->info.tx.cred_used = credits_required;
164 		/* save send flags */
165 		packet->info.tx.flags = send_flags;
166 		packet->info.tx.seqno = ep->seqno;
167 		ep->seqno++;
168 		/* queue this packet into the caller's queue */
169 		list_add_tail(&packet->list, queue);
170 	}
171 }
172 
173 static void get_htc_packet(struct htc_target *target,
174 			   struct htc_endpoint *ep,
175 			   struct list_head *queue, int resources)
176 {
177 	struct htc_packet *packet;
178 
179 	/* NOTE : the TX lock is held when this function is called */
180 
181 	/* loop until we can grab as many packets out of the queue as we can */
182 	while (resources) {
183 		if (list_empty(&ep->txq))
184 			break;
185 
186 		packet = list_first_entry(&ep->txq, struct htc_packet, list);
187 		list_del(&packet->list);
188 
189 		ath6kl_dbg(ATH6KL_DBG_HTC,
190 			   "%s: got packet:0x%p , new queue depth: %d\n",
191 			   __func__, packet, get_queue_depth(&ep->txq));
192 		packet->info.tx.seqno = ep->seqno;
193 		packet->info.tx.flags = 0;
194 		packet->info.tx.cred_used = 0;
195 		ep->seqno++;
196 
197 		/* queue this packet into the caller's queue */
198 		list_add_tail(&packet->list, queue);
199 		resources--;
200 	}
201 }
202 
203 static int htc_issue_packets(struct htc_target *target,
204 			     struct htc_endpoint *ep,
205 			     struct list_head *pkt_queue)
206 {
207 	int status = 0;
208 	u16 payload_len;
209 	struct sk_buff *skb;
210 	struct htc_frame_hdr *htc_hdr;
211 	struct htc_packet *packet;
212 
213 	ath6kl_dbg(ATH6KL_DBG_HTC,
214 		   "%s: queue: 0x%p, pkts %d\n", __func__,
215 		   pkt_queue, get_queue_depth(pkt_queue));
216 
217 	while (!list_empty(pkt_queue)) {
218 		packet = list_first_entry(pkt_queue, struct htc_packet, list);
219 		list_del(&packet->list);
220 
221 		skb = packet->skb;
222 		if (!skb) {
223 			WARN_ON_ONCE(1);
224 			status = -EINVAL;
225 			break;
226 		}
227 
228 		payload_len = packet->act_len;
229 
230 		/* setup HTC frame header */
231 		htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
232 							    sizeof(*htc_hdr));
233 		if (!htc_hdr) {
234 			WARN_ON_ONCE(1);
235 			status = -EINVAL;
236 			break;
237 		}
238 
239 		packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
240 
241 		/* Endianess? */
242 		put_unaligned((u16) payload_len, &htc_hdr->payld_len);
243 		htc_hdr->flags = packet->info.tx.flags;
244 		htc_hdr->eid = (u8) packet->endpoint;
245 		htc_hdr->ctrl[0] = 0;
246 		htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;
247 
248 		spin_lock_bh(&target->tx_lock);
249 
250 		/* store in look up queue to match completions */
251 		list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
252 		ep->ep_st.tx_issued += 1;
253 		spin_unlock_bh(&target->tx_lock);
254 
255 		status = ath6kl_hif_pipe_send(target->dev->ar,
256 					      ep->pipe.pipeid_ul, NULL, skb);
257 
258 		if (status != 0) {
259 			if (status != -ENOMEM) {
260 				/* TODO: if more than 1 endpoint maps to the
261 				 * same PipeID, it is possible to run out of
262 				 * resources in the HIF layer.
263 				 * Don't emit the error
264 				 */
265 				ath6kl_dbg(ATH6KL_DBG_HTC,
266 					   "%s: failed status:%d\n",
267 					   __func__, status);
268 			}
269 			spin_lock_bh(&target->tx_lock);
270 			list_del(&packet->list);
271 
272 			/* reclaim credits */
273 			ep->cred_dist.credits += packet->info.tx.cred_used;
274 			spin_unlock_bh(&target->tx_lock);
275 
276 			/* put it back into the callers queue */
277 			list_add(&packet->list, pkt_queue);
278 			break;
279 		}
280 	}
281 
282 	if (status != 0) {
283 		while (!list_empty(pkt_queue)) {
284 			if (status != -ENOMEM) {
285 				ath6kl_dbg(ATH6KL_DBG_HTC,
286 					   "%s: failed pkt:0x%p status:%d\n",
287 					   __func__, packet, status);
288 			}
289 
290 			packet = list_first_entry(pkt_queue,
291 						  struct htc_packet, list);
292 			list_del(&packet->list);
293 			packet->status = status;
294 			send_packet_completion(target, packet);
295 		}
296 	}
297 
298 	return status;
299 }
300 
/*
 * Accept packets from the caller's queue (@txq may be NULL, meaning
 * "just drain what is already queued"), apply TX-queue overflow policy
 * via the endpoint's tx_full callback, then drain the endpoint TX
 * queue for as long as transmit resources (target credits or HIF queue
 * slots) allow.  Only one context at a time performs the drain; others
 * just enqueue and leave (see tx_proc_cnt below).
 *
 * Returns HTC_SEND_QUEUE_OK on success, HTC_SEND_QUEUE_DROP when the
 * caller's packets were not accepted (caller must clean them up).
 */
static enum htc_send_queue_result htc_try_send(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct list_head *txq)
{
	struct list_head send_queue;	/* temp queue to hold packets */
	struct htc_packet *packet, *tmp_pkt;
	struct ath6kl *ar = target->dev->ar;
	enum htc_send_full_action action;
	int tx_resources, overflow, txqueue_depth, i, good_pkts;
	u8 pipeid;

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
		   __func__, txq,
		   (txq == NULL) ? 0 : get_queue_depth(txq));

	/* init the local send queue */
	INIT_LIST_HEAD(&send_queue);

	/*
	 * txq equals to NULL means
	 * caller didn't provide a queue, just wants us to
	 * check queues and send
	 */
	if (txq != NULL) {
		if (list_empty(txq)) {
			/* empty queue */
			return HTC_SEND_QUEUE_DROP;
		}

		spin_lock_bh(&target->tx_lock);
		txqueue_depth = get_queue_depth(&ep->txq);
		spin_unlock_bh(&target->tx_lock);

		if (txqueue_depth >= ep->max_txq_depth) {
			/* we've already overflowed */
			overflow = get_queue_depth(txq);
		} else {
			/* get how much we will overflow by */
			overflow = txqueue_depth;
			overflow += get_queue_depth(txq);
			/* get how much we will overflow the TX queue by */
			overflow -= ep->max_txq_depth;
		}

		/* if overflow is negative or zero, we are okay */
		if (overflow > 0) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
				   __func__, ep->eid, overflow, txqueue_depth,
				   ep->max_txq_depth);
		}
		if ((overflow <= 0) ||
		    (ep->ep_cb.tx_full == NULL)) {
			/*
			 * all packets will fit or caller did not provide send
			 * full indication handler -- just move all of them
			 * to the local send_queue object
			 */
			list_splice_tail_init(txq, &send_queue);
		} else {
			/* number of packets that fit without overflowing */
			good_pkts = get_queue_depth(txq) - overflow;
			if (good_pkts < 0) {
				WARN_ON_ONCE(1);
				return HTC_SEND_QUEUE_DROP;
			}

			/* we have overflowed, and a callback is provided */
			/* dequeue all non-overflow packets to the sendqueue */
			for (i = 0; i < good_pkts; i++) {
				/* pop off caller's queue */
				packet = list_first_entry(txq,
							  struct htc_packet,
							  list);
				/* move to local queue */
				list_move_tail(&packet->list, &send_queue);
			}

			/*
			 * the caller's queue has all the packets that won't fit
			 * walk through the caller's queue and indicate each to
			 * the send full handler
			 */
			list_for_each_entry_safe(packet, tmp_pkt,
						 txq, list) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: Indicat overflowed TX pkts: %p\n",
					   __func__, packet);
				action = ep->ep_cb.tx_full(ep->target, packet);
				if (action == HTC_SEND_FULL_DROP) {
					/* callback wants the packet dropped */
					ep->ep_st.tx_dropped += 1;

					/* leave this one in the caller's queue
					 * for cleanup */
				} else {
					/* callback wants to keep this packet,
					 * move from caller's queue to the send
					 * queue */
					list_move_tail(&packet->list,
						       &send_queue);
				}
			}

			if (list_empty(&send_queue)) {
				/* no packets made it in, caller will cleanup */
				return HTC_SEND_QUEUE_DROP;
			}
		}
	}

	/*
	 * without credit flow control the limit is HIF queue space on
	 * this endpoint's uplink pipe; with credit flow it is credits,
	 * so HIF resources are not consulted here
	 */
	if (!ep->pipe.tx_credit_flow_enabled) {
		tx_resources =
		    ath6kl_hif_pipe_get_free_queue_number(ar,
							  ep->pipe.pipeid_ul);
	} else {
		tx_resources = 0;
	}

	spin_lock_bh(&target->tx_lock);
	if (!list_empty(&send_queue)) {
		/* transfer packets to tail */
		list_splice_tail_init(&send_queue, &ep->txq);
		if (!list_empty(&send_queue)) {
			/* splice-init must have emptied send_queue */
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return HTC_SEND_QUEUE_DROP;
		}
		INIT_LIST_HEAD(&send_queue);
	}

	/* increment tx processing count on entry */
	ep->tx_proc_cnt++;

	if (ep->tx_proc_cnt > 1) {
		/*
		 * Another thread or task is draining the TX queues on this
		 * endpoint that thread will reset the tx processing count
		 * when the queue is drained.
		 */
		ep->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		return HTC_SEND_QUEUE_OK;
	}

	/***** beyond this point only 1 thread may enter ******/

	/*
	 * Now drain the endpoint TX queue for transmission as long as we have
	 * enough transmit resources.
	 */
	while (true) {
		if (get_queue_depth(&ep->txq) == 0)
			break;

		if (ep->pipe.tx_credit_flow_enabled) {
			/*
			 * Credit based mechanism provides flow control
			 * based on target transmit resource availability,
			 * we assume that the HIF layer will always have
			 * bus resources greater than target transmit
			 * resources.
			 */
			get_htc_packet_credit_based(target, ep, &send_queue);
		} else {
			/*
			 * Get all packets for this endpoint that we can
			 * for this pass.
			 */
			get_htc_packet(target, ep, &send_queue, tx_resources);
		}

		if (get_queue_depth(&send_queue) == 0) {
			/*
			 * Didn't get packets due to out of resources or TX
			 * queue was drained.
			 */
			break;
		}

		/* drop the lock across the actual send (it may sleep in HIF) */
		spin_unlock_bh(&target->tx_lock);

		/* send what we can */
		htc_issue_packets(target, ep, &send_queue);

		/* refresh HIF resource count for the next pass */
		if (!ep->pipe.tx_credit_flow_enabled) {
			pipeid = ep->pipe.pipeid_ul;
			tx_resources =
			    ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
		}

		spin_lock_bh(&target->tx_lock);
	}

	/* done with this endpoint, we can clear the count */
	ep->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);

	return HTC_SEND_QUEUE_OK;
}
500 
501 /* htc control packet manipulation */
502 static void destroy_htc_txctrl_packet(struct htc_packet *packet)
503 {
504 	struct sk_buff *skb;
505 	skb = packet->skb;
506 	dev_kfree_skb(skb);
507 	kfree(packet);
508 }
509 
510 static struct htc_packet *build_htc_txctrl_packet(void)
511 {
512 	struct htc_packet *packet = NULL;
513 	struct sk_buff *skb;
514 
515 	packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
516 	if (packet == NULL)
517 		return NULL;
518 
519 	skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
520 
521 	if (skb == NULL) {
522 		kfree(packet);
523 		return NULL;
524 	}
525 	packet->skb = skb;
526 
527 	return packet;
528 }
529 
static void htc_free_txctrl_packet(struct htc_target *target,
				   struct htc_packet *packet)
{
	/* control packets are not pooled - tear the packet down */
	destroy_htc_txctrl_packet(packet);
}
535 
static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
{
	/* target is unused; control packets are allocated fresh */
	return build_htc_txctrl_packet();
}
540 
/* TX-completion hook for control endpoint packets: just free them */
static void htc_txctrl_complete(struct htc_target *target,
				struct htc_packet *packet)
{
	htc_free_txctrl_packet(target, packet);
}
546 
547 #define MAX_MESSAGE_SIZE 1536
548 
/*
 * Distribute the target's TX credits across the WMI services.
 *
 * The allocation is positional: txcredit_alloc[] entries are filled by
 * advancing 'entry' (each bare entry++ with no assignment deliberately
 * skips a slot, leaving its service_id zero / unallocated).  Leftover
 * credits always go to the best-effort service.
 *
 * Returns 0 on success, or -ENOMEM if the credit pool is exhausted
 * before every mandatory service received an allocation.
 */
static int htc_setup_target_buffer_assignments(struct htc_target *target)
{
	int status, credits, credit_per_maxmsg, i;
	struct htc_pipe_txcredit_alloc *entry;
	unsigned int hif_usbaudioclass = 0;

	/* credits needed to carry one maximum-sized message */
	credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
	if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
		credit_per_maxmsg++;

	/* TODO, this should be configured by the caller! */

	credits = target->tgt_creds;
	entry = &target->pipe.txcredit_alloc[0];

	/* assume failure until a full allocation succeeds */
	status = -ENOMEM;

	/* FIXME: hif_usbaudioclass is always zero */
	if (hif_usbaudioclass) {
		/* NOTE(review): dead branch while the flag above stays 0 */
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: For USB Audio Class- Total:%d\n",
			   __func__, credits);
		entry++;
		entry++;
		/* Setup VO Service To have Max Credits */
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = (credits - 6);
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	} else {
		/* VI gets a quarter of the pool (minimum 1 credit) */
		entry++;
		entry->service_id = WMI_DATA_VI_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* VO gets a quarter of the original pool as well */
		entry++;
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* control service: enough for one max-sized message */
		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* background data: also one max-sized message */
		entry++;
		entry->service_id = WMI_DATA_BK_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	}

	if (status == 0) {
		/* log the final per-service allocation table */
		for (i = 0; i < ENDPOINT_MAX; i++) {
			if (target->pipe.txcredit_alloc[i].service_id != 0) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
					   i,
					   target->pipe.txcredit_alloc[i].
					   service_id,
					   target->pipe.txcredit_alloc[i].
					   credit_alloc);
			}
		}
	}
	return status;
}
653 
654 /* process credit reports and call distribution function */
/* process credit reports and call distribution function */
static void htc_process_credit_report(struct htc_target *target,
				      struct htc_credit_report *rpt,
				      int num_entries,
				      enum htc_endpoint_id from_ep)
{
	int total_credits = 0, i;
	struct htc_endpoint *ep;

	/* lock out TX while we update credits */
	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < num_entries; i++, rpt++) {
		/* malformed report: bail out without applying the rest */
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		ep = &target->endpoint[rpt->eid];
		ep->cred_dist.credits += rpt->credits;

		/*
		 * new credits plus pending packets: kick the TX path.
		 * NOTE(review): tx_lock is dropped here because
		 * htc_try_send takes it itself; 'rpt' points into the
		 * received trailer buffer and stays valid meanwhile.
		 */
		if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
			spin_unlock_bh(&target->tx_lock);
			htc_try_send(target, ep, NULL);
			spin_lock_bh(&target->tx_lock);
		}

		total_credits += rpt->credits;
	}
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "Report indicated %d credits to distribute\n",
		   total_credits);

	spin_unlock_bh(&target->tx_lock);
}
690 
691 /* flush endpoint TX queue */
692 static void htc_flush_tx_endpoint(struct htc_target *target,
693 				  struct htc_endpoint *ep, u16 tag)
694 {
695 	struct htc_packet *packet;
696 
697 	spin_lock_bh(&target->tx_lock);
698 	while (get_queue_depth(&ep->txq)) {
699 		packet = list_first_entry(&ep->txq, struct htc_packet, list);
700 		list_del(&packet->list);
701 		packet->status = 0;
702 		send_packet_completion(target, packet);
703 	}
704 	spin_unlock_bh(&target->tx_lock);
705 }
706 
707 /*
708  * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC,
709  * since upper layers expects struct htc_packet containers we use the completed
710  * skb and lookup it's corresponding HTC packet buffer from a lookup list.
711  * This is extra overhead that can be fixed by re-aligning HIF interfaces with
712  * HTC.
713  */
714 static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
715 					       struct htc_endpoint *ep,
716 					       struct sk_buff *skb)
717 {
718 	struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
719 
720 	spin_lock_bh(&target->tx_lock);
721 
722 	/*
723 	 * interate from the front of tx lookup queue
724 	 * this lookup should be fast since lower layers completes in-order and
725 	 * so the completed packet should be at the head of the list generally
726 	 */
727 	list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
728 				 list) {
729 		/* check for removal */
730 		if (skb == packet->skb) {
731 			/* found it */
732 			list_del(&packet->list);
733 			found_packet = packet;
734 			break;
735 		}
736 	}
737 
738 	spin_unlock_bh(&target->tx_lock);
739 
740 	return found_packet;
741 }
742 
743 static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
744 {
745 	struct htc_target *target = ar->htc_target;
746 	struct htc_frame_hdr *htc_hdr;
747 	struct htc_endpoint *ep;
748 	struct htc_packet *packet;
749 	u8 ep_id, *netdata;
750 	u32 netlen;
751 
752 	netdata = skb->data;
753 	netlen = skb->len;
754 
755 	htc_hdr = (struct htc_frame_hdr *) netdata;
756 
757 	ep_id = htc_hdr->eid;
758 	ep = &target->endpoint[ep_id];
759 
760 	packet = htc_lookup_tx_packet(target, ep, skb);
761 	if (packet == NULL) {
762 		/* may have already been flushed and freed */
763 		ath6kl_err("HTC TX lookup failed!\n");
764 	} else {
765 		/* will be giving this buffer back to upper layers */
766 		packet->status = 0;
767 		send_packet_completion(target, packet);
768 	}
769 	skb = NULL;
770 
771 	if (!ep->pipe.tx_credit_flow_enabled) {
772 		/*
773 		 * note: when using TX credit flow, the re-checking of queues
774 		 * happens when credits flow back from the target. in the
775 		 * non-TX credit case, we recheck after the packet completes
776 		 */
777 		htc_try_send(target, ep, NULL);
778 	}
779 
780 	return 0;
781 }
782 
783 static int htc_send_packets_multiple(struct htc_target *target,
784 				     struct list_head *pkt_queue)
785 {
786 	struct htc_endpoint *ep;
787 	struct htc_packet *packet, *tmp_pkt;
788 
789 	if (list_empty(pkt_queue))
790 		return -EINVAL;
791 
792 	/* get first packet to find out which ep the packets will go into */
793 	packet = list_first_entry(pkt_queue, struct htc_packet, list);
794 
795 	if (packet->endpoint >= ENDPOINT_MAX) {
796 		WARN_ON_ONCE(1);
797 		return -EINVAL;
798 	}
799 	ep = &target->endpoint[packet->endpoint];
800 
801 	htc_try_send(target, ep, pkt_queue);
802 
803 	/* do completion on any packets that couldn't get in */
804 	if (!list_empty(pkt_queue)) {
805 		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
806 			packet->status = -ENOMEM;
807 		}
808 
809 		do_send_completion(ep, pkt_queue);
810 	}
811 
812 	return 0;
813 }
814 
815 /* htc pipe rx path */
816 static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
817 {
818 	struct htc_packet *packet;
819 	spin_lock_bh(&target->rx_lock);
820 
821 	if (target->pipe.htc_packet_pool == NULL) {
822 		spin_unlock_bh(&target->rx_lock);
823 		return NULL;
824 	}
825 
826 	packet = target->pipe.htc_packet_pool;
827 	target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
828 
829 	spin_unlock_bh(&target->rx_lock);
830 
831 	packet->list.next = NULL;
832 	return packet;
833 }
834 
835 static void free_htc_packet_container(struct htc_target *target,
836 				      struct htc_packet *packet)
837 {
838 	struct list_head *lh;
839 
840 	spin_lock_bh(&target->rx_lock);
841 
842 	if (target->pipe.htc_packet_pool == NULL) {
843 		target->pipe.htc_packet_pool = packet;
844 		packet->list.next = NULL;
845 	} else {
846 		lh = (struct list_head *) target->pipe.htc_packet_pool;
847 		packet->list.next = lh;
848 		target->pipe.htc_packet_pool = packet;
849 	}
850 
851 	spin_unlock_bh(&target->rx_lock);
852 }
853 
/*
 * Parse the trailer records appended to a received HTC frame.  Each
 * record is a byte-aligned htc_record_hdr followed by record->len
 * bytes of payload; only credit reports are handled here, other record
 * types are logged and skipped.  Returns 0 or -EINVAL on a malformed
 * trailer.
 */
static int htc_process_trailer(struct htc_target *target, u8 *buffer,
			       int len, enum htc_endpoint_id from_ep)
{
	struct htc_credit_report *report;
	struct htc_record_hdr *record;
	u8 *record_buf, *orig_buf;
	int orig_len, status;

	/* NOTE(review): orig_buf/orig_len are saved but never read back */
	orig_buf = buffer;
	orig_len = len;
	status = 0;

	while (len > 0) {
		/* must have room for at least a record header */
		if (len < sizeof(struct htc_record_hdr)) {
			status = -EINVAL;
			break;
		}

		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buffer;
		len -= sizeof(struct htc_record_hdr);
		buffer += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			/* no room left in buffer for record */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid length: %d (id:%d) buffer has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -EINVAL;
			break;
		}

		/* start of record follows the header */
		record_buf = buffer;

		switch (record->rec_id) {
		case HTC_RECORD_CREDITS:
			/* record must hold at least one full credit report */
			if (record->len < sizeof(struct htc_credit_report)) {
				WARN_ON_ONCE(1);
				return -EINVAL;
			}

			report = (struct htc_credit_report *) record_buf;
			htc_process_credit_report(target, report,
						  record->len / sizeof(*report),
						  from_ep);
			break;
		default:
			/* unknown records are skipped, not fatal */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "unhandled record: id:%d length:%d\n",
				   record->rec_id, record->len);
			break;
		}

		if (status != 0)
			break;

		/* advance buffer past this record for next time around */
		buffer += record->len;
		len -= record->len;
	}

	return status;
}
918 
919 static void do_recv_completion(struct htc_endpoint *ep,
920 			       struct list_head *queue_to_indicate)
921 {
922 	struct htc_packet *packet;
923 
924 	if (list_empty(queue_to_indicate)) {
925 		/* nothing to indicate */
926 		return;
927 	}
928 
929 	/* using legacy EpRecv */
930 	while (!list_empty(queue_to_indicate)) {
931 		packet = list_first_entry(queue_to_indicate,
932 					  struct htc_packet, list);
933 		list_del(&packet->list);
934 		ep->ep_cb.rx(ep->target, packet);
935 	}
936 
937 	return;
938 }
939 
940 static void recv_packet_completion(struct htc_target *target,
941 				   struct htc_endpoint *ep,
942 				   struct htc_packet *packet)
943 {
944 	struct list_head container;
945 	INIT_LIST_HEAD(&container);
946 	list_add_tail(&packet->list, &container);
947 
948 	/* do completion */
949 	do_recv_completion(ep, &container);
950 }
951 
/*
 * HIF RX-completion entry point: validate the HTC frame header,
 * process any trailer records (credit reports), then either capture an
 * endpoint-0 control response or wrap the skb in an htc_packet and
 * indicate it to the endpoint's RX callback.  On every exit path the
 * skb is freed unless ownership was handed off (skb set to NULL).
 */
static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
				       u8 pipeid)
{
	struct htc_target *target = ar->htc_target;
	u8 *netdata, *trailer, hdr_info;
	struct htc_frame_hdr *htc_hdr;
	u32 netlen, trailerlen = 0;
	struct htc_packet *packet;
	struct htc_endpoint *ep;
	u16 payload_len;
	int status = 0;

	/*
	 * ar->htc_target can be NULL due to a race condition that can occur
	 * during driver initialization(we do 'ath6kl_hif_power_on' before
	 * initializing 'ar->htc_target' via 'ath6kl_htc_create').
	 * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
	 * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
	 * Thus the possibility of ar->htc_target being NULL
	 * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
	 */
	if (WARN_ON_ONCE(!target)) {
		ath6kl_err("Target not yet initialized\n");
		status = -EINVAL;
		goto free_skb;
	}


	netdata = skb->data;
	netlen = skb->len;

	/* HTC frame header sits at the front of the buffer */
	htc_hdr = (struct htc_frame_hdr *) netdata;

	if (htc_hdr->eid >= ENDPOINT_MAX) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: invalid EndpointID=%d\n",
			   htc_hdr->eid);
		status = -EINVAL;
		goto free_skb;
	}
	ep = &target->endpoint[htc_hdr->eid];

	/* payload length is little-endian and may be unaligned */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	/* the buffer must hold at least header + declared payload */
	if (netlen < (payload_len + HTC_HDR_LENGTH)) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: insufficient length, got:%d expected =%u\n",
			   netlen, payload_len + HTC_HDR_LENGTH);
		status = -EINVAL;
		goto free_skb;
	}

	/* get flags to check for trailer */
	hdr_info = htc_hdr->flags;
	if (hdr_info & HTC_FLG_RX_TRAILER) {
		/* extract the trailer length */
		hdr_info = htc_hdr->ctrl[0];
		/* trailer must fit within the payload and hold >= 1 record */
		if ((hdr_info < sizeof(struct htc_record_hdr)) ||
		    (hdr_info > payload_len)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid header: payloadlen should be %d, CB[0]: %d\n",
				   payload_len, hdr_info);
			status = -EINVAL;
			goto free_skb;
		}

		trailerlen = hdr_info;
		/* process trailer after hdr/apps payload */
		trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
			payload_len - hdr_info;
		status = htc_process_trailer(target, trailer, hdr_info,
					     htc_hdr->eid);
		if (status != 0)
			goto free_skb;
	}

	if (((int) payload_len - (int) trailerlen) <= 0) {
		/* zero length packet with trailer, just drop these */
		goto free_skb;
	}

	if (htc_hdr->eid == ENDPOINT_0) {
		/* handle HTC control message */
		if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
			/*
			 * fatal: target should not send unsolicited
			 * messageson the endpoint 0
			 */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "HTC ignores Rx Ctrl after setup complete\n");
			status = -EINVAL;
			goto free_skb;
		}

		/* remove HTC header */
		skb_pull(skb, HTC_HDR_LENGTH);

		netdata = skb->data;
		netlen = skb->len;

		/* copy the control response for the waiting poller */
		spin_lock_bh(&target->rx_lock);

		target->pipe.ctrl_response_valid = true;
		target->pipe.ctrl_response_len = min_t(int, netlen,
						       HTC_MAX_CTRL_MSG_LEN);
		memcpy(target->pipe.ctrl_response_buf, netdata,
		       target->pipe.ctrl_response_len);

		spin_unlock_bh(&target->rx_lock);

		/* response was copied out; done with the skb */
		dev_kfree_skb(skb);
		skb = NULL;

		goto free_skb;
	}

	/*
	 * TODO: the message based HIF architecture allocates net bufs
	 * for recv packets since it bridges that HIF to upper layers,
	 * which expects HTC packets, we form the packets here
	 */
	packet = alloc_htc_packet_container(target);
	if (packet == NULL) {
		status = -ENOMEM;
		goto free_skb;
	}

	packet->status = 0;
	packet->endpoint = htc_hdr->eid;
	/* the skb travels with the packet as its context */
	packet->pkt_cntxt = skb;

	/* TODO: for backwards compatibility */
	packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
	packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;

	/*
	 * TODO: this is a hack because the driver layer will set the
	 * actual len of the skb again which will just double the len
	 */
	skb_trim(skb, 0);

	recv_packet_completion(target, ep, packet);

	/* recover the packet container */
	free_htc_packet_container(target, packet);
	/* skb ownership passed to the RX callback - don't free below */
	skb = NULL;

free_skb:
	dev_kfree_skb(skb);

	return status;
}
1104 
1105 static void htc_flush_rx_queue(struct htc_target *target,
1106 			       struct htc_endpoint *ep)
1107 {
1108 	struct list_head container;
1109 	struct htc_packet *packet;
1110 
1111 	spin_lock_bh(&target->rx_lock);
1112 
1113 	while (1) {
1114 		if (list_empty(&ep->rx_bufq))
1115 			break;
1116 
1117 		packet = list_first_entry(&ep->rx_bufq,
1118 					  struct htc_packet, list);
1119 		list_del(&packet->list);
1120 
1121 		spin_unlock_bh(&target->rx_lock);
1122 		packet->status = -ECANCELED;
1123 		packet->act_len = 0;
1124 
1125 		ath6kl_dbg(ATH6KL_DBG_HTC,
1126 			   "Flushing RX packet:0x%p, length:%d, ep:%d\n",
1127 			   packet, packet->buf_len,
1128 			   packet->endpoint);
1129 
1130 		INIT_LIST_HEAD(&container);
1131 		list_add_tail(&packet->list, &container);
1132 
1133 		/* give the packet back */
1134 		do_recv_completion(ep, &container);
1135 		spin_lock_bh(&target->rx_lock);
1136 	}
1137 
1138 	spin_unlock_bh(&target->rx_lock);
1139 }
1140 
1141 /* polling routine to wait for a control packet to be received */
1142 static int htc_wait_recv_ctrl_message(struct htc_target *target)
1143 {
1144 	int count = HTC_TARGET_RESPONSE_POLL_COUNT;
1145 
1146 	while (count > 0) {
1147 		spin_lock_bh(&target->rx_lock);
1148 
1149 		if (target->pipe.ctrl_response_valid) {
1150 			target->pipe.ctrl_response_valid = false;
1151 			spin_unlock_bh(&target->rx_lock);
1152 			break;
1153 		}
1154 
1155 		spin_unlock_bh(&target->rx_lock);
1156 
1157 		count--;
1158 
1159 		msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
1160 	}
1161 
1162 	if (count <= 0) {
1163 		ath6kl_warn("htc pipe control receive timeout!\n");
1164 		return -ETIMEDOUT;
1165 	}
1166 
1167 	return 0;
1168 }
1169 
1170 static void htc_rxctrl_complete(struct htc_target *context,
1171 				struct htc_packet *packet)
1172 {
1173 	struct sk_buff *skb = packet->skb;
1174 
1175 	if (packet->endpoint == ENDPOINT_0 &&
1176 	    packet->status == -ECANCELED &&
1177 	    skb != NULL)
1178 		dev_kfree_skb(skb);
1179 }
1180 
1181 /* htc pipe initialization */
1182 static void reset_endpoint_states(struct htc_target *target)
1183 {
1184 	struct htc_endpoint *ep;
1185 	int i;
1186 
1187 	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1188 		ep = &target->endpoint[i];
1189 		ep->svc_id = 0;
1190 		ep->len_max = 0;
1191 		ep->max_txq_depth = 0;
1192 		ep->eid = i;
1193 		INIT_LIST_HEAD(&ep->txq);
1194 		INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
1195 		INIT_LIST_HEAD(&ep->rx_bufq);
1196 		ep->target = target;
1197 		ep->pipe.tx_credit_flow_enabled = true;
1198 	}
1199 }
1200 
/* start HTC, this is called after all services are connected */
static int htc_config_target_hif_pipe(struct htc_target *target)
{
	/* no pipe configuration is performed here; always succeeds */
	return 0;
}
1206 
1207 /* htc service functions */
1208 static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
1209 {
1210 	u8 allocation = 0;
1211 	int i;
1212 
1213 	for (i = 0; i < ENDPOINT_MAX; i++) {
1214 		if (target->pipe.txcredit_alloc[i].service_id == service_id)
1215 			allocation =
1216 				target->pipe.txcredit_alloc[i].credit_alloc;
1217 	}
1218 
1219 	if (allocation == 0) {
1220 		ath6kl_dbg(ATH6KL_DBG_HTC,
1221 			   "HTC Service TX : 0x%2.2X : allocation is zero!\n",
1222 			   service_id);
1223 	}
1224 
1225 	return allocation;
1226 }
1227 
/*
 * Connect an HTC service.  For the pseudo control service
 * (HTC_CTRL_RSVD_SVC) the endpoint is assigned locally; for all other
 * services a connect-service control message is sent to the target and
 * the response is polled for on the control pipe.  On success the
 * assigned endpoint is initialized and reported back in conn_resp.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
		     struct htc_service_connect_req *conn_req,
		     struct htc_service_connect_resp *conn_resp)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
	bool disable_credit_flowctrl = false;
	unsigned int max_msg_size = 0;
	struct htc_endpoint *ep;
	int length, status = 0;
	struct sk_buff *skb;
	u8 tx_alloc;
	u16 flags;

	if (conn_req->svc_id == 0) {
		WARN_ON_ONCE(1);
		status = -EINVAL;
		goto free_packet;
	}

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_epid = ENDPOINT_0;
		max_msg_size = HTC_MAX_CTRL_MSG_LEN;
		tx_alloc = 0;

	} else {
		/* a zero credit allocation means the service is unknown */
		tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
		if (tx_alloc == 0) {
			status = -ENOMEM;
			goto free_packet;
		}

		/* allocate a packet to send to the target */
		packet = htc_alloc_txctrl_packet(target);

		if (packet == NULL) {
			WARN_ON_ONCE(1);
			status = -ENOMEM;
			goto free_packet;
		}

		skb = packet->skb;
		length = sizeof(struct htc_conn_service_msg);

		/* assemble connect service message */
		conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
								   length);
		if (conn_msg == NULL) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		/* wire-format fields are little endian */
		memset(conn_msg, 0,
		       sizeof(struct htc_conn_service_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
					~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);

		/* tell target desired recv alloc for this ep */
		flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
		conn_msg->conn_flags |= cpu_to_le16(flags);

		if (conn_req->conn_flags &
		    HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
			disable_credit_flowctrl = true;
		}

		set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
				 length,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		status = ath6kl_htc_pipe_tx(target, packet);

		/* we don't own it anymore: the tx path completes (and
		 * frees) the packet whether the send succeeds or not
		 */
		packet = NULL;
		if (status != 0)
			goto free_packet;

		/* wait for response */
		status = htc_wait_recv_ctrl_message(target);
		if (status != 0)
			goto free_packet;

		/* we controlled the buffer creation so it has to be
		 * properly aligned
		 */
		resp_msg = (struct htc_conn_service_resp *)
		    target->pipe.ctrl_response_buf;

		if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
		    (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
			/* this message is not valid */
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "%s: service 0x%X conn resp: status: %d ep: %d\n",
			   __func__, resp_msg->svc_id, resp_msg->status,
			   resp_msg->eid);

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "Target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -EINVAL;
			goto free_packet;
		}

		assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
		max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* the rest are parameter checks so set the error status */
	status = -EINVAL;

	if (assigned_epid >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	if (max_msg_size == 0) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	ep = &target->endpoint[assigned_epid];
	ep->eid = assigned_epid;
	if (ep->svc_id != 0) {
		/* endpoint already in use! */
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_epid;
	conn_resp->len_max = max_msg_size;

	/* setup the endpoint */
	ep->svc_id = conn_req->svc_id; /* this marks ep in use */
	ep->max_txq_depth = conn_req->max_txq_depth;
	ep->len_max = max_msg_size;
	ep->cred_dist.credits = tx_alloc;
	ep->cred_dist.cred_sz = target->tgt_cred_sz;
	/* round the per-message credit count up */
	ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
	if (max_msg_size % target->tgt_cred_sz)
		ep->cred_dist.cred_per_msg++;

	/* copy all the callbacks */
	ep->ep_cb = conn_req->ep_cb;

	/* initialize tx_drop_packet_threshold */
	ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;

	/* map this service to its HIF upload/download pipe ids */
	status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
					     &ep->pipe.pipeid_ul,
					     &ep->pipe.pipeid_dl);
	if (status != 0)
		goto free_packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
		   ep->svc_id, ep->pipe.pipeid_ul,
		   ep->pipe.pipeid_dl, ep->eid);

	if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
		ep->pipe.tx_credit_flow_enabled = false;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "SVC: 0x%4.4X ep:%d TX flow control off\n",
			   ep->svc_id, assigned_epid);
	}

free_packet:
	/* only reached with a live packet when the send was never issued */
	if (packet != NULL)
		htc_free_txctrl_packet(target, packet);
	return status;
}
1414 
1415 /* htc export functions */
1416 static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
1417 {
1418 	int status = 0;
1419 	struct htc_endpoint *ep = NULL;
1420 	struct htc_target *target = NULL;
1421 	struct htc_packet *packet;
1422 	int i;
1423 
1424 	target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
1425 	if (target == NULL) {
1426 		ath6kl_err("htc create unable to allocate memory\n");
1427 		status = -ENOMEM;
1428 		goto fail_htc_create;
1429 	}
1430 
1431 	spin_lock_init(&target->htc_lock);
1432 	spin_lock_init(&target->rx_lock);
1433 	spin_lock_init(&target->tx_lock);
1434 
1435 	reset_endpoint_states(target);
1436 
1437 	for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
1438 		packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
1439 
1440 		if (packet != NULL)
1441 			free_htc_packet_container(target, packet);
1442 	}
1443 
1444 	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
1445 	if (!target->dev) {
1446 		ath6kl_err("unable to allocate memory\n");
1447 		status = -ENOMEM;
1448 		goto fail_htc_create;
1449 	}
1450 	target->dev->ar = ar;
1451 	target->dev->htc_cnxt = target;
1452 
1453 	/* Get HIF default pipe for HTC message exchange */
1454 	ep = &target->endpoint[ENDPOINT_0];
1455 
1456 	ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
1457 				    &ep->pipe.pipeid_dl);
1458 
1459 	return target;
1460 
1461 fail_htc_create:
1462 	if (status != 0) {
1463 		if (target != NULL)
1464 			ath6kl_htc_pipe_cleanup(target);
1465 
1466 		target = NULL;
1467 	}
1468 	return target;
1469 }
1470 
1471 /* cleanup the HTC instance */
1472 static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
1473 {
1474 	struct htc_packet *packet;
1475 
1476 	while (true) {
1477 		packet = alloc_htc_packet_container(target);
1478 		if (packet == NULL)
1479 			break;
1480 		kfree(packet);
1481 	}
1482 
1483 	kfree(target->dev);
1484 
1485 	/* kfree our instance */
1486 	kfree(target);
1487 }
1488 
1489 static int ath6kl_htc_pipe_start(struct htc_target *target)
1490 {
1491 	struct sk_buff *skb;
1492 	struct htc_setup_comp_ext_msg *setup;
1493 	struct htc_packet *packet;
1494 
1495 	htc_config_target_hif_pipe(target);
1496 
1497 	/* allocate a buffer to send */
1498 	packet = htc_alloc_txctrl_packet(target);
1499 	if (packet == NULL) {
1500 		WARN_ON_ONCE(1);
1501 		return -ENOMEM;
1502 	}
1503 
1504 	skb = packet->skb;
1505 
1506 	/* assemble setup complete message */
1507 	setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
1508 							  sizeof(*setup));
1509 	memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
1510 	setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
1511 
1512 	ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
1513 
1514 	set_htc_pkt_info(packet, NULL, (u8 *) setup,
1515 			 sizeof(struct htc_setup_comp_ext_msg),
1516 			 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1517 
1518 	target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
1519 
1520 	return ath6kl_htc_pipe_tx(target, packet);
1521 }
1522 
1523 static void ath6kl_htc_pipe_stop(struct htc_target *target)
1524 {
1525 	int i;
1526 	struct htc_endpoint *ep;
1527 
1528 	/* cleanup endpoints */
1529 	for (i = 0; i < ENDPOINT_MAX; i++) {
1530 		ep = &target->endpoint[i];
1531 		htc_flush_rx_queue(target, ep);
1532 		htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
1533 	}
1534 
1535 	reset_endpoint_states(target);
1536 	target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
1537 }
1538 
1539 static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
1540 					 enum htc_endpoint_id endpoint)
1541 {
1542 	int num;
1543 
1544 	spin_lock_bh(&target->rx_lock);
1545 	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
1546 	spin_unlock_bh(&target->rx_lock);
1547 
1548 	return num;
1549 }
1550 
1551 static int ath6kl_htc_pipe_tx(struct htc_target *target,
1552 			      struct htc_packet *packet)
1553 {
1554 	struct list_head queue;
1555 
1556 	ath6kl_dbg(ATH6KL_DBG_HTC,
1557 		   "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
1558 		   __func__, packet->endpoint, packet->buf,
1559 		   packet->act_len);
1560 
1561 	INIT_LIST_HEAD(&queue);
1562 	list_add_tail(&packet->list, &queue);
1563 
1564 	return htc_send_packets_multiple(target, &queue);
1565 }
1566 
1567 static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
1568 {
1569 	struct htc_ready_ext_msg *ready_msg;
1570 	struct htc_service_connect_req connect;
1571 	struct htc_service_connect_resp resp;
1572 	int status = 0;
1573 
1574 	status = htc_wait_recv_ctrl_message(target);
1575 
1576 	if (status != 0)
1577 		return status;
1578 
1579 	if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
1580 		ath6kl_warn("invalid htc pipe ready msg len: %d\n",
1581 			    target->pipe.ctrl_response_len);
1582 		return -ECOMM;
1583 	}
1584 
1585 	ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
1586 
1587 	if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
1588 		ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
1589 			    ready_msg->ver2_0_info.msg_id);
1590 		return -ECOMM;
1591 	}
1592 
1593 	ath6kl_dbg(ATH6KL_DBG_HTC,
1594 		   "Target Ready! : transmit resources : %d size:%d\n",
1595 		   ready_msg->ver2_0_info.cred_cnt,
1596 		   ready_msg->ver2_0_info.cred_sz);
1597 
1598 	target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
1599 	target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
1600 
1601 	if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
1602 		return -ECOMM;
1603 
1604 	htc_setup_target_buffer_assignments(target);
1605 
1606 	/* setup our pseudo HTC control endpoint connection */
1607 	memset(&connect, 0, sizeof(connect));
1608 	memset(&resp, 0, sizeof(resp));
1609 	connect.ep_cb.tx_complete = htc_txctrl_complete;
1610 	connect.ep_cb.rx = htc_rxctrl_complete;
1611 	connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
1612 	connect.svc_id = HTC_CTRL_RSVD_SVC;
1613 
1614 	/* connect fake service */
1615 	status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
1616 
1617 	return status;
1618 }
1619 
1620 static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
1621 				       enum htc_endpoint_id endpoint, u16 tag)
1622 {
1623 	struct htc_endpoint *ep = &target->endpoint[endpoint];
1624 
1625 	if (ep->svc_id == 0) {
1626 		WARN_ON_ONCE(1);
1627 		/* not in use.. */
1628 		return;
1629 	}
1630 
1631 	htc_flush_tx_endpoint(target, ep, tag);
1632 }
1633 
1634 static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
1635 					      struct list_head *pkt_queue)
1636 {
1637 	struct htc_packet *packet, *tmp_pkt, *first;
1638 	struct htc_endpoint *ep;
1639 	int status = 0;
1640 
1641 	if (list_empty(pkt_queue))
1642 		return -EINVAL;
1643 
1644 	first = list_first_entry(pkt_queue, struct htc_packet, list);
1645 
1646 	if (first->endpoint >= ENDPOINT_MAX) {
1647 		WARN_ON_ONCE(1);
1648 		return -EINVAL;
1649 	}
1650 
1651 	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
1652 		   __func__, first->endpoint, get_queue_depth(pkt_queue),
1653 		   first->buf_len);
1654 
1655 	ep = &target->endpoint[first->endpoint];
1656 
1657 	spin_lock_bh(&target->rx_lock);
1658 
1659 	/* store receive packets */
1660 	list_splice_tail_init(pkt_queue, &ep->rx_bufq);
1661 
1662 	spin_unlock_bh(&target->rx_lock);
1663 
1664 	if (status != 0) {
1665 		/* walk through queue and mark each one canceled */
1666 		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1667 			packet->status = -ECANCELED;
1668 		}
1669 
1670 		do_recv_completion(ep, pkt_queue);
1671 	}
1672 
1673 	return status;
1674 }
1675 
/* Endpoint activity notification hook; intentionally a no-op for the
 * pipe implementation (TODO upstream).
 */
static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id ep,
					     bool active)
{
	/* TODO */
}
1682 
1683 static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
1684 {
1685 	struct htc_endpoint *endpoint;
1686 	struct htc_packet *packet, *tmp_pkt;
1687 	int i;
1688 
1689 	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1690 		endpoint = &target->endpoint[i];
1691 
1692 		spin_lock_bh(&target->rx_lock);
1693 
1694 		list_for_each_entry_safe(packet, tmp_pkt,
1695 					 &endpoint->rx_bufq, list) {
1696 			list_del(&packet->list);
1697 			spin_unlock_bh(&target->rx_lock);
1698 			ath6kl_dbg(ATH6KL_DBG_HTC,
1699 				   "htc rx flush pkt 0x%p len %d ep %d\n",
1700 				   packet, packet->buf_len,
1701 				   packet->endpoint);
1702 			dev_kfree_skb(packet->pkt_cntxt);
1703 			spin_lock_bh(&target->rx_lock);
1704 		}
1705 
1706 		spin_unlock_bh(&target->rx_lock);
1707 	}
1708 }
1709 
/* Credit setup hook; nothing to configure for the pipe HTC
 * implementation, always succeeds.
 */
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	return 0;
}
1715 
/* HTC ops table for the message-based HIF (pipe) transport, exported
 * to the ath6kl core via ath6kl_htc_pipe_attach().
 */
static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
	.create = ath6kl_htc_pipe_create,
	.wait_target = ath6kl_htc_pipe_wait_target,
	.start = ath6kl_htc_pipe_start,
	.conn_service = ath6kl_htc_pipe_conn_service,
	.tx = ath6kl_htc_pipe_tx,
	.stop = ath6kl_htc_pipe_stop,
	.cleanup = ath6kl_htc_pipe_cleanup,
	.flush_txep = ath6kl_htc_pipe_flush_txep,
	.flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
	.activity_changed = ath6kl_htc_pipe_activity_changed,
	.get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_pipe_credit_setup,
	.tx_complete = ath6kl_htc_pipe_tx_complete,
	.rx_complete = ath6kl_htc_pipe_rx_complete,
};
1733 
/* Install the pipe HTC ops on an ath6kl instance. */
void ath6kl_htc_pipe_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_pipe_ops;
}
1738