// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"

/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA commands in a single structure.
 * (A "command" in this sense is either a data transfer or an IPA immediate
 * command.)  Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe commands
 * to be performed.  When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform a command (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per command).  If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds.  This way
 * exhaustion of the available TREs in a channel ring is detected
 * as early as possible.  All resources required to complete a transaction
 * are allocated at transaction allocation time.
 *
 * Commands performed as part of a transaction are represented in an array
 * of Linux scatterlist structures.  This array is allocated with the
 * transaction, and its entries are initialized using standard scatterlist
 * functions (such as sg_set_buf() or skb_to_sgvec()).
 *
 * Once a transaction's scatterlist structures have been initialized, the
 * transaction is committed.  The caller is responsible for mapping buffers
 * for DMA if necessary, and this should be done *before* allocating
 * the transaction.  Between a successful allocation and commit of a
 * transaction no errors should occur.
 *
 * Committing transfers ownership of the entire transaction to the GSI
 * transaction core.  The GSI transaction code formats the content of
 * the scatterlist array into the channel ring buffer and informs the
 * hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it.  Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */
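
/* Example: a minimal sketch of the lifecycle described above, modeled on a
 * receive-buffer caller.  The names "gsi", "channel_id", "page", "size" and
 * "offset" are illustrative assumptions, not taken from this file:
 *
 *	struct gsi_trans *trans;
 *	int ret;
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
 *	if (!trans)
 *		return -EBUSY;			// TREs exhausted
 *
 *	ret = gsi_trans_page_add(trans, page, size, offset);
 *	if (ret) {
 *		gsi_trans_free(trans);		// error path only
 *		return ret;
 *	}
 *
 *	gsi_trans_commit(trans, true);		// ownership passes to GSI
 */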

/* Hardware values representing a transfer element type */
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)
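
/* Taken together, the masks above define this layout for the 32-bit TRE
 * flags field (all other bits reserved):
 *
 *	bit   0:	CHAIN	- more TREs in this transaction follow
 *	bit   9:	IEOT	- interrupt when the transfer completes
 *	bit  10:	BEI	- block the event interrupt (outbound acks)
 *	bits 16-23:	TYPE	- transfer element type (enum gsi_tre_type)
 */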

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
	 * single allocation), we can always satisfy requests without
	 * ever worrying about straddling the end of the pool array.
	 * If there aren't enough entries starting at the free index,
	 * we just allocate free entries from the beginning of the pool.
	 */
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}
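
/* Example: a minimal usage sketch for a (non-DMA) pool.  The pool name and
 * sizes are illustrative assumptions:
 *
 *	struct gsi_trans_pool my_pool;
 *	struct gsi_trans *trans;
 *	int ret;
 *
 *	// at most 64 entries outstanding, allocated one at a time
 *	ret = gsi_trans_pool_init(&my_pool, sizeof(*trans), 64, 1);
 *	if (ret)
 *		return ret;
 *
 *	trans = gsi_trans_pool_alloc(&my_pool, 1);	// zeroed entry
 *	...
 *	gsi_trans_pool_exit(&my_pool);
 */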

void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}

/* Home-grown DMA pool.  This way we can preallocate enough memory, using
 * the channel's TRE count to guarantee allocations will always succeed.
 * Even though we specify max_alloc (and it can be more than one), only a
 * single element can be allocated from a DMA pool at a time.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator gives us a power-of-2 number of pages, so round
	 * our request up to that and use the full amount, rather than
	 * wasting memory beyond the requested space.  Because both the
	 * (rounded) entry size and the total are powers of 2, the total
	 * divides evenly into entries; gsi_trans_pool_exit_dma() relies
	 * on the allocated size being exactly (count * size).
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	/* Free the entire allocation, not just one element's worth */
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}

/* Allocate "count" zeroed entries and return the byte offset of the first */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	/* assert(count > 0); */
	/* assert(count <= pool->max_alloc); */

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}

/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}
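
/* Example: a minimal usage sketch for a DMA pool.  The device pointer and
 * sizes are illustrative assumptions; note that regardless of max_alloc,
 * only a single element can be allocated from a DMA pool at a time:
 *
 *	struct gsi_trans_pool pool;
 *	dma_addr_t addr;
 *	void *virt;
 *	int ret;
 *
 *	ret = gsi_trans_pool_init_dma(dev, &pool, 32, 128, 8);
 *	if (ret)
 *		return ret;
 *
 *	virt = gsi_trans_pool_alloc_dma(&pool, &addr);
 *	// virt is the CPU address; addr is the matching DMA address
 *	...
 *	gsi_trans_pool_exit_dma(dev, &pool);
 */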

/* Return the pool element that immediately follows the one given.
 * This only works if elements are allocated one at a time.
 */
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
	void *end = pool->base + pool->count * pool->size;

	/* assert(element >= pool->base); */
	/* assert(element < end); */
	/* assert(pool->max_alloc == 1); */
	element += pool->size;

	return element < end ? element : pool->base;
}

/* Map a given ring entry index to the transaction associated with it */
static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
				  struct gsi_trans *trans)
{
	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	return list_first_entry_or_null(&channel->trans_info.complete,
					struct gsi_trans, links);
}

/* Move a transaction from the allocated list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->pending);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to completed list */
	list_cut_position(&list, &trans_info->pending, &trans->links);
	list_splice_tail(&list, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction from the completed list to the polled list */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->polled);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Reserve some number of TREs on a channel.  Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}

/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	/* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */

	trans_info = &channel->trans_info;

	/* We reserve the TREs now, but consume them at commit time.
	 * If there aren't enough available, we're done.
	 */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	/* Allocate and initialize non-zero fields in the transaction */
	trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->tre_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist and (if requested) info entries. */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;

	spin_lock_bh(&trans_info->spinlock);

	list_add_tail(&trans->links, &trans_info->alloc);

	spin_unlock_bh(&trans_info->spinlock);

	refcount_set(&trans->refcount, 1);

	return trans;
}
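
/* Example: because TREs are reserved at allocation time, a NULL return from
 * gsi_channel_trans_alloc() is how ring exhaustion shows up.  A hypothetical
 * caller treats it as back-pressure rather than a hard error:
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, tre_count,
 *					DMA_TO_DEVICE);
 *	if (!trans)
 *		return -EBUSY;	// retry once completions release TREs
 */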

/* Free a previously-allocated transaction (used only in case of error) */
void gsi_trans_free(struct gsi_trans *trans)
{
	struct gsi_trans_info *trans_info;

	if (!refcount_dec_and_test(&trans->refcount))
		return;

	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_del(&trans->links);

	spin_unlock_bh(&trans_info->spinlock);

	ipa_gsi_trans_release(trans);

	/* Releasing the reserved TREs implicitly frees the sgl[] and
	 * (if present) info[] arrays, plus the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->tre_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum dma_data_direction direction,
		       enum ipa_cmd_opcode opcode)
{
	struct ipa_cmd_info *info;
	u32 which = trans->used++;
	struct scatterlist *sg;

	/* assert(which < trans->tre_count); */

	/* Set the page information for the buffer.  We also need to fill in
	 * the DMA address and length for the buffer (something dma_map_sg()
	 * normally does).
	 */
	sg = &trans->sgl[which];

	sg_set_buf(sg, buf, size);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = sg->length;

	info = &trans->info[which];
	info->opcode = opcode;
	info->direction = direction;
}
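
/* Example: a sketch of queueing an immediate command.  In practice command
 * transactions come from the IPA command layer, which also supplies the
 * trans->info[] array this function fills in; the buffer is assumed to be
 * DMA-mapped already, and "payload", "payload_addr" and the opcode shown
 * are illustrative:
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_NONE);
 *	if (!trans)
 *		return -EBUSY;
 *
 *	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 *			  DMA_TO_DEVICE, IPA_CMD_IP_PACKET_INIT);
 *	gsi_trans_commit_wait(trans);
 */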

/* Add a page transfer to a transaction.  It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used++;	/* Transaction now owns the (DMA mapped) page */

	return 0;
}

/* Add an SKB transfer to a transaction.  No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used;
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used += used;	/* Transaction now owns the (DMA mapped) skb */

	return 0;
}
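
/* Example: a sketch of how an outbound socket buffer flows through the
 * interface (error handling for a hypothetical caller shown):
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
 *	if (!trans)
 *		return -EBUSY;
 *
 *	ret = gsi_trans_skb_add(trans, skb);
 *	if (ret) {
 *		gsi_trans_free(trans);
 *		return ret;
 *	}
 *
 *	gsi_trans_commit(trans, true);	// rings the doorbell immediately
 */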

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}
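
/* For example, the last TRE of an outbound transfer (bei == true and
 * opcode == IPA_CMD_NONE) gets TYPE = GSI_RE_XFER with IEOT and BEI set:
 *
 *	flags = cpu_to_le32(0x00020600);
 *
 * while every earlier TRE in the transaction carries only TYPE and CHAIN:
 *
 *	flags = cpu_to_le32(0x00020001);
 */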

static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* ARM64 can write 16 bytes as a unit with a single instruction.
	 * Doing the assignment this way is an attempt to make that happen.
	 */
	*dest_tre = tre;
}

/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes.  Moves the transaction to the
 * pending list.  Finally, updates the channel ring pointer and optionally
 * rings the doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct ipa_cmd_info *info;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u32 avail;
	u32 i;

	/* assert(trans->used > 0); */

	/* Consume the entries.  If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If there is no info array we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	info = trans->info ? &trans->info[0] : NULL;
	avail = ring->count - ring->index % ring->count;
	dest_tre = gsi_ring_virt(ring, ring->index);
	for_each_sg(trans->sgl, sg, trans->used, i) {
		bool last_tre = i == trans->used - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(ring, 0);
		if (info)
			opcode = info++->opcode;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}
	ring->index += trans->used;

	if (channel->toward_ipa) {
		/* We record TX bytes when they are sent */
		trans->len = byte_count;
		trans->trans_count = channel->trans_count;
		trans->byte_count = channel->byte_count;
		channel->trans_count++;
		channel->byte_count += byte_count;
	}

	/* Associate the last TRE with the transaction */
	gsi_channel_trans_map(channel, ring->index - 1, trans);

	gsi_trans_move_pending(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_channel_tx_queued(channel);
		gsi_channel_doorbell(channel);
	}
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for completion, with millisecond timeout */
int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
				  unsigned long timeout)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
	unsigned long remaining = 1;	/* In case of empty transaction */

	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	remaining = wait_for_completion_timeout(&trans->completion,
						timeout_jiffies);
out_trans_free:
	gsi_trans_free(trans);

	return remaining ? 0 : -ETIMEDOUT;
}
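
/* Example: a sketch of a synchronous caller using the timeout variant (the
 * 100 millisecond value is illustrative).  The caller must not touch the
 * transaction afterward; its reference is dropped by the call whether or
 * not the wait times out:
 *
 *	ret = gsi_trans_commit_wait_timeout(trans, 100);
 *	if (ret == -ETIMEDOUT)
 *		...	// hardware never signaled completion
 */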

/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;
	bool cancelled;

	/* channel->gsi->mutex is held by caller */
	spin_lock_bh(&trans_info->spinlock);

	cancelled = !list_empty(&trans_info->pending);
	list_for_each_entry(trans, &trans_info->pending, links)
		trans->cancelled = true;

	list_splice_tail_init(&trans_info->pending, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);

	/* Schedule NAPI polling to complete the cancelled transactions */
	if (cancelled)
		napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */

	dest_tre = gsi_ring_virt(ring, ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	/* The map array is used to determine what transaction is associated
	 * with a TRE that the hardware reports has completed.  We need one
	 * map entry per TRE.
	 */
	trans_info = &channel->trans_info;
	trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map)
		return -ENOMEM;

	/* We can't use more TREs than there are available in the ring.
	 * This limits the number of transactions that can be outstanding.
	 * Worst case is one TRE per transaction (but we actually limit
	 * it to something a little less than that).  We allocate resources
	 * for transactions (including transaction structures) based on
	 * this maximum number.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);

	/* Transactions are allocated one at a time. */
	ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
				  tre_max, 1);
	if (ret)
		goto err_kfree;

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction.  Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed.  So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 *
	 * All TREs in a transaction must fit within the channel's TLV FIFO,
	 * so a single transaction can allocate at most that many TREs.
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->tlv_count);
	if (ret)
		goto err_trans_pool_exit;

	/* Finally, the tre_avail field is what ultimately limits the number
	 * of outstanding transactions and their resources.  A transaction
	 * allocation succeeds only if the TREs available are sufficient for
	 * what the transaction might need.  Transaction resource pools are
	 * sized based on the maximum number of outstanding TREs, so there
	 * will always be resources available if there are TREs available.
	 */
	atomic_set(&trans_info->tre_avail, tre_max);

	spin_lock_init(&trans_info->spinlock);
	INIT_LIST_HEAD(&trans_info->alloc);
	INIT_LIST_HEAD(&trans_info->pending);
	INIT_LIST_HEAD(&trans_info->complete);
	INIT_LIST_HEAD(&trans_info->polled);

	return 0;

err_trans_pool_exit:
	gsi_trans_pool_exit(&trans_info->pool);
err_kfree:
	kfree(trans_info->map);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}
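
/* Example: a sketch of the setup/teardown pairing as a hypothetical
 * channel-setup path might use it:
 *
 *	ret = gsi_channel_trans_init(gsi, channel_id);
 *	if (ret)
 *		return ret;
 *	...				// channel operates; transactions flow
 *	gsi_channel_trans_exit(channel);
 */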

/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	gsi_trans_pool_exit(&trans_info->pool);
	kfree(trans_info->map);
}