xref: /openbmc/linux/drivers/firmware/tegra/ivc.c (revision d4fd6347)
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses the state field in its transmit header (tx.channel->tx.state)
 * to indicate its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx.channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the counters it owns and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	TEGRA_IVC_STATE_ACK
};

/*
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache-coherent
 * implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};
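
/*
 * Each queue occupies one contiguous region of memory: the above header,
 * immediately followed by num_frames frames of frame_size bytes each (see
 * tegra_ivc_frame_virt() and tegra_ivc_frame_phys() below).
 */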

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}

static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}

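/*
 * Increment the transmit count in our tx.channel header and advance our
 * cached write position, wrapping around at num_frames.
 */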
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->tx.channel->tx.count,
		   READ_ONCE(ivc->tx.channel->tx.count) + 1);

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}

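/*
 * Increment the receive count in our rx.channel header and advance our
 * cached read position, wrapping around at num_frames.
 */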
static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->rx.channel->rx.count,
		   READ_ONCE(ivc->rx.channel->rx.count) + 1);

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->tx.state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->tx.state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}

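/*
 * The transmit-side counterpart of tegra_ivc_check_read(): only invalidate
 * the receiver-owned rx.count in our tx.channel header when the cached
 * counters claim the queue is full.
 */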
static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header,
				  unsigned int frame)
{
	if (WARN_ON(frame >= ivc->num_frames))
		return ERR_PTR(-EINVAL);

	return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}

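/*
 * Cache maintenance over (part of) a single frame. Like tegra_ivc_invalidate()
 * and tegra_ivc_flush() above, these are no-ops unless the channel was set up
 * with a peer device and the memory was therefore DMA-mapped.
 */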
static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order the observation of the peer's tx.count (which may indicate
	 * new data) before the frame data is read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

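/* advance the rx buffer */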
int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our update of rx.channel->rx.count occurs before our read
	 * of rx.channel->tx.count below.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return ERR_PTR(err);

	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before the update of
	 * tx.channel->tx.count.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our update of tx.channel->tx.count occurs before our read
	 * of tx.channel->rx.count below.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);

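/*
 * Start the reset handshake from this end: advertise the SYNC state in our
 * transmit header and notify the remote end, which completes the transition
 * through its own tegra_ivc_notified() path (see the table below).
 */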
void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * =======================================================
 */

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = READ_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_ACK before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);

size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

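/*
 * Total per-queue footprint in shared memory: the frame area (queue_size)
 * plus the cache-aligned channel header that precedes it.
 */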
unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);

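/*
 * Validate the channel geometry: the header fields and total header size must
 * be cache-line aligned (enforced at build time), the frame size and both
 * queue base addresses must be TEGRA_IVC_ALIGN-aligned, num_frames *
 * frame_size must not overflow 32 bits, and the two queues must not overlap.
 */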
static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}

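/*
 * Typical usage, as a rough sketch only (the shared-memory setup and the
 * names peer_dev, rx_base/rx_phys, tx_base/tx_phys and my_notify are
 * hypothetical, not part of this API):
 *
 *	err = tegra_ivc_init(&ivc, peer_dev, rx_base, rx_phys, tx_base,
 *			     tx_phys, 16, tegra_ivc_align(64), my_notify,
 *			     NULL);
 *	if (err < 0)
 *		return err;
 *
 *	tegra_ivc_reset(&ivc);
 *
 * my_notify() is expected to ring whatever doorbell interrupts the remote
 * end. Each incoming notification is then fed to tegra_ivc_notified(); once
 * that returns 0 the channel is established and frames can be exchanged via
 * tegra_ivc_read_get_next_frame()/tegra_ivc_read_advance() and
 * tegra_ivc_write_get_next_frame()/tegra_ivc_write_advance().
 */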
int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);