// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	--------	     ---------
 *	|      |	     |	     |
 *	|  AP  +<---.	.----+ Modem |
 *	|      +--. |	| .->+	     |
 *	|      |  | |	| |  |	     |
 *	--------  | |	| |  ---------
 *		  v |	v |
 *		--+-+---+-+--
 *		|    GSI    |
 *		|-----------|
 *		|	    |
 *		|    IPA    |
 *		|	    |
 *		-------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 * Each TRE refers to a block of data, also located in DRAM.  After writing one
 * or more TREs to a channel, the writer (either the IPA or an EE) writes a
 * doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
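
/* As an illustrative sketch only (not code from this driver), the AP-side
 * lifecycle implemented below looks roughly like this, assuming a struct
 * gsi that has already been initialized and a valid channel_id:
 *
 *	ret = gsi_setup(gsi);		// enable GSI, IRQs, rings
 *	if (!ret)
 *		ret = gsi_channel_start(gsi, channel_id);
 *	// ... transactions queue TREs and ring doorbells ...
 *	ret = gsi_channel_stop(gsi, channel_id);
 *	gsi_channel_reset(gsi, channel_id, false);
 *	gsi_teardown(gsi);		// inverse of gsi_setup()
 *
 * All of these entry points are defined in this file.
 */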

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/** struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of outstanding TREs allowed in a single
 *	transaction on a channel, expressed in bytes (the TRE count times
 *	the ring element size).  This determines the amount of prefetch
 *	performed by the hardware.  We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};
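
/* As a worked example (with an assumed TLV FIFO depth): if a channel's
 * TLV FIFO holds 8 TREs, then with the 16-byte ring element size used
 * here, gsi_channel_program() below sets max_outstanding_tre to
 * 8 * 16 = 128 and outstanding_threshold to 2 * 16 = 32.  The real TRE
 * count comes from gsi_channel_trans_tre_max().
 */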

/** union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the sizes of channel and event ring elements are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
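
/* A worked example of the helpers above, using illustrative addresses:
 * with ring->addr 0x0000000fe0001000 and 16-byte ring elements, index 3
 * maps to the 32-bit offset 0xe0001030, and gsi_ring_index() maps that
 * offset back to index 3.
 */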

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one event ring command at a time, and event
	 * control interrupts should only occur when such a command
	 * is issued here.  Only permit *this* event ring to trigger
	 * an interrupt, and only enable the event control IRQ type
	 * when we expect it to occur.
	 */
	val = BIT(evt_ring_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u before alloc\n",
			evt_ring->state);
		return -EINVAL;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u after alloc\n",
			evt_ring->state);
		ret = -EIO;
	}

	return ret;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "bad event ring state %u before reset\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state %u after reset\n",
			evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
			evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one channel command at a time, and channel
	 * control interrupts should only occur when such a command is
	 * issued here.  So we only permit *this* channel to trigger
	 * an interrupt and only enable the channel control IRQ type
	 * when we expect it to occur.
	 */
	val = BIT(channel_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "bad channel state %u before alloc\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "bad channel state %u after alloc\n", state);
		ret = -EIO;
	}

	return ret;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "bad channel state %u before start\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
		dev_err(dev, "bad channel state %u after start\n", state);
		ret = -EIO;
	}

	return ret;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "bad channel state %u before stop\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
		return ret;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "bad channel state %u after stop\n", state);

	return -EIO;
}

/* Reset a GSI channel in STOPPED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		dev_err(dev, "bad channel state %u before reset\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "bad channel state %u after reset\n", state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "bad channel state %u before dealloc\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "bad channel state %u after dealloc\n", state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
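/* For example, with an 8-element ring an index argument of 0 reports
 * that entry 7 was the last one processed: the unsigned arithmetic in
 * (index - 1) % ring->count wraps 0 - 1 around to 7.
 */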
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity.  Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* We enable the doorbell engine for IPA v3.5.1 */
	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* Starting with IPA v4.0 the command channel uses the escape buffer */
	if (gsi->version != IPA_VERSION_3_5_1 && channel->command)
		val |= USE_ESCAPE_BUF_ONLY_FMASK;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* The low-order 16 bits of the last scratch register must be
	 * preserved, so we read the register and write those bits back
	 * unchanged; only the high-order 16 bits come from our scratch
	 * value.  The sequence assumes the low-order bits don't change
	 * between the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call.  This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}
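
/* A worked example with made-up numbers: if the channel has byte_count
 * 5000 and trans_count 6 at this call, and the previous call recorded
 * 3000 and 4, we report 2000 bytes in 2 transactions to the network
 * stack, then record the new totals for the next call.
 */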

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
	if (result != GENERIC_EE_SUCCESS)
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);

	complete(&gsi->completion);
}

/* Global interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field records
 * the first entry in need of processing, while the @index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		/* ring->addr is not yet assigned; free using the address
		 * returned by the allocation just above.
		 */
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}
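
/* A note on the alignment check in gsi_ring_alloc(): a 128-element ring,
 * for example, occupies 128 * 16 = 2048 bytes and must be 2048-byte
 * aligned.  The DMA API guarantees alignment to the smallest page order
 * covering the requested size, so the check should never fail in practice.
 */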

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
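
/* For example, if the hardware supports 20 event rings (evt_ring_max of
 * 20), the initial map is 0xfff00000 (ids 20-31 unavailable); reserving
 * MHI event ids 10-16 ORs in 0x0001fc00, leaving ids 0-9 and 17-19 free
 * for allocation.
 */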
1492 
1493 /* Setup function for event rings */
1494 static void gsi_evt_ring_setup(struct gsi *gsi)
1495 {
1496 	/* Nothing to do */
1497 }
1498 
1499 /* Inverse of gsi_evt_ring_setup() */
1500 static void gsi_evt_ring_teardown(struct gsi *gsi)
1501 {
1502 	/* Nothing to do */
1503 }
1504 
1505 /* Setup function for a single channel */
1506 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
1507 {
1508 	struct gsi_channel *channel = &gsi->channel[channel_id];
1509 	u32 evt_ring_id = channel->evt_ring_id;
1510 	int ret;
1511 
1512 	if (!channel->gsi)
1513 		return 0;	/* Ignore uninitialized channels */
1514 
1515 	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1516 	if (ret)
1517 		return ret;
1518 
1519 	gsi_evt_ring_program(gsi, evt_ring_id);
1520 
1521 	ret = gsi_channel_alloc_command(gsi, channel_id);
1522 	if (ret)
1523 		goto err_evt_ring_de_alloc;
1524 
1525 	gsi_channel_program(channel, true);
1526 
1527 	if (channel->toward_ipa)
1528 		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
1529 				  gsi_channel_poll, NAPI_POLL_WEIGHT);
1530 	else
1531 		netif_napi_add(&gsi->dummy_dev, &channel->napi,
1532 			       gsi_channel_poll, NAPI_POLL_WEIGHT);
1533 
1534 	return 0;
1535 
1536 err_evt_ring_de_alloc:
1537 	/* We've done nothing with the event ring yet so don't reset */
1538 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1539 
1540 	return ret;
1541 }
1542 
1543 /* Inverse of gsi_channel_setup_one() */
1544 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1545 {
1546 	struct gsi_channel *channel = &gsi->channel[channel_id];
1547 	u32 evt_ring_id = channel->evt_ring_id;
1548 
1549 	if (!channel->gsi)
1550 		return;		/* Ignore uninitialized channels */
1551 
1552 	netif_napi_del(&channel->napi);
1553 
1554 	gsi_channel_deprogram(channel);
1555 	gsi_channel_de_alloc_command(gsi, channel_id);
1556 	gsi_evt_ring_reset_command(gsi, evt_ring_id);
1557 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1558 }
1559 
1560 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1561 			       enum gsi_generic_cmd_opcode opcode)
1562 {
1563 	struct completion *completion = &gsi->completion;
1564 	bool success;
1565 	u32 val;
1566 
1567 	/* The error global interrupt type is always enabled (until we
1568 	 * teardown), so we won't change that.  A generic EE command
1569 	 * completes with a GSI global interrupt of type GP_INT1.  We
1570 	 * only perform one generic command at a time (to allocate or
1571 	 * halt a modem channel) and only from this function.  So we
1572 	 * enable the GP_INT1 IRQ type here while we're expecting it.
1573 	 */
1574 	val = BIT(ERROR_INT) | BIT(GP_INT1);
1575 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1576 
1577 	/* First zero the result code field */
1578 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1579 	val &= ~GENERIC_EE_RESULT_FMASK;
1580 	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1581 
1582 	/* Now issue the command */
1583 	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1584 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1585 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1586 
1587 	success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
1588 
1589 	/* Disable the GP_INT1 IRQ type again */
1590 	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1591 
1592 	if (success)
1593 		return 0;
1594 
1595 	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1596 		opcode, channel_id);
1597 
1598 	return -ETIMEDOUT;
1599 }
1600 
1601 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1602 {
1603 	return gsi_generic_command(gsi, channel_id,
1604 				   GSI_GENERIC_ALLOCATE_CHANNEL);
1605 }
1606 
1607 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1608 {
1609 	(void)gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
1610 }
1611 
1612 /* Setup function for channels */
1613 static int gsi_channel_setup(struct gsi *gsi)
1614 {
1615 	u32 channel_id = 0;
1616 	u32 mask;
1617 	int ret;
1618 
1619 	gsi_evt_ring_setup(gsi);
1620 	gsi_irq_enable(gsi);
1621 
1622 	mutex_lock(&gsi->mutex);
1623 
1624 	do {
1625 		ret = gsi_channel_setup_one(gsi, channel_id);
1626 		if (ret)
1627 			goto err_unwind;
1628 	} while (++channel_id < gsi->channel_count);
1629 
1630 	/* Make sure no channels were defined that hardware does not support */
1631 	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1632 		struct gsi_channel *channel = &gsi->channel[channel_id++];
1633 
1634 		if (!channel->gsi)
1635 			continue;	/* Ignore uninitialized channels */
1636 
1637 		dev_err(gsi->dev, "channel %u not supported by hardware\n",
1638 			channel_id - 1);
1639 		channel_id = gsi->channel_count;
1640 		goto err_unwind;
1641 	}
1642 
1643 	/* Allocate modem channels if necessary */
1644 	mask = gsi->modem_channel_bitmap;
1645 	while (mask) {
1646 		u32 modem_channel_id = __ffs(mask);
1647 
1648 		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1649 		if (ret)
1650 			goto err_unwind_modem;
1651 
1652 		/* Clear bit from mask only after success (for unwind) */
1653 		mask ^= BIT(modem_channel_id);
1654 	}
1655 
1656 	mutex_unlock(&gsi->mutex);
1657 
1658 	return 0;
1659 
1660 err_unwind_modem:
1661 	/* Compute which modem channels need to be deallocated */
1662 	mask ^= gsi->modem_channel_bitmap;
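	/* Example with a hypothetical bitmap: if modem_channel_bitmap was
	 * 0x0b and allocation failed for channel 3 after channels 0 and 1
	 * succeeded, mask held 0x08 on arrival here; the XOR above leaves
	 * 0x03, exactly the channels that were allocated and must now be
	 * halted.
	 */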
1663 	while (mask) {
1664 		channel_id = __fls(mask);
1665 
1666 		mask ^= BIT(channel_id);
1667 
1668 		gsi_modem_channel_halt(gsi, channel_id);
1669 	}
1670 
1671 err_unwind:
1672 	while (channel_id--)
1673 		gsi_channel_teardown_one(gsi, channel_id);
1674 
1675 	mutex_unlock(&gsi->mutex);
1676 
1677 	gsi_irq_disable(gsi);
1678 	gsi_evt_ring_teardown(gsi);
1679 
1680 	return ret;
1681 }
1682 
1683 /* Inverse of gsi_channel_setup() */
1684 static void gsi_channel_teardown(struct gsi *gsi)
1685 {
1686 	u32 mask = gsi->modem_channel_bitmap;
1687 	u32 channel_id;
1688 
1689 	mutex_lock(&gsi->mutex);
1690 
1691 	while (mask) {
1692 		channel_id = __fls(mask);
1693 
1694 		mask ^= BIT(channel_id);
1695 
1696 		gsi_modem_channel_halt(gsi, channel_id);
1697 	}
1698 
1699 	channel_id = gsi->channel_count - 1;
1700 	do
1701 		gsi_channel_teardown_one(gsi, channel_id);
1702 	while (channel_id--);
1703 
1704 	mutex_unlock(&gsi->mutex);
1705 
1706 	gsi_irq_disable(gsi);
1707 	gsi_evt_ring_teardown(gsi);
1708 }
1709 
1710 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
1711 int gsi_setup(struct gsi *gsi)
1712 {
1713 	struct device *dev = gsi->dev;
1714 	u32 val;
1715 	int ret;
1716 
1717 	/* Here is where we first touch the GSI hardware */
1718 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1719 	if (!(val & ENABLED_FMASK)) {
1720 		dev_err(dev, "GSI has not been enabled\n");
1721 		return -EIO;
1722 	}
1723 
1724 	gsi_irq_setup(gsi);
1725 
1726 	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1727 
1728 	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1729 	if (!gsi->channel_count) {
1730 		dev_err(dev, "GSI reports zero channels supported\n");
1731 		return -EINVAL;
1732 	}
1733 	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
1734 		dev_warn(dev,
1735 			 "limiting to %u channels; hardware supports %u\n",
1736 			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
1737 		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1738 	}
1739 
1740 	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1741 	if (!gsi->evt_ring_count) {
1742 		dev_err(dev, "GSI reports zero event rings supported\n");
1743 		return -EINVAL;
1744 	}
1745 	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
1746 		dev_warn(dev,
1747 			 "limiting to %u event rings; hardware supports %u\n",
1748 			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
1749 		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1750 	}
1751 
1752 	/* Initialize the error log */
1753 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1754 
1755 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1756 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1757 
1758 	ret = gsi_channel_setup(gsi);
1759 	if (ret)
1760 		gsi_irq_teardown(gsi);
1761 
1762 	return ret;
1763 }
1764 
1765 /* Inverse of gsi_setup() */
1766 void gsi_teardown(struct gsi *gsi)
1767 {
1768 	gsi_channel_teardown(gsi);
1769 	gsi_irq_teardown(gsi);
1770 }
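/* Typical call sequence, shown as a sketch (in practice the IPA driver
 * is the caller and handles the error at each step):
 *
 *	ret = gsi_init(gsi, pdev, version, count, data);
 *			(at probe time; hardware need not be ready)
 *	ret = gsi_setup(gsi);
 *			(after GSI firmware is loaded and initialized)
 *	...channels carry traffic...
 *	gsi_teardown(gsi);
 *	gsi_exit(gsi);
 *			(at remove time)
 */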
1771 
1772 /* Initialize a channel's event ring */
1773 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1774 {
1775 	struct gsi *gsi = channel->gsi;
1776 	struct gsi_evt_ring *evt_ring;
1777 	int ret;
1778 
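	/* gsi_evt_ring_id_alloc() hands back an unused event ring id
	 * (a non-negative value) or a negative error code
	 */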
1779 	ret = gsi_evt_ring_id_alloc(gsi);
1780 	if (ret < 0)
1781 		return ret;
1782 	channel->evt_ring_id = ret;
1783 
1784 	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1785 	evt_ring->channel = channel;
1786 
1787 	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1788 	if (!ret)
1789 		return 0;	/* Success! */
1790 
1791 	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1792 		ret, gsi_channel_id(channel));
1793 
1794 	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1795 
1796 	return ret;
1797 }
1798 
1799 /* Inverse of gsi_channel_evt_ring_init() */
1800 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1801 {
1802 	u32 evt_ring_id = channel->evt_ring_id;
1803 	struct gsi *gsi = channel->gsi;
1804 	struct gsi_evt_ring *evt_ring;
1805 
1806 	evt_ring = &gsi->evt_ring[evt_ring_id];
1807 	gsi_ring_free(gsi, &evt_ring->ring);
1808 	gsi_evt_ring_id_free(gsi, evt_ring_id);
1809 }
1810 
1811 /* Init function for event rings */
1812 static void gsi_evt_ring_init(struct gsi *gsi)
1813 {
1814 	u32 evt_ring_id = 0;
1815 
1816 	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
1817 	gsi->ieob_enabled_bitmap = 0;
1818 	do
1819 		init_completion(&gsi->evt_ring[evt_ring_id].completion);
1820 	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
1821 }
1822 
1823 /* Inverse of gsi_evt_ring_init() */
1824 static void gsi_evt_ring_exit(struct gsi *gsi)
1825 {
1826 	/* Nothing to do */
1827 }
1828 
1829 static bool gsi_channel_data_valid(struct gsi *gsi,
1830 				   const struct ipa_gsi_endpoint_data *data)
1831 {
1832 #ifdef IPA_VALIDATION
1833 	u32 channel_id = data->channel_id;
1834 	struct device *dev = gsi->dev;
1835 
1836 	/* Make sure channel ids are in the range driver supports */
1837 	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
1838 		dev_err(dev, "bad channel id %u; must be less than %u\n",
1839 			channel_id, GSI_CHANNEL_COUNT_MAX);
1840 		return false;
1841 	}
1842 
1843 	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
1844 		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
1845 		return false;
1846 	}
1847 
1848 	if (!data->channel.tlv_count ||
1849 	    data->channel.tlv_count > GSI_TLV_MAX) {
1850 		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
1851 			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1852 		return false;
1853 	}
1854 
1855 	/* We have to allow at least one maximally-sized transaction to
1856 	 * be outstanding (which would use tlv_count TREs).  Given how
1857 	 * gsi_channel_tre_max() is computed, tre_count has to be almost
1858 	 * twice the TLV FIFO size to satisfy this requirement.
1859 	 */
1860 	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
1861 		dev_err(dev, "channel %u bad tre_count %u; must be at least %u\n",
1862 			channel_id, data->channel.tre_count,
1863 			2 * data->channel.tlv_count - 1);
1864 		return false;
1865 	}
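	/* For example, tlv_count 16 requires a tre_count of at least 31;
	 * since tre_count must also be a power of 2 (checked next), the
	 * smallest value that works is 32.
	 */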
1866 
1867 	if (!is_power_of_2(data->channel.tre_count)) {
1868 		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
1869 			channel_id, data->channel.tre_count);
1870 		return false;
1871 	}
1872 
1873 	if (!is_power_of_2(data->channel.event_count)) {
1874 		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
1875 			channel_id, data->channel.event_count);
1876 		return false;
1877 	}
1878 #endif /* IPA_VALIDATION */
1879 
1880 	return true;
1881 }
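/* A channel configuration satisfying the checks above might look like
 * the following (hypothetical values; the real ones live in the per-SoC
 * ipa_data files):
 *
 *	.ee_id		= GSI_EE_AP,
 *	.channel_id	= 1,
 *	.channel = {
 *		.tre_count	= 256,		(a power of 2)
 *		.event_count	= 256,		(a power of 2)
 *		.tlv_count	= 16,		(256 >= 2 * 16 - 1)
 *	},
 */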
1882 
1883 /* Init function for a single channel */
1884 static int gsi_channel_init_one(struct gsi *gsi,
1885 				const struct ipa_gsi_endpoint_data *data,
1886 				bool command)
1887 {
1888 	struct gsi_channel *channel;
1889 	u32 tre_count;
1890 	int ret;
1891 
1892 	if (!gsi_channel_data_valid(gsi, data))
1893 		return -EINVAL;
1894 
1895 	/* Worst case we need an event for every outstanding TRE */
1896 	if (data->channel.tre_count > data->channel.event_count) {
1897 		tre_count = data->channel.event_count;
1898 		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
1899 			 data->channel_id, tre_count);
1900 	} else {
1901 		tre_count = data->channel.tre_count;
1902 	}
1903 
1904 	channel = &gsi->channel[data->channel_id];
1905 	memset(channel, 0, sizeof(*channel));
1906 
1907 	channel->gsi = gsi;
1908 	channel->toward_ipa = data->toward_ipa;
1909 	channel->command = command;
1910 	channel->tlv_count = data->channel.tlv_count;
1911 	channel->tre_count = tre_count;
1912 	channel->event_count = data->channel.event_count;
1913 	init_completion(&channel->completion);
1914 
1915 	ret = gsi_channel_evt_ring_init(channel);
1916 	if (ret)
1917 		goto err_clear_gsi;
1918 
1919 	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
1920 	if (ret) {
1921 		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
1922 			ret, data->channel_id);
1923 		goto err_channel_evt_ring_exit;
1924 	}
1925 
1926 	ret = gsi_channel_trans_init(gsi, data->channel_id);
1927 	if (ret)
1928 		goto err_ring_free;
1929 
1930 	if (command) {
1931 		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
1932 
1933 		ret = ipa_cmd_pool_init(channel, tre_max);
1934 	}
1935 	if (!ret)
1936 		return 0;	/* Success! */
1937 
1938 	gsi_channel_trans_exit(channel);
1939 err_ring_free:
1940 	gsi_ring_free(gsi, &channel->tre_ring);
1941 err_channel_evt_ring_exit:
1942 	gsi_channel_evt_ring_exit(channel);
1943 err_clear_gsi:
1944 	channel->gsi = NULL;	/* Mark it not (fully) initialized */
1945 
1946 	return ret;
1947 }
1948 
1949 /* Inverse of gsi_channel_init_one() */
1950 static void gsi_channel_exit_one(struct gsi_channel *channel)
1951 {
1952 	if (!channel->gsi)
1953 		return;		/* Ignore uninitialized channels */
1954 
1955 	if (channel->command)
1956 		ipa_cmd_pool_exit(channel);
1957 	gsi_channel_trans_exit(channel);
1958 	gsi_ring_free(channel->gsi, &channel->tre_ring);
1959 	gsi_channel_evt_ring_exit(channel);
1960 }
1961 
1962 /* Init function for channels */
1963 static int gsi_channel_init(struct gsi *gsi, u32 count,
1964 			    const struct ipa_gsi_endpoint_data *data)
1965 {
1966 	bool modem_alloc;
1967 	int ret = 0;
1968 	u32 i;
1969 
1970 	/* IPA v4.2 requires the AP to allocate channels for the modem */
1971 	modem_alloc = gsi->version == IPA_VERSION_4_2;
1972 
1973 	gsi_evt_ring_init(gsi);
1974 
1975 	/* The endpoint data array is indexed by endpoint name */
1976 	for (i = 0; i < count; i++) {
1977 		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
1978 
1979 		if (ipa_gsi_endpoint_data_empty(&data[i]))
1980 			continue;	/* Skip over empty slots */
1981 
1982 		/* Mark modem channels to be allocated (hardware workaround) */
1983 		if (data[i].ee_id == GSI_EE_MODEM) {
1984 			if (modem_alloc)
1985 				gsi->modem_channel_bitmap |=
1986 						BIT(data[i].channel_id);
1987 			continue;
1988 		}
1989 
1990 		ret = gsi_channel_init_one(gsi, &data[i], command);
1991 		if (ret)
1992 			goto err_unwind;
1993 	}
1994 
1995 	return ret;
1996 
1997 err_unwind:
1998 	while (i--) {
1999 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2000 			continue;
2001 		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2002 			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2003 			continue;
2004 		}
2005 		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2006 	}
2007 	gsi_evt_ring_exit(gsi);
2008 
2009 	return ret;
2010 }
2011 
2012 /* Inverse of gsi_channel_init() */
2013 static void gsi_channel_exit(struct gsi *gsi)
2014 {
2015 	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2016 
2017 	do
2018 		gsi_channel_exit_one(&gsi->channel[channel_id]);
2019 	while (channel_id--);
2020 	gsi->modem_channel_bitmap = 0;
2021 
2022 	gsi_evt_ring_exit(gsi);
2023 }
2024 
2025 /* Init function for GSI.  GSI hardware does not need to be "ready" */
2026 int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2027 	     enum ipa_version version, u32 count,
2028 	     const struct ipa_gsi_endpoint_data *data)
2029 {
2030 	struct device *dev = &pdev->dev;
2031 	struct resource *res;
2032 	resource_size_t size;
2033 	int ret;
2034 
2035 	gsi_validate_build();
2036 
2037 	gsi->dev = dev;
2038 	gsi->version = version;
2039 
2040 	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
2041 	 * network device structure, but the GSI layer does not have one,
2042 	 * so we must create a dummy network device for this purpose.
2043 	 */
2044 	init_dummy_netdev(&gsi->dummy_dev);
2045 
2046 	/* Get GSI memory range and map it */
2047 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2048 	if (!res) {
2049 		dev_err(dev, "DT error getting \"gsi\" memory property\n");
2050 		return -ENODEV;
2051 	}
2052 
2053 	size = resource_size(res);
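	/* The region, start through end, must fit in 32 bits */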
2054 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
2055 		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2056 		return -EINVAL;
2057 	}
2058 
2059 	gsi->virt = ioremap(res->start, size);
2060 	if (!gsi->virt) {
2061 		dev_err(dev, "unable to remap \"gsi\" memory\n");
2062 		return -ENOMEM;
2063 	}
2064 
2065 	init_completion(&gsi->completion);
2066 
2067 	ret = gsi_irq_init(gsi, pdev);
2068 	if (ret)
2069 		goto err_iounmap;
2070 
2071 	ret = gsi_channel_init(gsi, count, data);
2072 	if (ret)
2073 		goto err_irq_exit;
2074 
2075 	mutex_init(&gsi->mutex);
2076 
2077 	return 0;
2078 
2079 err_irq_exit:
2080 	gsi_irq_exit(gsi);
2081 err_iounmap:
2082 	iounmap(gsi->virt);
2083 
2084 	return ret;
2085 }
2086 
2087 /* Inverse of gsi_init() */
2088 void gsi_exit(struct gsi *gsi)
2089 {
2090 	mutex_destroy(&gsi->mutex);
2091 	gsi_channel_exit(gsi);
2092 	gsi_irq_exit(gsi);
2093 	iounmap(gsi->virt);
2094 }
2095 
2096 /* The maximum number of outstanding TREs on a channel.  This limits
2097  * a channel's maximum number of transactions outstanding (worst case
2098  * is one TRE per transaction).
2099  *
2100  * The absolute limit is the number of TREs in the channel's TRE ring,
2101  * and in theory we should be able to use all of them.  But in practice,
2102  * doing that led to the hardware reporting exhaustion of event ring
2103  * slots for writing completion information.  So the hardware limit
2104  * would be (tre_count - 1).
2105  *
2106  * We reduce it a bit further though.  Transaction resource pools are
2107  * sized to be a little larger than this maximum, to allow resource
2108  * allocations to always be contiguous.  The number of entries in a
2109  * TRE ring buffer is a power of 2, and the extra resources in a pool
2110  * tend to nearly double the memory allocated for it.  Reducing the
2111  * maximum number of outstanding TREs allows the number of entries in
2112  * a pool to avoid crossing that power-of-2 boundary, and this can
2113  * substantially reduce pool memory requirements.  The number we
2114  * reduce it by matches the number added in gsi_trans_pool_init().
2115  */
2116 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2117 {
2118 	struct gsi_channel *channel = &gsi->channel[channel_id];
2119 
2120 	/* Hardware limit is channel->tre_count - 1 */
2121 	return channel->tre_count - (channel->tlv_count - 1);
2122 }
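/* Worked example with hypothetical values: tre_count 256 and tlv_count 16
 * give a maximum of 256 - (16 - 1) = 241 outstanding TREs.  A transaction
 * pool sized with tlv_count - 1 extra entries then holds 256 elements and
 * stays at the power-of-2 boundary; using the hardware limit of 255 would
 * push the pool to 270 entries, crossing it.
 */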
2123 
2124 /* Returns the maximum number of TREs in a single transaction for a channel */
2125 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2126 {
2127 	struct gsi_channel *channel = &gsi->channel[channel_id];
2128 
2129 	return channel->tlv_count;
2130 }
2131