// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	--------	     ---------
 *	|      |	     |	     |
 *	|  AP  +<---.	.----+ Modem |
 *	|      +--. |	| .->+	     |
 *	|      |  | |	| |  |	     |
 *	--------  | |	| |  ---------
 *		  v |	v |
 *		--+-+---+-+--
 *		|    GSI    |
 *		|-----------|
 *		|	    |
 *		|    IPA    |
 *		|	    |
 *		-------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing one
 * or more TREs to a channel, the writer (either the IPA or an EE) writes a
 * doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
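
/* An illustration of the in-order completion rule above: if the AP queues
 * four transfers described by TREs 0-3 and then sees a completion event
 * pointing at TRE 3, TREs 0-2 are known to have completed as well, so a
 * single interrupt can retire all four transfers.
 */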

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};
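
/* A gsi_event is 16 bytes.  gsi_validate_build() verifies that this matches
 * GSI_RING_ELEMENT_SIZE, since channel and event ring elements are assumed
 * to have the same fixed size.
 */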

/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel, expressed in bytes.  This determines the amount of
 *	prefetch performed by the hardware.  We configure this to equal
 *	the size of the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};
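
/* Worked example (illustrative only): with 16-byte ring elements, a channel
 * whose transactions may use at most 8 TREs would have max_outstanding_tre
 * programmed as 8 * 16 = 128 bytes, and outstanding_threshold programmed as
 * 2 * 16 = 32 bytes (two TREs).  See gsi_channel_program().
 */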

/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};
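
/* The data words in the union above overlay the GPI structure, giving
 * gsi_channel_program() 32-bit chunks to write into the four channel
 * scratch registers.
 */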

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	u32 adjust;

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Reverse the offset adjustment for inter-EE register offsets */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
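
/* Illustrative examples of the three helpers above, assuming an 8-element
 * ring of 16-byte elements:
 *
 *	gsi_ring_virt(ring, 10) == gsi_ring_virt(ring, 2)	(10 % 8 == 2)
 *	gsi_ring_addr(ring, 2) == lower_32_bits(ring->addr) + 2 * 16
 *	gsi_ring_index(ring, gsi_ring_addr(ring, 2)) == 2
 *
 * Note that gsi_ring_virt() reduces its index modulo the ring count but
 * gsi_ring_addr() does not, so callers of the latter must supply an
 * in-range index.
 */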

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one event ring command at a time, and event
	 * control interrupts should only occur when such a command
	 * is issued here.  Only permit *this* event ring to trigger
	 * an interrupt, and only enable the event control IRQ type
	 * when we expect it to occur.
	 *
	 * There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	val = BIT(evt_ring_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, evt_ring->state);
		return -EINVAL;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
			evt_ring_id, evt_ring->state);
		ret = -EIO;
	}

	return ret;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
		dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
			evt_ring_id, evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
			evt_ring_id, evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one channel command at a time, and channel
	 * control interrupts should only occur when such a command is
	 * issued here.  So we only permit *this* channel to trigger
	 * an interrupt and only enable the channel control IRQ type
	 * when we expect it to occur.
	 *
	 * There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	val = BIT(channel_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u after alloc\n",
			channel_id, state);
		ret = -EIO;
	}

	return ret;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
		dev_err(dev, "channel %u bad state %u after start\n",
			gsi_channel_id(channel), state);
		ret = -EIO;
	}

	return ret;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
		return ret;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
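
/* Example of the wrap-around above: with index 0 and an 8-element ring,
 * (index - 1) % ring->count is evaluated in unsigned arithmetic, so the
 * result is 0xffffffff % 8 == 7, and the doorbell reports the address of
 * the last element in the ring.
 */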

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity.  Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* We enable the doorbell engine for IPA v3.5.1 */
	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch.  We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* Only the upper 16 bits of the last scratch register carry our GPI
	 * configuration; the lower 16 bits must be preserved.  The
	 * read-modify-write sequence below assumes those bits remain
	 * unchanged between the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call.  This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}
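
/* Worked example (illustrative only): if byte_count is 3000 and trans_count
 * is 4 when the doorbell is rung, and the previous call recorded 1000 and 1,
 * this call reports 2000 bytes and 3 transactions queued since the last
 * call, then records 3000 and 4 for next time.
 */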

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}
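
/* Worked example (illustrative only): if the completed transaction recorded
 * byte_count 1000 when it was committed and its own length is 500, then
 * 1500 bytes were committed through the end of that transaction.  If
 * compl_byte_count was previously 600, this reports 900 newly-completed
 * bytes and advances compl_byte_count to 1500.
 */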

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of the two GENERIC
	 * GSI commands.  We use these to allocate and halt channels on
	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
	 * allocated, the modem "owns" these channels, and as a result we
	 * have no way of knowing the channel's state at any given time.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command.  We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 *
	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
	 * if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_CHANNEL_NOT_RUNNING:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Global EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion (IEOB) interrupt handler */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field
 * identifies the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}
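
/* Example of the alignment rule above (illustrative): a 128-element ring
 * needs a 128 * 16 = 2048-byte buffer whose DMA address is 2048-byte
 * aligned.  dma_alloc_coherent() normally satisfies this for power-of-2
 * sizes; the check above catches the case where it does not.
 */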

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available; clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
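
/* Worked example (illustrative): with evt_ring_max == 20 on a 64-bit build,
 * GENMASK(63, 20) truncated to u32 marks event ids 20-31 unavailable, and
 * the MHI range marks ids 10-16 reserved; ids 0-9 and 17-19 remain
 * allocatable by gsi_evt_ring_id_alloc().
 */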
1559 
1560 /* Setup function for event rings */
1561 static void gsi_evt_ring_setup(struct gsi *gsi)
1562 {
1563 	/* Nothing to do */
1564 }
1565 
1566 /* Inverse of gsi_evt_ring_setup() */
1567 static void gsi_evt_ring_teardown(struct gsi *gsi)
1568 {
1569 	/* Nothing to do */
1570 }
1571 
1572 /* Setup function for a single channel */
1573 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
1574 {
1575 	struct gsi_channel *channel = &gsi->channel[channel_id];
1576 	u32 evt_ring_id = channel->evt_ring_id;
1577 	int ret;
1578 
1579 	if (!channel->gsi)
1580 		return 0;	/* Ignore uninitialized channels */
1581 
1582 	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1583 	if (ret)
1584 		return ret;
1585 
1586 	gsi_evt_ring_program(gsi, evt_ring_id);
1587 
1588 	ret = gsi_channel_alloc_command(gsi, channel_id);
1589 	if (ret)
1590 		goto err_evt_ring_de_alloc;
1591 
1592 	gsi_channel_program(channel, true);
1593 
1594 	if (channel->toward_ipa)
1595 		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
1596 				  gsi_channel_poll, NAPI_POLL_WEIGHT);
1597 	else
1598 		netif_napi_add(&gsi->dummy_dev, &channel->napi,
1599 			       gsi_channel_poll, NAPI_POLL_WEIGHT);
1600 
1601 	return 0;
1602 
1603 err_evt_ring_de_alloc:
1604 	/* We've done nothing with the event ring yet so don't reset */
1605 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1606 
1607 	return ret;
1608 }
1609 
1610 /* Inverse of gsi_channel_setup_one() */
1611 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1612 {
1613 	struct gsi_channel *channel = &gsi->channel[channel_id];
1614 	u32 evt_ring_id = channel->evt_ring_id;
1615 
1616 	if (!channel->gsi)
1617 		return;		/* Ignore uninitialized channels */
1618 
1619 	netif_napi_del(&channel->napi);
1620 
1621 	gsi_channel_deprogram(channel);
1622 	gsi_channel_de_alloc_command(gsi, channel_id);
1623 	gsi_evt_ring_reset_command(gsi, evt_ring_id);
1624 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1625 }
1626 
1627 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1628 			       enum gsi_generic_cmd_opcode opcode)
1629 {
1630 	struct completion *completion = &gsi->completion;
1631 	bool success;
1632 	u32 val;
1633 
1634 	/* The error global interrupt type is always enabled (until we
1635 	 * tear down), so we won't change that.  A generic EE command
1636 	 * completes with a GSI global interrupt of type GP_INT1.  We
1637 	 * only perform one generic command at a time (to allocate or
1638 	 * halt a modem channel) and only from this function.  So we
1639 	 * enable the GP_INT1 IRQ type here while we're expecting it.
1640 	 */
1641 	val = BIT(ERROR_INT) | BIT(GP_INT1);
1642 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1643 
1644 	/* First zero the result code field */
1645 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1646 	val &= ~GENERIC_EE_RESULT_FMASK;
1647 	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1648 
1649 	/* Now issue the command */
1650 	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1651 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1652 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1653 
1654 	success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
1655 
1656 	/* Disable the GP_INT1 IRQ type again */
1657 	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1658 
1659 	if (success)
1660 		return gsi->result;
1661 
1662 	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1663 		opcode, channel_id);
1664 
1665 	return -ETIMEDOUT;
1666 }
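
/* Note: the GP_INT1 handler reads the result code back out of the
 * scratch register zeroed above, and translates it into the errno
 * value recorded in gsi->result (0 on success, -EAGAIN if the channel
 * is reported not running, -EIO otherwise).
 */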
1667 
1668 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1669 {
1670 	return gsi_generic_command(gsi, channel_id,
1671 				   GSI_GENERIC_ALLOCATE_CHANNEL);
1672 }
1673 
1674 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1675 {
1676 	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
1677 	int ret;
1678 
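	/* A halt command completes with -EAGAIN if the channel is
	 * reported as not running; retry in that case, making at most
	 * 1 + GSI_CHANNEL_MODEM_HALT_RETRIES attempts in all.
	 */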
1679 	do
1680 		ret = gsi_generic_command(gsi, channel_id,
1681 					  GSI_GENERIC_HALT_CHANNEL);
1682 	while (ret == -EAGAIN && retries--);
1683 
1684 	if (ret)
1685 		dev_err(gsi->dev, "error %d halting modem channel %u\n",
1686 			ret, channel_id);
1687 }
1688 
1689 /* Setup function for channels */
1690 static int gsi_channel_setup(struct gsi *gsi)
1691 {
1692 	u32 channel_id = 0;
1693 	u32 mask;
1694 	int ret;
1695 
1696 	gsi_evt_ring_setup(gsi);
1697 	gsi_irq_enable(gsi);
1698 
1699 	mutex_lock(&gsi->mutex);
1700 
1701 	do {
1702 		ret = gsi_channel_setup_one(gsi, channel_id);
1703 		if (ret)
1704 			goto err_unwind;
1705 	} while (++channel_id < gsi->channel_count);
1706 
1707 	/* Make sure no channels were defined that the hardware does not support */
1708 	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1709 		struct gsi_channel *channel = &gsi->channel[channel_id++];
1710 
1711 		if (!channel->gsi)
1712 			continue;	/* Ignore uninitialized channels */
1713 
1714 		dev_err(gsi->dev, "channel %u not supported by hardware\n",
1715 			channel_id - 1);
1716 		channel_id = gsi->channel_count;
1717 		goto err_unwind;
1718 	}
1719 
1720 	/* Allocate modem channels if necessary */
1721 	mask = gsi->modem_channel_bitmap;
1722 	while (mask) {
1723 		u32 modem_channel_id = __ffs(mask);
1724 
1725 		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1726 		if (ret)
1727 			goto err_unwind_modem;
1728 
1729 		/* Clear bit from mask only after success (for unwind) */
1730 		mask ^= BIT(modem_channel_id);
1731 	}
1732 
1733 	mutex_unlock(&gsi->mutex);
1734 
1735 	return 0;
1736 
1737 err_unwind_modem:
1738 	/* Compute which modem channels need to be deallocated */
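	/* Illustration: if modem_channel_bitmap is 0x30 and channel 4
	 * was allocated before channel 5 failed, mask arrives here as
	 * 0x20; the XOR below leaves 0x10, so only already-allocated
	 * channel 4 is halted.
	 */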
1739 	mask ^= gsi->modem_channel_bitmap;
1740 	while (mask) {
1741 		channel_id = __fls(mask);
1742 
1743 		mask ^= BIT(channel_id);
1744 
1745 		gsi_modem_channel_halt(gsi, channel_id);
1746 	}
1747 
1748 err_unwind:
1749 	while (channel_id--)
1750 		gsi_channel_teardown_one(gsi, channel_id);
1751 
1752 	mutex_unlock(&gsi->mutex);
1753 
1754 	gsi_irq_disable(gsi);
1755 	gsi_evt_ring_teardown(gsi);
1756 
1757 	return ret;
1758 }
1759 
1760 /* Inverse of gsi_channel_setup() */
1761 static void gsi_channel_teardown(struct gsi *gsi)
1762 {
1763 	u32 mask = gsi->modem_channel_bitmap;
1764 	u32 channel_id;
1765 
1766 	mutex_lock(&gsi->mutex);
1767 
1768 	while (mask) {
1769 		channel_id = __fls(mask);
1770 
1771 		mask ^= BIT(channel_id);
1772 
1773 		gsi_modem_channel_halt(gsi, channel_id);
1774 	}
1775 
1776 	channel_id = gsi->channel_count - 1;
1777 	do
1778 		gsi_channel_teardown_one(gsi, channel_id);
1779 	while (channel_id--);
1780 
1781 	mutex_unlock(&gsi->mutex);
1782 
1783 	gsi_irq_disable(gsi);
1784 	gsi_evt_ring_teardown(gsi);
1785 }
1786 
1787 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
1788 int gsi_setup(struct gsi *gsi)
1789 {
1790 	struct device *dev = gsi->dev;
1791 	u32 val;
1792 	int ret;
1793 
1794 	/* Here is where we first touch the GSI hardware */
1795 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1796 	if (!(val & ENABLED_FMASK)) {
1797 		dev_err(dev, "GSI has not been enabled\n");
1798 		return -EIO;
1799 	}
1800 
1801 	gsi_irq_setup(gsi);
1802 
1803 	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1804 
1805 	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1806 	if (!gsi->channel_count) {
1807 		dev_err(dev, "GSI reports zero channels supported\n");
1808 		return -EINVAL;
1809 	}
1810 	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
1811 		dev_warn(dev,
1812 			 "limiting to %u channels; hardware supports %u\n",
1813 			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
1814 		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1815 	}
1816 
1817 	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1818 	if (!gsi->evt_ring_count) {
1819 		dev_err(dev, "GSI reports zero event rings supported\n");
1820 		return -EINVAL;
1821 	}
1822 	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
1823 		dev_warn(dev,
1824 			 "limiting to %u event rings; hardware supports %u\n",
1825 			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
1826 		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1827 	}
1828 
1829 	/* Initialize the error log */
1830 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1831 
1832 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1833 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1834 
1835 	ret = gsi_channel_setup(gsi);
1836 	if (ret)
1837 		gsi_irq_teardown(gsi);
1838 
1839 	return ret;
1840 }
1841 
1842 /* Inverse of gsi_setup() */
1843 void gsi_teardown(struct gsi *gsi)
1844 {
1845 	gsi_channel_teardown(gsi);
1846 	gsi_irq_teardown(gsi);
1847 }
1848 
1849 /* Initialize a channel's event ring */
1850 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1851 {
1852 	struct gsi *gsi = channel->gsi;
1853 	struct gsi_evt_ring *evt_ring;
1854 	int ret;
1855 
1856 	ret = gsi_evt_ring_id_alloc(gsi);
1857 	if (ret < 0)
1858 		return ret;
1859 	channel->evt_ring_id = ret;
1860 
1861 	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1862 	evt_ring->channel = channel;
1863 
1864 	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1865 	if (!ret)
1866 		return 0;	/* Success! */
1867 
1868 	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1869 		ret, gsi_channel_id(channel));
1870 
1871 	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1872 
1873 	return ret;
1874 }
1875 
1876 /* Inverse of gsi_channel_evt_ring_init() */
1877 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1878 {
1879 	u32 evt_ring_id = channel->evt_ring_id;
1880 	struct gsi *gsi = channel->gsi;
1881 	struct gsi_evt_ring *evt_ring;
1882 
1883 	evt_ring = &gsi->evt_ring[evt_ring_id];
1884 	gsi_ring_free(gsi, &evt_ring->ring);
1885 	gsi_evt_ring_id_free(gsi, evt_ring_id);
1886 }
1887 
1888 /* Init function for event rings */
1889 static void gsi_evt_ring_init(struct gsi *gsi)
1890 {
1891 	u32 evt_ring_id = 0;
1892 
1893 	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
1894 	gsi->ieob_enabled_bitmap = 0;
1895 	do
1896 		init_completion(&gsi->evt_ring[evt_ring_id].completion);
1897 	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
1898 }
1899 
1900 /* Inverse of gsi_evt_ring_init() */
1901 static void gsi_evt_ring_exit(struct gsi *gsi)
1902 {
1903 	/* Nothing to do */
1904 }
1905 
1906 static bool gsi_channel_data_valid(struct gsi *gsi,
1907 				   const struct ipa_gsi_endpoint_data *data)
1908 {
1909 #ifdef IPA_VALIDATION
1910 	u32 channel_id = data->channel_id;
1911 	struct device *dev = gsi->dev;
1912 
1913 	/* Make sure channel ids are in the range the driver supports */
1914 	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
1915 		dev_err(dev, "bad channel id %u; must be less than %u\n",
1916 			channel_id, GSI_CHANNEL_COUNT_MAX);
1917 		return false;
1918 	}
1919 
1920 	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
1921 		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
1922 		return false;
1923 	}
1924 
1925 	if (!data->channel.tlv_count ||
1926 	    data->channel.tlv_count > GSI_TLV_MAX) {
1927 		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
1928 			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1929 		return false;
1930 	}
1931 
1932 	/* We have to allow at least one maximally-sized transaction to
1933 	 * be outstanding (which would use tlv_count TREs).  Given how
1934 	 * gsi_channel_tre_max() is computed, tre_count has to be almost
1935 	 * twice the TLV FIFO size to satisfy this requirement.
1936 	 */
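	/* Illustration: tlv_count = 16 requires tre_count >= 31, so
	 * the smallest power-of-2 ring size passing the check below
	 * is 32 TREs.
	 */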
1937 	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
1938 		dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
1939 			channel_id, data->channel.tre_count,
1940 			data->channel.tlv_count);
1941 		return false;
1942 	}
1943 
1944 	if (!is_power_of_2(data->channel.tre_count)) {
1945 		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
1946 			channel_id, data->channel.tre_count);
1947 		return false;
1948 	}
1949 
1950 	if (!is_power_of_2(data->channel.event_count)) {
1951 		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
1952 			channel_id, data->channel.event_count);
1953 		return false;
1954 	}
1955 #endif /* IPA_VALIDATION */
1956 
1957 	return true;
1958 }
1959 
1960 /* Init function for a single channel */
1961 static int gsi_channel_init_one(struct gsi *gsi,
1962 				const struct ipa_gsi_endpoint_data *data,
1963 				bool command)
1964 {
1965 	struct gsi_channel *channel;
1966 	u32 tre_count;
1967 	int ret;
1968 
1969 	if (!gsi_channel_data_valid(gsi, data))
1970 		return -EINVAL;
1971 
1972 	/* Worst case we need an event for every outstanding TRE */
1973 	if (data->channel.tre_count > data->channel.event_count) {
1974 		tre_count = data->channel.event_count;
1975 		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
1976 			 data->channel_id, tre_count);
1977 	} else {
1978 		tre_count = data->channel.tre_count;
1979 	}
1980 
1981 	channel = &gsi->channel[data->channel_id];
1982 	memset(channel, 0, sizeof(*channel));
1983 
1984 	channel->gsi = gsi;
1985 	channel->toward_ipa = data->toward_ipa;
1986 	channel->command = command;
1987 	channel->tlv_count = data->channel.tlv_count;
1988 	channel->tre_count = tre_count;
1989 	channel->event_count = data->channel.event_count;
1990 	init_completion(&channel->completion);
1991 
1992 	ret = gsi_channel_evt_ring_init(channel);
1993 	if (ret)
1994 		goto err_clear_gsi;
1995 
1996 	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
1997 	if (ret) {
1998 		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
1999 			ret, data->channel_id);
2000 		goto err_channel_evt_ring_exit;
2001 	}
2002 
2003 	ret = gsi_channel_trans_init(gsi, data->channel_id);
2004 	if (ret)
2005 		goto err_ring_free;
2006 
2007 	if (command) {
2008 		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2009 
2010 		ret = ipa_cmd_pool_init(channel, tre_max);
2011 	}
2012 	if (!ret)
2013 		return 0;	/* Success! */
2014 
2015 	gsi_channel_trans_exit(channel);
2016 err_ring_free:
2017 	gsi_ring_free(gsi, &channel->tre_ring);
2018 err_channel_evt_ring_exit:
2019 	gsi_channel_evt_ring_exit(channel);
2020 err_clear_gsi:
2021 	channel->gsi = NULL;	/* Mark it not (fully) initialized */
2022 
2023 	return ret;
2024 }
2025 
2026 /* Inverse of gsi_channel_init_one() */
2027 static void gsi_channel_exit_one(struct gsi_channel *channel)
2028 {
2029 	if (!channel->gsi)
2030 		return;		/* Ignore uninitialized channels */
2031 
2032 	if (channel->command)
2033 		ipa_cmd_pool_exit(channel);
2034 	gsi_channel_trans_exit(channel);
2035 	gsi_ring_free(channel->gsi, &channel->tre_ring);
2036 	gsi_channel_evt_ring_exit(channel);
2037 }
2038 
2039 /* Init function for channels */
2040 static int gsi_channel_init(struct gsi *gsi, u32 count,
2041 			    const struct ipa_gsi_endpoint_data *data)
2042 {
2043 	bool modem_alloc;
2044 	int ret = 0;
2045 	u32 i;
2046 
2047 	/* IPA v4.2 requires the AP to allocate channels for the modem */
2048 	modem_alloc = gsi->version == IPA_VERSION_4_2;
2049 
2050 	gsi_evt_ring_init(gsi);
2051 
2052 	/* The endpoint data array is indexed by endpoint name */
2053 	for (i = 0; i < count; i++) {
2054 		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
2055 
2056 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2057 			continue;	/* Skip over empty slots */
2058 
2059 		/* Mark modem channels to be allocated (hardware workaround) */
2060 		if (data[i].ee_id == GSI_EE_MODEM) {
2061 			if (modem_alloc)
2062 				gsi->modem_channel_bitmap |=
2063 						BIT(data[i].channel_id);
2064 			continue;
2065 		}
2066 
2067 		ret = gsi_channel_init_one(gsi, &data[i], command);
2068 		if (ret)
2069 			goto err_unwind;
2070 	}
2071 
2072 	return ret;
2073 
2074 err_unwind:
2075 	while (i--) {
2076 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2077 			continue;
2078 		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2079 			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2080 			continue;
2081 		}
2082 		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2083 	}
2084 	gsi_evt_ring_exit(gsi);
2085 
2086 	return ret;
2087 }
2088 
2089 /* Inverse of gsi_channel_init() */
2090 static void gsi_channel_exit(struct gsi *gsi)
2091 {
2092 	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2093 
2094 	do
2095 		gsi_channel_exit_one(&gsi->channel[channel_id]);
2096 	while (channel_id--);
2097 	gsi->modem_channel_bitmap = 0;
2098 
2099 	gsi_evt_ring_exit(gsi);
2100 }
2101 
2102 /* Init function for GSI.  GSI hardware does not need to be "ready" */
2103 int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2104 	     enum ipa_version version, u32 count,
2105 	     const struct ipa_gsi_endpoint_data *data)
2106 {
2107 	struct device *dev = &pdev->dev;
2108 	struct resource *res;
2109 	resource_size_t size;
2110 	u32 adjust;
2111 	int ret;
2112 
2113 	gsi_validate_build();
2114 
2115 	gsi->dev = dev;
2116 	gsi->version = version;
2117 
2118 	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
2119 	 * network device structure, but the GSI layer does not have one,
2120 	 * so we must create a dummy network device for this purpose.
2121 	 */
2122 	init_dummy_netdev(&gsi->dummy_dev);
2123 
2124 	/* Get GSI memory range and map it */
2125 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2126 	if (!res) {
2127 		dev_err(dev, "DT error getting \"gsi\" memory property\n");
2128 		return -ENODEV;
2129 	}
2130 
2131 	size = resource_size(res);
2132 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
2133 		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2134 		return -EINVAL;
2135 	}
2136 
2137 	/* Make sure we can make our pointer adjustment if necessary */
2138 	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
2139 	if (res->start < adjust) {
2140 		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
2141 			adjust);
2142 		return -EINVAL;
2143 	}
2144 
2145 	gsi->virt = ioremap(res->start, size);
2146 	if (!gsi->virt) {
2147 		dev_err(dev, "unable to remap \"gsi\" memory\n");
2148 		return -ENOMEM;
2149 	}
2150 	/* Adjust register range pointer downward for newer IPA versions */
2151 	gsi->virt -= adjust;
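	/* The offsets we use were defined for the pre-v4.5 layout; on
	 * newer hardware the EE register group sits GSI_EE_REG_ADJUST
	 * bytes lower in the region, so backing the base pointer down
	 * keeps gsi->virt + offset correct for both layouts.
	 */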
2152 
2153 	init_completion(&gsi->completion);
2154 
2155 	ret = gsi_irq_init(gsi, pdev);
2156 	if (ret)
2157 		goto err_iounmap;
2158 
2159 	ret = gsi_channel_init(gsi, count, data);
2160 	if (ret)
2161 		goto err_irq_exit;
2162 
2163 	mutex_init(&gsi->mutex);
2164 
2165 	return 0;
2166 
2167 err_irq_exit:
2168 	gsi_irq_exit(gsi);
2169 err_iounmap:
2170 	iounmap(gsi->virt);
2171 
2172 	return ret;
2173 }
2174 
2175 /* Inverse of gsi_init() */
2176 void gsi_exit(struct gsi *gsi)
2177 {
2178 	mutex_destroy(&gsi->mutex);
2179 	gsi_channel_exit(gsi);
2180 	gsi_irq_exit(gsi);
2181 	iounmap(gsi->virt);
2182 }
2183 
2184 /* The maximum number of outstanding TREs on a channel.  This limits
2185  * a channel's maximum number of transactions outstanding (worst case
2186  * is one TRE per transaction).
2187  *
2188  * The absolute limit is the number of TREs in the channel's TRE ring,
2189  * and in theory we should be able to use all of them.  But in practice,
2190  * doing that led to the hardware reporting exhaustion of event ring
2191  * slots for writing completion information.  So the hardware limit
2192  * would be (tre_count - 1).
2193  *
2194  * We reduce it a bit further though.  Transaction resource pools are
2195  * sized to be a little larger than this maximum, to allow resource
2196  * allocations to always be contiguous.  The number of entries in a
2197  * TRE ring buffer is a power of 2, and the extra resources in a pool
2198  * tend to nearly double the memory allocated for it.  Reducing the
2199  * maximum number of outstanding TREs allows the number of entries in
2200  * a pool to avoid crossing that power-of-2 boundary, and this can
2201  * substantially reduce pool memory requirements.  The number we
2202  * reduce it by matches the number added in gsi_trans_pool_init().
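 *
 * Illustration (made-up but plausible values): with tre_count = 256
 * and tlv_count = 16 this function returns 256 - 15 = 241; the pool
 * then adds tlv_count - 1 = 15 entries back, for 256 in all, exactly
 * the power of 2 rather than just past it.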
2203  */
2204 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2205 {
2206 	struct gsi_channel *channel = &gsi->channel[channel_id];
2207 
2208 	/* Hardware limit is channel->tre_count - 1 */
2209 	return channel->tre_count - (channel->tlv_count - 1);
2210 }
2211 
2212 /* Returns the maximum number of TREs in a single transaction for a channel */
2213 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2214 {
2215 	struct gsi_channel *channel = &gsi->channel[channel_id];
2216 
2217 	return channel->tlv_count;
2218 }
2219