// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	--------	     ---------
 *	|      |	     |	     |
 *	|  AP  +<---.	.----+ Modem |
 *	|      +--. |	| .->+	     |
 *	|      |  | |	| |  |	     |
 *	--------  | |	| |  ---------
 *		  v |	v |
 *		--+-+---+-+--
 *		|    GSI    |
 *		|-----------|
 *		|	    |
 *		|    IPA    |
 *		|	    |
 *		-------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
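
/* For illustration only: the DOC comment above describes channel TREs, but
 * this file defines only the event ring element.  As a sketch (an
 * assumption for illustration, not this driver's authoritative definition,
 * which lives with the transaction code), a 16-byte channel TRE could be
 * laid out like this:
 *
 *	struct gsi_tre_example {
 *		__le64 addr;		// DMA address of the data block
 *		__le16 len_opcode;	// transfer length, or immediate command
 *		__le16 reserved;
 *		__le32 flags;		// chaining and interrupt-control flags
 *	};
 *
 * The flags are what allow consecutive TREs to be chained into a single
 * transaction and control whether completion produces an event ring entry.
 */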

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;	/* address of the TRE the event refers to */
	__le16 len;		/* number of bytes transferred */
	u8 reserved1;
	u8 code;		/* completion code */
	__le16 reserved2;
	u8 type;		/* event type */
	u8 chid;		/* channel id */
};

/** struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel, expressed in bytes.  This determines the amount of
 *	prefetch performed by the hardware.  We configure this to equal
 *	the size of the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the sizes of channel and event ring elements are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}
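
/* Illustration: pointer subtraction on a struct gsi_channel * is scaled
 * by the element size, so if @channel points at gsi->channel[3] the
 * expression above yields 3; no explicit division by
 * sizeof(struct gsi_channel) is needed.
 */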

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
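
/* Worked example (hypothetical base address): with the 16-byte
 * GSI_RING_ELEMENT_SIZE used by this driver and a ring whose low 32
 * address bits are 0x1000:
 *
 *	gsi_ring_addr(ring, 0)	== 0x1000
 *	gsi_ring_addr(ring, 5)	== 0x1050
 *	gsi_ring_index(ring, 0x1050) == 5
 *
 * So gsi_ring_index() inverts gsi_ring_addr() for in-range offsets.
 * Note that gsi_ring_addr() does not reduce the index modulo the ring
 * count; callers apply the modulo where it matters.
 */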

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one event ring command at a time, and event
	 * control interrupts should only occur when such a command
	 * is issued here.  Only permit *this* event ring to trigger
	 * an interrupt, and only enable the event control IRQ type
	 * when we expect it to occur.
	 */
	val = BIT(evt_ring_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, evt_ring->state);
		return -EINVAL;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
			evt_ring_id, evt_ring->state);
		ret = -EIO;
	}

	return ret;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
		dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
			evt_ring_id, evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
			evt_ring_id, evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one channel command at a time, and channel
	 * control interrupts should only occur when such a command is
	 * issued here.  So we only permit *this* channel to trigger
	 * an interrupt and only enable the channel control IRQ type
	 * when we expect it to occur.
	 */
	val = BIT(channel_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u after alloc\n",
			channel_id, state);
		ret = -EIO;
	}

	return ret;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
		dev_err(dev, "channel %u bad state %u after start\n",
			gsi_channel_id(channel), state);
		ret = -EIO;
	}

	return ret;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
		return ret;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
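
/* Worked example of the modulo arithmetic above (hypothetical 64-entry
 * ring): if index is 0, meaning entry 0 is the next unused one, then
 * (index - 1) wraps the unsigned value and (index - 1) % ring->count
 * evaluates to 63, so the doorbell is written with the address of the
 * final ring element, the last one the AP processed.
 */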

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity.  Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* We enable the doorbell engine for IPA v3.5.1 */
	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* Starting with IPA v4.0 the command channel uses the escape buffer */
	if (gsi->version != IPA_VERSION_3_5_1 && channel->command)
		val |= USE_ESCAPE_BUF_ONLY_FMASK;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the upper 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call.  This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}
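
/* Worked example with hypothetical numbers: suppose the last reported
 * completion left compl_byte_count at 1000 and compl_trans_count at 10.
 * If the newly-completed transaction recorded byte_count 1500 and
 * trans_count 13 at commit time, and its own length is 100, then:
 *
 *	byte_count  = 1500 + 100 - 1000 = 600 newly-completed bytes
 *	trans_count = 13 + 1 - 10 = 4 newly-completed transactions
 *
 * and the running compl_* counters advance to 1600 and 14 respectively.
 */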

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of the two GENERIC
	 * GSI commands.  We use these to allocate and halt channels on
	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
	 * allocated, the modem "owns" these channels, and as a result we
	 * have no way of knowing the channel's state at any given time.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command.  We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 *
	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
	 * if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_CHANNEL_NOT_RUNNING:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Global EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field
 * tells us the first entry in need of processing.  The index provided is
 * the first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}
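
/* Example of the wrap handling above (hypothetical 64-entry ring): if
 * old_index is 62 and the hardware reports index 2, event_avail starts
 * at 64 - 62 = 2.  The loop processes events 62 and 63, then resets the
 * event pointer to entry 0 and continues through entry 1, stopping when
 * the pointer reaches entry 2 (the first unfilled event).
 */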

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		/* ring->addr isn't valid yet, so free using the local address */
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}
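
/* Alignment example (hypothetical ring size): a 128-element ring needs
 * 128 * 16 = 2048 bytes, so the check above requires a 2048-byte-aligned
 * DMA address.  dma_alloc_coherent() is documented to return an address
 * aligned to the smallest page order covering the request, so the check
 * is a defensive sanity test rather than an expected failure path.
 */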

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
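
/* Worked example (hypothetical hardware limit): with evt_ring_max of 20,
 * GENMASK(BITS_PER_LONG - 1, 20) marks ids 20 and above unavailable
 * within the 32-bit map, and GENMASK(16, 10) additionally reserves the
 * MHI event ids 10 through 16, leaving ids 0-9 and 17-19 allocatable.
 */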

/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!channel->gsi)
		return 0;	/* Ignore uninitialized channels */

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	netif_napi_del(&channel->napi);

	gsi_channel_deprogram(channel);
	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	bool success;
	u32 val;

	/* The error global interrupt type is always enabled (until we
	 * tear down), so we won't change that.  A generic EE command
	 * completes with a GSI global interrupt of type GP_INT1.  We
	 * only perform one generic command at a time (to allocate or
	 * halt a modem channel), and only from this function.  So we
	 * enable the GP_INT1 IRQ type here while we're expecting it.
	 */
	val = BIT(ERROR_INT) | BIT(GP_INT1);
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);

	/* Disable the GP_INT1 IRQ type again */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	if (success)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}
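
/* Illustrative sketch (not part of the driver): how the command register
 * value above is composed.  Assuming GENERIC_OPCODE_FMASK is GENMASK(4, 0),
 * GENERIC_CHID_FMASK is GENMASK(9, 5), GENERIC_EE_FMASK is GENMASK(13, 10),
 * GSI_GENERIC_ALLOCATE_CHANNEL is 0x2, and GSI_EE_MODEM is 1 (taken to be
 * the gsi_reg.h/gsi.h definitions, but treat them as assumptions here),
 * allocating modem channel 1 would encode as:
 *
 *	u32_encode_bits(0x2, GENERIC_OPCODE_FMASK)	-> 0x00000002
 *	u32_encode_bits(1, GENERIC_CHID_FMASK)		-> 0x00000020
 *	u32_encode_bits(1, GENERIC_EE_FMASK)		-> 0x00000400
 *						val	=  0x00000422
 */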

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_evt_ring_setup(gsi);
	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!channel->gsi)
			continue;	/* Ignore uninitialized channels */

		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);

	return ret;
}
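
/* Illustrative sketch (not part of the driver): the modem-channel unwind
 * arithmetic in gsi_channel_setup() above, with a hypothetical bitmap.
 * Suppose modem_channel_bitmap is 0x32 (channels 1, 4, and 5).  Channels
 * are allocated lowest-first and cleared from mask only on success, so if
 * allocating channel 4 fails:
 *
 *	mask			= 0x30	(channel 1 cleared; 4 and 5 remain)
 *	mask ^= 0x32		= 0x02	(exactly the channels allocated)
 *
 * and the halt loop then walks that result highest-first using __fls().
 */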

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);
}

/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(dev, "GSI has not been enabled\n");
		return -EIO;
	}

	gsi_irq_setup(gsi);

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	ret = gsi_channel_setup(gsi);
	if (ret)
		gsi_irq_teardown(gsi);

	return ret;
}
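
/* Illustrative sketch (not part of the driver): decoding HW_PARAM_2 above.
 * Assuming NUM_CH_PER_EE_FMASK is GENMASK(7, 0) and NUM_EV_PER_EE_FMASK is
 * GENMASK(12, 8) (field positions per gsi_reg.h; treat them as assumptions
 * here), a hypothetical register value of 0x00001417 would yield:
 *
 *	u32_get_bits(0x1417, NUM_CH_PER_EE_FMASK)  = 0x17  (23 channels)
 *	u32_get_bits(0x1417, NUM_EV_PER_EE_FMASK)  = 0x14  (20 event rings)
 */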

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
	/* Nothing to do */
}

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range the driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs).  Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
			channel_id, data->channel.tlv_count,
			data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}
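
/* Illustrative sketch (not part of the driver): the tre_count bound above
 * with assumed numbers.  For a channel with tlv_count = 8, one maximally-
 * sized transaction consumes 8 TREs, so we require:
 *
 *	tre_count >= 2 * 8 - 1 = 15
 *
 * and because tre_count must also be a power of 2, the smallest valid ring
 * has 16 TREs.  gsi_channel_tre_max() (below) then allows 16 - (8 - 1) = 9
 * outstanding TREs, enough for one maximally-sized transaction.
 */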

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi_evt_ring_init(gsi);

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}
	gsi_evt_ring_exit(gsi);

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;

	gsi_evt_ring_exit(gsi);
}

/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
	 * network device structure, but the GSI layer does not have one,
	 * so we must create a dummy network device for this purpose.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	gsi->virt = ioremap(res->start, size);
	if (!gsi->virt) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_irq_exit;

	mutex_init(&gsi->mutex);

	return 0;

err_irq_exit:
	gsi_irq_exit(gsi);
err_iounmap:
	iounmap(gsi->virt);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	gsi_irq_exit(gsi);
	iounmap(gsi->virt);
}

/* The maximum number of outstanding TREs on a channel.  This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information.  So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though.  Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous.  The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements.  The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}
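
/* Illustrative sketch (not part of the driver): the pool-sizing arithmetic
 * described above, with assumed numbers.  Take tre_count = 256 and
 * tlv_count = 8; a pool sized for contiguous allocations gets tlv_count - 1
 * extra entries (the number gsi_trans_pool_init() adds).  Without the
 * reduction:
 *
 *	max = 256 - 1 = 255	pool = 255 + 7 = 262	(crosses 256)
 *
 * With the reduction applied here:
 *
 *	max = 256 - 7 = 249	pool = 249 + 7 = 256	(stays put)
 *
 * keeping the pool at the power-of-2 boundary rather than just past it.
 */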

/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}