1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2018-2022 Linaro Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/bits.h>
9 #include <linux/bitfield.h>
10 #include <linux/mutex.h>
11 #include <linux/completion.h>
12 #include <linux/io.h>
13 #include <linux/bug.h>
14 #include <linux/interrupt.h>
15 #include <linux/platform_device.h>
16 #include <linux/netdevice.h>
17 
18 #include "gsi.h"
19 #include "gsi_reg.h"
20 #include "gsi_private.h"
21 #include "gsi_trans.h"
22 #include "ipa_gsi.h"
23 #include "ipa_data.h"
24 #include "ipa_version.h"
25 
26 /**
27  * DOC: The IPA Generic Software Interface
28  *
29  * The generic software interface (GSI) is an integral component of the IPA,
30  * providing a well-defined communication layer between the AP subsystem
31  * and the IPA core.  The modem uses the GSI layer as well.
32  *
33  *	--------	     ---------
34  *	|      |	     |	     |
35  *	|  AP  +<---.	.----+ Modem |
36  *	|      +--. |	| .->+	     |
37  *	|      |  | |	| |  |	     |
38  *	--------  | |	| |  ---------
39  *		  v |	v |
40  *		--+-+---+-+--
41  *		|    GSI    |
42  *		|-----------|
43  *		|	    |
44  *		|    IPA    |
45  *		|	    |
46  *		-------------
47  *
48  * In the above diagram, the AP and Modem represent "execution environments"
49  * (EEs), which are independent operating environments that use the IPA for
50  * data transfer.
51  *
52  * Each EE uses a set of unidirectional GSI "channels," which allow transfer
53  * of data to or from the IPA.  A channel is implemented as a ring buffer,
54  * with a DRAM-resident array of "transfer elements" (TREs) available to
55  * describe transfers to or from other EEs through the IPA.  A transfer
56  * element can also contain an immediate command, requesting the IPA perform
57  * actions other than data transfer.
58  *
59  * Each TRE refers to a block of data--also located in DRAM.  After writing
60  * one or more TREs to a channel, the writer (either the IPA or an EE) writes
61  * a doorbell register to inform the receiving side how many elements have
62  * been written.
63  *
64  * Each channel has a GSI "event ring" associated with it.  An event ring
65  * is implemented very much like a channel ring, but is always directed from
66  * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
67  * events by adding an entry to the event ring associated with the channel.
68  * The GSI then writes its doorbell for the event ring, causing the target
69  * EE to be interrupted.  Each entry in an event ring contains a pointer
70  * to the channel TRE whose completion the event represents.
71  *
72  * Each TRE in a channel ring has a set of flags.  One flag indicates whether
73  * the completion of the transfer operation generates an entry (and possibly
74  * an interrupt) in the channel's event ring.  Other flags allow transfer
75  * elements to be chained together, forming a single logical transaction.
76  * TRE flags are used to control whether and when interrupts are generated
77  * to signal completion of channel transfers.
78  *
79  * Elements in channel and event rings are completed (or consumed) strictly
80  * in order.  Completion of one entry implies the completion of all preceding
81  * entries.  A single completion interrupt can therefore communicate the
82  * completion of many transfers.
83  *
84  * Note that all GSI registers are little-endian, which is the assumed
85  * endianness of I/O space accesses.  The accessor functions perform byte
86  * swapping if needed (i.e., for a big endian CPU).
87  */
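
/* A minimal sketch (not part of the driver) of the producer flow just
 * described: fill a TRE describing a DRAM buffer, then ring the channel
 * doorbell so the hardware sees it.  The TRE layout below is a
 * hypothetical stand-in; the real machinery lives in gsi_trans.c and in
 * gsi_channel_doorbell() later in this file.
 */
#if 0	/* illustration only; never built */
struct example_tre {		/* hypothetical 16-byte TRE layout */
	__le64 addr;		/* block of data in DRAM */
	__le16 len;		/* transfer length, in bytes */
	__le16 reserved;
	__le32 flags;		/* type, chain, and interrupt flags */
};

static void example_post_transfer(struct gsi_channel *channel,
				  dma_addr_t buf, u16 len)
{
	struct gsi_ring *ring = &channel->tre_ring;
	struct example_tre *tre;

	/* Claim the next free slot (gsi_ring_virt() wraps modulo count) */
	tre = gsi_ring_virt(ring, ring->index++);
	tre->addr = cpu_to_le64(buf);
	tre->len = cpu_to_le16(len);
	tre->flags = cpu_to_le32(0);	/* e.g. request a completion event */

	/* Tell the hardware how far we have written */
	gsi_channel_doorbell(channel);
}
#endif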
88 
89 /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
90 #define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */
91 
92 #define GSI_CMD_TIMEOUT			50	/* milliseconds */
93 
94 #define GSI_CHANNEL_STOP_RETRIES	10
95 #define GSI_CHANNEL_MODEM_HALT_RETRIES	10
96 #define GSI_CHANNEL_MODEM_FLOW_RETRIES	5	/* disable flow control only */
97 
98 #define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
99 #define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */
100 
101 #define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */
102 
103 /* An entry in an event ring */
104 struct gsi_event {
105 	__le64 xfer_ptr;
106 	__le16 len;
107 	u8 reserved1;
108 	u8 code;
109 	__le16 reserved2;
110 	u8 type;
111 	u8 chid;
112 };
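
/* Sanity note: the fields above total 8 + 2 + 1 + 1 + 2 + 1 + 1 = 16
 * bytes, matching GSI_RING_ELEMENT_SIZE; gsi_validate_build() verifies
 * this at compile time below.
 */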
113 
114 /** struct gsi_channel_scratch_gpi - GPI protocol scratch register
115  * @max_outstanding_tre:
116  *	Defines the maximum number of TREs allowed in a single transaction
117  *	on a channel, expressed in bytes.  This determines the amount of
118  *	prefetch performed by the hardware.  We configure this to equal
119  *	the size of the TLV FIFO for the channel.
120  * @outstanding_threshold:
121  *	Defines the threshold (in bytes) determining when the sequencer
122  *	should update the channel doorbell.  We configure this to equal
123  *	the size of two TREs.
124  */
125 struct gsi_channel_scratch_gpi {
126 	u64 reserved1;
127 	u16 reserved2;
128 	u16 max_outstanding_tre;
129 	u16 reserved3;
130 	u16 outstanding_threshold;
131 };
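
/* Worked example (FIFO depth hypothetical): ring elements are 16 bytes
 * (GSI_RING_ELEMENT_SIZE), so for a channel whose TLV FIFO holds 8 TREs,
 * gsi_channel_program() below would set max_outstanding_tre = 8 * 16 =
 * 128 bytes and outstanding_threshold = 2 * 16 = 32 bytes.
 */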
132 
133 /** union gsi_channel_scratch - channel scratch configuration area
134  *
135  * The exact interpretation of this register is protocol-specific.
136  * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
137  */
138 union gsi_channel_scratch {
139 	struct gsi_channel_scratch_gpi gpi;
140 	struct {
141 		u32 word1;
142 		u32 word2;
143 		u32 word3;
144 		u32 word4;
145 	} data;
146 };
147 
148 /* Check things that can be validated at build time. */
149 static void gsi_validate_build(void)
150 {
151 	/* This is used as a divisor */
152 	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
153 
154 	/* Code assumes the size of channel and event ring element are
155 	 * the same (and fixed).  Make sure the size of an event ring
156 	 * element is what's expected.
157 	 */
158 	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
159 
160 	/* Hardware requires a 2^n ring size.  We ensure the number of
161 	 * elements in an event ring is a power of 2 elsewhere; this
162 	 * ensure the elements themselves meet the requirement.
163 	 */
164 	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
165 
166 	/* The channel element size must fit in this field */
167 	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
168 
169 	/* The event ring element size must fit in this field */
170 	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
171 }
172 
173 /* Return the channel id associated with a given channel */
174 static u32 gsi_channel_id(struct gsi_channel *channel)
175 {
176 	return channel - &channel->gsi->channel[0];
177 }
178 
179 /* An initialized channel has a non-null GSI pointer */
180 static bool gsi_channel_initialized(struct gsi_channel *channel)
181 {
182 	return !!channel->gsi;
183 }
184 
185 /* Encode the channel protocol for the CH_C_CNTXT_0 register */
186 static u32 ch_c_cntxt_0_type_encode(enum ipa_version version,
187 				    enum gsi_channel_type type)
188 {
189 	u32 val;
190 
191 	val = u32_encode_bits(type, CHTYPE_PROTOCOL_FMASK);
192 	if (version < IPA_VERSION_4_5)
193 		return val;
194 
195 	type >>= hweight32(CHTYPE_PROTOCOL_FMASK);
196 
197 	return val | u32_encode_bits(type, CHTYPE_PROTOCOL_MSB_FMASK);
198 }
199 
200 /* Encode a channel ring buffer length for the CH_C_CNTXT_1 register */
201 static u32 ch_c_cntxt_1_length_encode(enum ipa_version version, u32 length)
202 {
203 	if (version < IPA_VERSION_4_9)
204 		return u32_encode_bits(length, GENMASK(15, 0));
205 
206 	return u32_encode_bits(length, GENMASK(19, 0));
207 }
208 
209 /* Encode the length of the event channel ring buffer for the
210  * EV_CH_E_CNTXT_1 register.
211  */
212 static u32 ev_ch_e_cntxt_1_length_encode(enum ipa_version version, u32 length)
213 {
214 	if (version < IPA_VERSION_4_9)
215 		return u32_encode_bits(length, GENMASK(15, 0));
216 
217 	return u32_encode_bits(length, GENMASK(19, 0));
218 }
219 
220 /* Update the GSI IRQ type register with the cached value */
221 static void gsi_irq_type_update(struct gsi *gsi, u32 val)
222 {
223 	gsi->type_enabled_bitmap = val;
224 	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
225 }
226 
227 static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
228 {
229 	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id);
230 }
231 
232 static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
233 {
234 	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id);
235 }
236 
237 /* Event ring commands are performed one at a time.  Their completion
238  * is signaled by the event ring control GSI interrupt type, which is
239  * only enabled when we issue an event ring command.  Only the event
240  * ring being operated on has this interrupt enabled.
241  */
242 static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
243 {
244 	u32 val = BIT(evt_ring_id);
245 
246 	/* There's a small chance that a previous command completed
247 	 * after the interrupt was disabled, so make sure we have no
248 	 * pending interrupts before we enable them.
249 	 */
250 	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
251 
252 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
253 	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
254 }
255 
256 /* Disable event ring control interrupts */
257 static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
258 {
259 	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
260 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
261 }
262 
263 /* Channel commands are performed one at a time.  Their completion is
264  * signaled by the channel control GSI interrupt type, which is only
265  * enabled when we issue a channel command.  Only the channel being
266  * operated on has this interrupt enabled.
267  */
268 static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
269 {
270 	u32 val = BIT(channel_id);
271 
272 	/* There's a small chance that a previous command completed
273 	 * after the interrupt was disabled, so make sure we have no
274 	 * pending interrupts before we enable them.
275 	 */
276 	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
277 
278 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
279 	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
280 }
281 
282 /* Disable channel control interrupts */
283 static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
284 {
285 	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
286 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
287 }
288 
289 static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
290 {
291 	bool enable_ieob = !gsi->ieob_enabled_bitmap;
292 	u32 val;
293 
294 	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
295 	val = gsi->ieob_enabled_bitmap;
296 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
297 
298 	/* Enable the interrupt type if this is the first channel enabled */
299 	if (enable_ieob)
300 		gsi_irq_type_enable(gsi, GSI_IEOB);
301 }
302 
303 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
304 {
305 	u32 val;
306 
307 	gsi->ieob_enabled_bitmap &= ~event_mask;
308 
309 	/* Disable the interrupt type if this was the last enabled channel */
310 	if (!gsi->ieob_enabled_bitmap)
311 		gsi_irq_type_disable(gsi, GSI_IEOB);
312 
313 	val = gsi->ieob_enabled_bitmap;
314 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
315 }
316 
317 static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
318 {
319 	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
320 }
321 
322 /* Enable all GSI interrupt types */
323 static void gsi_irq_enable(struct gsi *gsi)
324 {
325 	u32 val;
326 
327 	/* Global interrupts include hardware error reports.  Enable
328 	 * that so we can at least report the error should it occur.
329 	 */
330 	iowrite32(ERROR_INT, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
331 	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);
332 
333 	/* General GSI interrupts are reported to all EEs; if they occur
334 	 * they are unrecoverable (without reset).  A breakpoint interrupt
335 	 * also exists, but we don't support that.  We want to be notified
336 	 * of errors so we can report them, even if they can't be handled.
337 	 */
338 	val = BUS_ERROR;
339 	val |= CMD_FIFO_OVRFLOW;
340 	val |= MCS_STACK_OVRFLOW;
341 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
342 	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
343 }
344 
345 /* Disable all GSI interrupt types */
346 static void gsi_irq_disable(struct gsi *gsi)
347 {
348 	gsi_irq_type_update(gsi, 0);
349 
350 	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
351 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
352 	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
353 }
354 
355 /* Return the virtual address associated with a ring index */
356 void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
357 {
358 	/* Note: index *must* be used modulo the ring count here */
359 	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
360 }
361 
362 /* Return the 32-bit DMA address associated with a ring index */
363 static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
364 {
365 	return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
366 }
367 
368 /* Return the ring index of a 32-bit ring offset */
369 static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
370 {
371 	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
372 }
373 
374 /* Issue a GSI command by writing a value to a register, then wait for
375  * completion to be signaled.  Returns true if the command completes
376  * or false if it times out.
377  */
378 static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
379 {
380 	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
381 	struct completion *completion = &gsi->completion;
382 
383 	reinit_completion(completion);
384 
385 	iowrite32(val, gsi->virt + reg);
386 
387 	return !!wait_for_completion_timeout(completion, timeout);
388 }
389 
390 /* Return the hardware's notion of the current state of an event ring */
391 static enum gsi_evt_ring_state
392 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
393 {
394 	u32 val;
395 
396 	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
397 
398 	return u32_get_bits(val, EV_CHSTATE_FMASK);
399 }
400 
401 /* Issue an event ring command and wait for it to complete */
402 static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
403 				 enum gsi_evt_cmd_opcode opcode)
404 {
405 	struct device *dev = gsi->dev;
406 	bool timeout;
407 	u32 val;
408 
409 	/* Enable the completion interrupt for the command */
410 	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
411 
412 	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
413 	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
414 
415 	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
416 
417 	gsi_irq_ev_ctrl_disable(gsi);
418 
419 	if (!timeout)
420 		return;
421 
422 	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
423 		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
424 }
425 
426 /* Allocate an event ring in NOT_ALLOCATED state */
427 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
428 {
429 	enum gsi_evt_ring_state state;
430 
431 	/* Get initial event ring state */
432 	state = gsi_evt_ring_state(gsi, evt_ring_id);
433 	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
434 		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
435 			evt_ring_id, state);
436 		return -EINVAL;
437 	}
438 
439 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
440 
441 	/* If successful the event ring state will have changed */
442 	state = gsi_evt_ring_state(gsi, evt_ring_id);
443 	if (state == GSI_EVT_RING_STATE_ALLOCATED)
444 		return 0;
445 
446 	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
447 		evt_ring_id, state);
448 
449 	return -EIO;
450 }
451 
452 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
453 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
454 {
455 	enum gsi_evt_ring_state state;
456 
457 	state = gsi_evt_ring_state(gsi, evt_ring_id);
458 	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
459 	    state != GSI_EVT_RING_STATE_ERROR) {
460 		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
461 			evt_ring_id, state);
462 		return;
463 	}
464 
465 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
466 
467 	/* If successful the event ring state will have changed */
468 	state = gsi_evt_ring_state(gsi, evt_ring_id);
469 	if (state == GSI_EVT_RING_STATE_ALLOCATED)
470 		return;
471 
472 	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
473 		evt_ring_id, state);
474 }
475 
476 /* Issue a hardware de-allocation request for an allocated event ring */
477 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
478 {
479 	enum gsi_evt_ring_state state;
480 
481 	state = gsi_evt_ring_state(gsi, evt_ring_id);
482 	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
483 		dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
484 			evt_ring_id, state);
485 		return;
486 	}
487 
488 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
489 
490 	/* If successful the event ring state will have changed */
491 	state = gsi_evt_ring_state(gsi, evt_ring_id);
492 	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
493 		return;
494 
495 	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
496 		evt_ring_id, state);
497 }
498 
499 /* Fetch the current state of a channel from hardware */
500 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
501 {
502 	u32 channel_id = gsi_channel_id(channel);
503 	void __iomem *virt = channel->gsi->virt;
504 	u32 val;
505 
506 	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
507 
508 	return u32_get_bits(val, CHSTATE_FMASK);
509 }
510 
511 /* Issue a channel command and wait for it to complete */
512 static void
513 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
514 {
515 	u32 channel_id = gsi_channel_id(channel);
516 	struct gsi *gsi = channel->gsi;
517 	struct device *dev = gsi->dev;
518 	bool timeout;
519 	u32 val;
520 
521 	/* Enable the completion interrupt for the command */
522 	gsi_irq_ch_ctrl_enable(gsi, channel_id);
523 
524 	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
525 	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
526 	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
527 
528 	gsi_irq_ch_ctrl_disable(gsi);
529 
530 	if (!timeout)
531 		return;
532 
533 	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
534 		opcode, channel_id, gsi_channel_state(channel));
535 }
536 
537 /* Allocate GSI channel in NOT_ALLOCATED state */
538 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
539 {
540 	struct gsi_channel *channel = &gsi->channel[channel_id];
541 	struct device *dev = gsi->dev;
542 	enum gsi_channel_state state;
543 
544 	/* Get initial channel state */
545 	state = gsi_channel_state(channel);
546 	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
547 		dev_err(dev, "channel %u bad state %u before alloc\n",
548 			channel_id, state);
549 		return -EINVAL;
550 	}
551 
552 	gsi_channel_command(channel, GSI_CH_ALLOCATE);
553 
554 	/* If successful the channel state will have changed */
555 	state = gsi_channel_state(channel);
556 	if (state == GSI_CHANNEL_STATE_ALLOCATED)
557 		return 0;
558 
559 	dev_err(dev, "channel %u bad state %u after alloc\n",
560 		channel_id, state);
561 
562 	return -EIO;
563 }
564 
565 /* Start an ALLOCATED (or STOPPED) channel */
566 static int gsi_channel_start_command(struct gsi_channel *channel)
567 {
568 	struct device *dev = channel->gsi->dev;
569 	enum gsi_channel_state state;
570 
571 	state = gsi_channel_state(channel);
572 	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
573 	    state != GSI_CHANNEL_STATE_STOPPED) {
574 		dev_err(dev, "channel %u bad state %u before start\n",
575 			gsi_channel_id(channel), state);
576 		return -EINVAL;
577 	}
578 
579 	gsi_channel_command(channel, GSI_CH_START);
580 
581 	/* If successful the channel state will have changed */
582 	state = gsi_channel_state(channel);
583 	if (state == GSI_CHANNEL_STATE_STARTED)
584 		return 0;
585 
586 	dev_err(dev, "channel %u bad state %u after start\n",
587 		gsi_channel_id(channel), state);
588 
589 	return -EIO;
590 }
591 
592 /* Stop a GSI channel in STARTED state */
593 static int gsi_channel_stop_command(struct gsi_channel *channel)
594 {
595 	struct device *dev = channel->gsi->dev;
596 	enum gsi_channel_state state;
597 
598 	state = gsi_channel_state(channel);
599 
600 	/* Channel could have entered STOPPED state since last call
601 	 * if it timed out.  If so, we're done.
602 	 */
603 	if (state == GSI_CHANNEL_STATE_STOPPED)
604 		return 0;
605 
606 	if (state != GSI_CHANNEL_STATE_STARTED &&
607 	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
608 		dev_err(dev, "channel %u bad state %u before stop\n",
609 			gsi_channel_id(channel), state);
610 		return -EINVAL;
611 	}
612 
613 	gsi_channel_command(channel, GSI_CH_STOP);
614 
615 	/* If successful the channel state will have changed */
616 	state = gsi_channel_state(channel);
617 	if (state == GSI_CHANNEL_STATE_STOPPED)
618 		return 0;
619 
620 	/* We may have to try again if stop is in progress */
621 	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
622 		return -EAGAIN;
623 
624 	dev_err(dev, "channel %u bad state %u after stop\n",
625 		gsi_channel_id(channel), state);
626 
627 	return -EIO;
628 }
629 
630 /* Reset a GSI channel in STOPPED or ERROR state. */
631 static void gsi_channel_reset_command(struct gsi_channel *channel)
632 {
633 	struct device *dev = channel->gsi->dev;
634 	enum gsi_channel_state state;
635 
636 	/* A short delay is required before a RESET command */
637 	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
638 
639 	state = gsi_channel_state(channel);
640 	if (state != GSI_CHANNEL_STATE_STOPPED &&
641 	    state != GSI_CHANNEL_STATE_ERROR) {
642 		/* No need to reset a channel already in ALLOCATED state */
643 		if (state != GSI_CHANNEL_STATE_ALLOCATED)
644 			dev_err(dev, "channel %u bad state %u before reset\n",
645 				gsi_channel_id(channel), state);
646 		return;
647 	}
648 
649 	gsi_channel_command(channel, GSI_CH_RESET);
650 
651 	/* If successful the channel state will have changed */
652 	state = gsi_channel_state(channel);
653 	if (state != GSI_CHANNEL_STATE_ALLOCATED)
654 		dev_err(dev, "channel %u bad state %u after reset\n",
655 			gsi_channel_id(channel), state);
656 }
657 
658 /* Deallocate an ALLOCATED GSI channel */
659 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
660 {
661 	struct gsi_channel *channel = &gsi->channel[channel_id];
662 	struct device *dev = gsi->dev;
663 	enum gsi_channel_state state;
664 
665 	state = gsi_channel_state(channel);
666 	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
667 		dev_err(dev, "channel %u bad state %u before dealloc\n",
668 			channel_id, state);
669 		return;
670 	}
671 
672 	gsi_channel_command(channel, GSI_CH_DE_ALLOC);
673 
674 	/* If successful the channel state will have changed */
675 	state = gsi_channel_state(channel);
676 
677 	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
678 		dev_err(dev, "channel %u bad state %u after dealloc\n",
679 			channel_id, state);
680 }
681 
682 /* Ring an event ring doorbell, reporting the last entry processed by the AP.
683  * The index argument (modulo the ring count) is the first unfilled entry, so
684  * we supply one less than that with the doorbell.  Update the event ring
685  * index field with the value provided.
686  */
687 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
688 {
689 	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
690 	u32 val;
691 
692 	ring->index = index;	/* Next unused entry */
693 
694 	/* Note: index *must* be used modulo the ring count here */
695 	val = gsi_ring_addr(ring, (index - 1) % ring->count);
696 	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
697 }
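
/* Doorbell example (numbers hypothetical): if the AP has processed
 * events 0..4 of a 16-entry ring, the first unfilled entry is index 5,
 * so the caller passes 5 and we write the address of entry
 * (5 - 1) % 16 = 4 -- the last entry actually processed.
 */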
698 
699 /* Program an event ring for use */
700 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
701 {
702 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
703 	struct gsi_ring *ring = &evt_ring->ring;
704 	size_t size;
705 	u32 val;
706 
707 	/* We program all event rings as GPI type/protocol */
708 	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
709 	val |= EV_INTYPE_FMASK;
710 	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
711 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
712 
713 	size = ring->count * GSI_RING_ELEMENT_SIZE;
714 	val = ev_ch_e_cntxt_1_length_encode(gsi->version, size);
715 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
716 
717 	/* The context 2 and 3 registers store the low-order and
718 	 * high-order 32 bits of the address of the event ring,
719 	 * respectively.
720 	 */
721 	val = lower_32_bits(ring->addr);
722 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
723 	val = upper_32_bits(ring->addr);
724 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
725 
726 	/* Enable interrupt moderation by setting the moderation delay */
727 	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
728 	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
729 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
730 
731 	/* No MSI write data; the MSI address high and low registers are 0 */
732 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
733 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
734 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
735 
736 	/* We don't need to get event read pointer updates */
737 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
738 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
739 
740 	/* Finally, tell the hardware our "last processed" event (arbitrary) */
741 	gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
742 }
743 
744 /* Find the transaction whose completion indicates a channel is quiesced */
745 static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
746 {
747 	struct gsi_trans_info *trans_info = &channel->trans_info;
748 	u32 pending_id = trans_info->pending_id;
749 	struct gsi_trans *trans;
750 	u16 trans_id;
751 
752 	if (channel->toward_ipa && pending_id != trans_info->free_id) {
753 		/* There is a small chance a TX transaction got allocated
754 		 * just before we disabled transmits, so check for that.
755 		 * The last allocated, committed, or pending transaction
756 		 * precedes the first free transaction.
757 		 */
758 		trans_id = trans_info->free_id - 1;
759 	} else if (trans_info->polled_id != pending_id) {
760 		/* Otherwise (TX or RX) we want to wait for anything that
761 		 * has completed, or has been polled but not released yet.
762 		 *
763 		 * The last completed or polled transaction precedes the
764 		 * first pending transaction.
765 		 */
766 		trans_id = pending_id - 1;
767 	} else {
768 		return NULL;
769 	}
770 
771 	/* Caller will wait for this, so take a reference */
772 	trans = &trans_info->trans[trans_id % channel->tre_count];
773 	refcount_inc(&trans->refcount);
774 
775 	return trans;
776 }
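
/* Worked example (transaction IDs hypothetical): on a TX channel with
 * free_id = 27 and pending_id = 24, transaction 26 (free_id - 1) was
 * the last one allocated, so that is the one we wait on.  On an RX
 * channel with pending_id = 24 and polled_id = 20, we instead wait on
 * transaction 23 (pending_id - 1), the last completed or polled one.
 */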
777 
778 /* Wait for transaction activity on a channel to complete */
779 static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
780 {
781 	struct gsi_trans *trans;
782 
783 	/* Get the last transaction, and wait for it to complete */
784 	trans = gsi_channel_trans_last(channel);
785 	if (trans) {
786 		wait_for_completion(&trans->completion);
787 		gsi_trans_free(trans);
788 	}
789 }
790 
791 /* Program a channel for use; there is no gsi_channel_deprogram() */
792 static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
793 {
794 	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
795 	u32 channel_id = gsi_channel_id(channel);
796 	union gsi_channel_scratch scr = { };
797 	struct gsi_channel_scratch_gpi *gpi;
798 	struct gsi *gsi = channel->gsi;
799 	u32 wrr_weight = 0;
800 	u32 val;
801 
802 	/* We program all channels as GPI type/protocol */
803 	val = ch_c_cntxt_0_type_encode(gsi->version, GSI_CHANNEL_TYPE_GPI);
804 	if (channel->toward_ipa)
805 		val |= CHTYPE_DIR_FMASK;
806 	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
807 	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
808 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
809 
810 	val = ch_c_cntxt_1_length_encode(gsi->version, size);
811 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
812 
813 	/* The context 2 and 3 registers store the low-order and
814 	 * high-order 32 bits of the address of the channel ring,
815 	 * respectively.
816 	 */
817 	val = lower_32_bits(channel->tre_ring.addr);
818 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
819 	val = upper_32_bits(channel->tre_ring.addr);
820 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
821 
822 	/* Command channel gets low weighted round-robin priority */
823 	if (channel->command)
824 		wrr_weight = field_max(WRR_WEIGHT_FMASK);
825 	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
826 
827 	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
828 
829 	/* No need to use the doorbell engine starting at IPA v4.0 */
830 	if (gsi->version < IPA_VERSION_4_0 && doorbell)
831 		val |= USE_DB_ENG_FMASK;
832 
833 	/* v4.0 introduces an escape buffer for prefetch.  We use it
834 	 * on all but the AP command channel.
835 	 */
836 	if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
837 		/* If not otherwise set, prefetch buffers are used */
838 		if (gsi->version < IPA_VERSION_4_5)
839 			val |= USE_ESCAPE_BUF_ONLY_FMASK;
840 		else
841 			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
842 					       PREFETCH_MODE_FMASK);
843 	}
844 	/* All channels set DB_IN_BYTES */
845 	if (gsi->version >= IPA_VERSION_4_9)
846 		val |= DB_IN_BYTES;
847 
848 	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
849 
850 	/* Now update the scratch registers for GPI protocol */
851 	gpi = &scr.gpi;
852 	gpi->max_outstanding_tre = channel->trans_tre_max *
853 					GSI_RING_ELEMENT_SIZE;
854 	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
855 
856 	val = scr.data.word1;
857 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
858 
859 	val = scr.data.word2;
860 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
861 
862 	val = scr.data.word3;
863 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
864 
865 	/* We must preserve the upper 16 bits of the last scratch register.
866 	 * The next sequence assumes those bits remain unchanged between the
867 	 * read and the write.
868 	 */
869 	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
870 	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
871 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
872 
873 	/* All done! */
874 }
875 
876 static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
877 {
878 	struct gsi *gsi = channel->gsi;
879 	int ret;
880 
881 	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
882 	if (resume && gsi->version < IPA_VERSION_4_0)
883 		return 0;
884 
885 	mutex_lock(&gsi->mutex);
886 
887 	ret = gsi_channel_start_command(channel);
888 
889 	mutex_unlock(&gsi->mutex);
890 
891 	return ret;
892 }
893 
894 /* Start an allocated GSI channel */
895 int gsi_channel_start(struct gsi *gsi, u32 channel_id)
896 {
897 	struct gsi_channel *channel = &gsi->channel[channel_id];
898 	int ret;
899 
900 	/* Enable NAPI and the completion interrupt */
901 	napi_enable(&channel->napi);
902 	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
903 
904 	ret = __gsi_channel_start(channel, false);
905 	if (ret) {
906 		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
907 		napi_disable(&channel->napi);
908 	}
909 
910 	return ret;
911 }
912 
913 static int gsi_channel_stop_retry(struct gsi_channel *channel)
914 {
915 	u32 retries = GSI_CHANNEL_STOP_RETRIES;
916 	int ret;
917 
918 	do {
919 		ret = gsi_channel_stop_command(channel);
920 		if (ret != -EAGAIN)
921 			break;
922 		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
923 	} while (retries--);
924 
925 	return ret;
926 }
927 
928 static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
929 {
930 	struct gsi *gsi = channel->gsi;
931 	int ret;
932 
933 	/* Wait for any underway transactions to complete before stopping. */
934 	gsi_channel_trans_quiesce(channel);
935 
936 	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
937 	if (suspend && gsi->version < IPA_VERSION_4_0)
938 		return 0;
939 
940 	mutex_lock(&gsi->mutex);
941 
942 	ret = gsi_channel_stop_retry(channel);
943 
944 	mutex_unlock(&gsi->mutex);
945 
946 	return ret;
947 }
948 
949 /* Stop a started channel */
950 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
951 {
952 	struct gsi_channel *channel = &gsi->channel[channel_id];
953 	int ret;
954 
955 	ret = __gsi_channel_stop(channel, false);
956 	if (ret)
957 		return ret;
958 
959 	/* Disable the completion interrupt and NAPI if successful */
960 	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
961 	napi_disable(&channel->napi);
962 
963 	return 0;
964 }
965 
966 /* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
967 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
968 {
969 	struct gsi_channel *channel = &gsi->channel[channel_id];
970 
971 	mutex_lock(&gsi->mutex);
972 
973 	gsi_channel_reset_command(channel);
974 	/* Due to a hardware quirk we may need to reset RX channels twice. */
975 	if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
976 		gsi_channel_reset_command(channel);
977 
978 	/* Hardware assumes this is 0 following reset */
979 	channel->tre_ring.index = 0;
980 	gsi_channel_program(channel, doorbell);
981 	gsi_channel_trans_cancel_pending(channel);
982 
983 	mutex_unlock(&gsi->mutex);
984 }
985 
986 /* Stop a started channel for suspend */
987 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
988 {
989 	struct gsi_channel *channel = &gsi->channel[channel_id];
990 	int ret;
991 
992 	ret = __gsi_channel_stop(channel, true);
993 	if (ret)
994 		return ret;
995 
996 	/* Ensure NAPI polling has finished. */
997 	napi_synchronize(&channel->napi);
998 
999 	return 0;
1000 }
1001 
1002 /* Resume a suspended channel (starting if stopped) */
1003 int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
1004 {
1005 	struct gsi_channel *channel = &gsi->channel[channel_id];
1006 
1007 	return __gsi_channel_start(channel, true);
1008 }
1009 
1010 /* Prevent all GSI interrupts while suspended */
1011 void gsi_suspend(struct gsi *gsi)
1012 {
1013 	disable_irq(gsi->irq);
1014 }
1015 
1016 /* Allow all GSI interrupts again when resuming */
1017 void gsi_resume(struct gsi *gsi)
1018 {
1019 	enable_irq(gsi->irq);
1020 }
1021 
1022 void gsi_trans_tx_committed(struct gsi_trans *trans)
1023 {
1024 	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
1025 
1026 	channel->trans_count++;
1027 	channel->byte_count += trans->len;
1028 
1029 	trans->trans_count = channel->trans_count;
1030 	trans->byte_count = channel->byte_count;
1031 }
1032 
1033 void gsi_trans_tx_queued(struct gsi_trans *trans)
1034 {
1035 	u32 channel_id = trans->channel_id;
1036 	struct gsi *gsi = trans->gsi;
1037 	struct gsi_channel *channel;
1038 	u32 trans_count;
1039 	u32 byte_count;
1040 
1041 	channel = &gsi->channel[channel_id];
1042 
1043 	byte_count = channel->byte_count - channel->queued_byte_count;
1044 	trans_count = channel->trans_count - channel->queued_trans_count;
1045 	channel->queued_byte_count = channel->byte_count;
1046 	channel->queued_trans_count = channel->trans_count;
1047 
1048 	ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
1049 }
1050 
1051 /**
1052  * gsi_trans_tx_completed() - Report completed TX transactions
1053  * @trans:	TX channel transaction that has completed
1054  *
1055  * Report that a transaction on a TX channel has completed.  At the time a
1056  * transaction is committed, we record *in the transaction* its channel's
1057  * committed transaction and byte counts.  Transactions are completed in
1058  * order, and the difference between the channel's byte/transaction count
1059  * when the transaction was committed and when it completes tells us
1060  * exactly how much data has been transferred while the transaction was
1061  * pending.
1062  *
1063  * We report this information to the network stack, which uses it to manage
1064  * the rate at which data is sent to hardware.
1065  */
1066 static void gsi_trans_tx_completed(struct gsi_trans *trans)
1067 {
1068 	u32 channel_id = trans->channel_id;
1069 	struct gsi *gsi = trans->gsi;
1070 	struct gsi_channel *channel;
1071 	u32 trans_count;
1072 	u32 byte_count;
1073 
1074 	channel = &gsi->channel[channel_id];
1075 	trans_count = trans->trans_count - channel->compl_trans_count;
1076 	byte_count = trans->byte_count - channel->compl_byte_count;
1077 
1078 	channel->compl_trans_count += trans_count;
1079 	channel->compl_byte_count += byte_count;
1080 
1081 	ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
1082 }
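
/* Worked example (counts hypothetical): if a transaction was committed
 * when the channel showed trans_count = 10 and byte_count = 4000, and
 * the channel's completed counts stand at compl_trans_count = 7 and
 * compl_byte_count = 2500 when it completes, this completion represents
 * 10 - 7 = 3 transactions and 4000 - 2500 = 1500 bytes, which is what
 * gets reported to the network stack.
 */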
1083 
1084 /* Channel control interrupt handler */
1085 static void gsi_isr_chan_ctrl(struct gsi *gsi)
1086 {
1087 	u32 channel_mask;
1088 
1089 	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
1090 	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
1091 
1092 	while (channel_mask) {
1093 		u32 channel_id = __ffs(channel_mask);
1094 
1095 		channel_mask ^= BIT(channel_id);
1096 
1097 		complete(&gsi->completion);
1098 	}
1099 }
1100 
1101 /* Event ring control interrupt handler */
1102 static void gsi_isr_evt_ctrl(struct gsi *gsi)
1103 {
1104 	u32 event_mask;
1105 
1106 	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
1107 	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
1108 
1109 	while (event_mask) {
1110 		u32 evt_ring_id = __ffs(event_mask);
1111 
1112 		event_mask ^= BIT(evt_ring_id);
1113 
1114 		complete(&gsi->completion);
1115 	}
1116 }
1117 
1118 /* Global channel error interrupt handler */
1119 static void
1120 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
1121 {
1122 	if (code == GSI_OUT_OF_RESOURCES) {
1123 		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
1124 		complete(&gsi->completion);
1125 		return;
1126 	}
1127 
1128 	/* Report, but otherwise ignore all other error codes */
1129 	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
1130 		channel_id, err_ee, code);
1131 }
1132 
1133 /* Global event error interrupt handler */
1134 static void
1135 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
1136 {
1137 	if (code == GSI_OUT_OF_RESOURCES) {
1138 		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1139 		u32 channel_id = gsi_channel_id(evt_ring->channel);
1140 
1141 		complete(&gsi->completion);
1142 		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
1143 			channel_id);
1144 		return;
1145 	}
1146 
1147 	/* Report, but otherwise ignore all other error codes */
1148 	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
1149 		evt_ring_id, err_ee, code);
1150 }
1151 
1152 /* Global error interrupt handler */
1153 static void gsi_isr_glob_err(struct gsi *gsi)
1154 {
1155 	enum gsi_err_type type;
1156 	enum gsi_err_code code;
1157 	u32 which;
1158 	u32 val;
1159 	u32 ee;
1160 
1161 	/* Get the logged error, then reinitialize the log */
1162 	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
1163 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1164 	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
1165 
1166 	ee = u32_get_bits(val, ERR_EE_FMASK);
1167 	type = u32_get_bits(val, ERR_TYPE_FMASK);
1168 	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
1169 	code = u32_get_bits(val, ERR_CODE_FMASK);
1170 
1171 	if (type == GSI_ERR_TYPE_CHAN)
1172 		gsi_isr_glob_chan_err(gsi, ee, which, code);
1173 	else if (type == GSI_ERR_TYPE_EVT)
1174 		gsi_isr_glob_evt_err(gsi, ee, which, code);
1175 	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
1176 		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
1177 }
1178 
1179 /* Generic EE interrupt handler */
1180 static void gsi_isr_gp_int1(struct gsi *gsi)
1181 {
1182 	u32 result;
1183 	u32 val;
1184 
1185 	/* This interrupt is used to handle completions of GENERIC GSI
1186 	 * commands.  We use these to allocate and halt channels on the
1187 	 * modem's behalf due to a hardware quirk on IPA v4.2.  The modem
1188 	 * "owns" channels even when the AP allocates them, and have no
1189 	 * "owns" channels even when the AP allocates them, and we have
1190 	 * no way of knowing whether a modem channel's state has changed.
1191 	 * We also use GENERIC commands to enable/disable channel flow
1192 	 * control for IPA v4.2+.
1193 	 *
1194 	 * It is recommended that we halt the modem channels we allocated
1195 	 * when shutting down, but it's possible the channel isn't running
1196 	 * at the time we issue the HALT command.  We'll get an error in
1197 	 * that case, but it's harmless (the channel is already halted).
1198 	 * Similarly, we could get an error back when updating flow control
1199 	 * on a channel because it's not in the proper state.
1200 	 *
1201 	 * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
1202 	 * error if we receive it.
1203 	 */
1204 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1205 	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
1206 
1207 	switch (result) {
1208 	case GENERIC_EE_SUCCESS:
1209 	case GENERIC_EE_INCORRECT_CHANNEL_STATE:
1210 		gsi->result = 0;
1211 		break;
1212 
1213 	case GENERIC_EE_RETRY:
1214 		gsi->result = -EAGAIN;
1215 		break;
1216 
1217 	default:
1218 		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
1219 		gsi->result = -EIO;
1220 		break;
1221 	}
1222 
1223 	complete(&gsi->completion);
1224 }
1225 
1226 /* Inter-EE interrupt handler */
1227 static void gsi_isr_glob_ee(struct gsi *gsi)
1228 {
1229 	u32 val;
1230 
1231 	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
1232 
1233 	if (val & ERROR_INT)
1234 		gsi_isr_glob_err(gsi);
1235 
1236 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
1237 
1238 	val &= ~ERROR_INT;
1239 
1240 	if (val & GP_INT1) {
1241 		val ^= GP_INT1;
1242 		gsi_isr_gp_int1(gsi);
1243 	}
1244 
1245 	if (val)
1246 		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
1247 }
1248 
1249 /* I/O completion interrupt event */
1250 static void gsi_isr_ieob(struct gsi *gsi)
1251 {
1252 	u32 event_mask;
1253 
1254 	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
1255 	gsi_irq_ieob_disable(gsi, event_mask);
1256 	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
1257 
1258 	while (event_mask) {
1259 		u32 evt_ring_id = __ffs(event_mask);
1260 
1261 		event_mask ^= BIT(evt_ring_id);
1262 
1263 		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
1264 	}
1265 }
1266 
1267 /* General event interrupts represent serious problems, so report them */
1268 static void gsi_isr_general(struct gsi *gsi)
1269 {
1270 	struct device *dev = gsi->dev;
1271 	u32 val;
1272 
1273 	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
1274 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
1275 
1276 	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
1277 }
1278 
1279 /**
1280  * gsi_isr() - Top level GSI interrupt service routine
1281  * @irq:	Interrupt number (ignored)
1282  * @dev_id:	GSI pointer supplied to request_irq()
1283  *
1284  * This is the main handler function registered for the GSI IRQ. Each type
1285  * of interrupt has a separate handler function that is called from here.
1286  */
1287 static irqreturn_t gsi_isr(int irq, void *dev_id)
1288 {
1289 	struct gsi *gsi = dev_id;
1290 	u32 intr_mask;
1291 	u32 cnt = 0;
1292 
1293 	/* enum gsi_irq_type_id defines GSI interrupt types */
1294 	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
1295 		/* intr_mask contains bitmask of pending GSI interrupts */
1296 		do {
1297 			u32 gsi_intr = BIT(__ffs(intr_mask));
1298 
1299 			intr_mask ^= gsi_intr;
1300 
1301 			switch (gsi_intr) {
1302 			case GSI_CH_CTRL:
1303 				gsi_isr_chan_ctrl(gsi);
1304 				break;
1305 			case GSI_EV_CTRL:
1306 				gsi_isr_evt_ctrl(gsi);
1307 				break;
1308 			case GSI_GLOB_EE:
1309 				gsi_isr_glob_ee(gsi);
1310 				break;
1311 			case GSI_IEOB:
1312 				gsi_isr_ieob(gsi);
1313 				break;
1314 			case GSI_GENERAL:
1315 				gsi_isr_general(gsi);
1316 				break;
1317 			default:
1318 				dev_err(gsi->dev,
1319 					"unrecognized interrupt type 0x%08x\n",
1320 					gsi_intr);
1321 				break;
1322 			}
1323 		} while (intr_mask);
1324 
1325 		if (++cnt > GSI_ISR_MAX_ITER) {
1326 			dev_err(gsi->dev, "interrupt flood\n");
1327 			break;
1328 		}
1329 	}
1330 
1331 	return IRQ_HANDLED;
1332 }
1333 
1334 /* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
1335 static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
1336 {
1337 	int ret;
1338 
1339 	ret = platform_get_irq_byname(pdev, "gsi");
1340 	if (ret <= 0)
1341 		return ret ? : -EINVAL;
1342 
1343 	gsi->irq = ret;
1344 
1345 	return 0;
1346 }
1347 
1348 /* Return the transaction associated with a transfer completion event */
1349 static struct gsi_trans *
1350 gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
1351 {
1352 	u32 channel_id = event->chid;
1353 	struct gsi_channel *channel;
1354 	struct gsi_trans *trans;
1355 	u32 tre_offset;
1356 	u32 tre_index;
1357 
1358 	channel = &gsi->channel[channel_id];
1359 	if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
1360 		return NULL;
1361 
1362 	/* Event xfer_ptr records the TRE it's associated with */
1363 	tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
1364 	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
1365 
1366 	trans = gsi_channel_trans_mapped(channel, tre_index);
1367 
1368 	if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
1369 		return NULL;
1370 
1371 	return trans;
1372 }
1373 
1374 /**
1375  * gsi_evt_ring_update() - Update transaction state from hardware
1376  * @gsi:		GSI pointer
1377  * @evt_ring_id:	Event ring ID
1378  * @index:		Event index in ring reported by hardware
1379  *
1380  * Events for RX channels contain the actual number of bytes received into
1381  * the buffer.  Every event has a transaction associated with it, and here
1382  * we update transactions to record their actual received lengths.
1383  *
1384  * When an event for a TX channel arrives we use information in the
1385  * transaction to report the number of requests and bytes that have
1386  * been transferred.
1387  *
1388  * This function is called whenever we learn that the GSI hardware has filled
1389  * new events since the last time we checked.  The ring's index field tells
1390  * us the first entry in need of processing.  The index provided is the
1391  * first *unfilled* event in the ring (following the last filled one).
1392  *
1393  * Events are sequential within the event ring, and transactions are
1394  * sequential within the transaction array.
1395  *
1396  * Note that @index always refers to an element *within* the event ring.
1397  */
1398 static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
1399 {
1400 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1401 	struct gsi_ring *ring = &evt_ring->ring;
1402 	struct gsi_event *event_done;
1403 	struct gsi_event *event;
1404 	u32 event_avail;
1405 	u32 old_index;
1406 
1407 	/* Starting with the oldest un-processed event, determine which
1408 	 * transaction (and which channel) is associated with the event.
1409 	 * For RX channels, update each completed transaction with the
1410 	 * number of bytes that were actually received.  For TX channels
1411 	 * associated with a network device, report to the network stack
1412 	 * the number of transfers and bytes this completion represents.
1413 	 */
1414 	old_index = ring->index;
1415 	event = gsi_ring_virt(ring, old_index);
1416 
1417 	/* Compute the number of events to process before we wrap,
1418 	 * and determine when we'll be done processing events.
1419 	 */
1420 	event_avail = ring->count - old_index % ring->count;
1421 	event_done = gsi_ring_virt(ring, index);
1422 	do {
1423 		struct gsi_trans *trans;
1424 
1425 		trans = gsi_event_trans(gsi, event);
1426 		if (!trans)
1427 			return;
1428 
1429 		if (trans->direction == DMA_FROM_DEVICE)
1430 			trans->len = __le16_to_cpu(event->len);
1431 		else
1432 			gsi_trans_tx_completed(trans);
1433 
1434 		gsi_trans_move_complete(trans);
1435 
1436 		/* Move on to the next event and transaction */
1437 		if (--event_avail)
1438 			event++;
1439 		else
1440 			event = gsi_ring_virt(ring, 0);
1441 	} while (event != event_done);
1442 
1443 	/* Tell the hardware we've handled these events */
1444 	gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
1445 }
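
/* Wrap-around example (numbers hypothetical): with count = 16,
 * old_index = 14 and index = 3, event_avail = 16 - 14 % 16 = 2, so we
 * process entries 14 and 15, wrap to entry 0, and continue through
 * entry 2, stopping when we reach (unfilled) entry 3.
 */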
1446 
1447 /* Initialize a ring, including allocating DMA memory for its entries */
1448 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
1449 {
1450 	u32 size = count * GSI_RING_ELEMENT_SIZE;
1451 	struct device *dev = gsi->dev;
1452 	dma_addr_t addr;
1453 
1454 	/* Hardware requires a 2^n ring size, with alignment equal to size.
1455 	 * The DMA address returned by dma_alloc_coherent() is guaranteed to
1456 	 * be a power-of-2 number of pages, which satisfies the requirement.
1457 	 */
1458 	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
1459 	if (!ring->virt)
1460 		return -ENOMEM;
1461 
1462 	ring->addr = addr;
1463 	ring->count = count;
1464 	ring->index = 0;
1465 
1466 	return 0;
1467 }
1468 
1469 /* Free a previously-allocated ring */
1470 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
1471 {
1472 	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
1473 
1474 	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
1475 }
1476 
1477 /* Allocate an available event ring id */
1478 static int gsi_evt_ring_id_alloc(struct gsi *gsi)
1479 {
1480 	u32 evt_ring_id;
1481 
1482 	if (gsi->event_bitmap == ~0U) {
1483 		dev_err(gsi->dev, "event rings exhausted\n");
1484 		return -ENOSPC;
1485 	}
1486 
1487 	evt_ring_id = ffz(gsi->event_bitmap);
1488 	gsi->event_bitmap |= BIT(evt_ring_id);
1489 
1490 	return (int)evt_ring_id;
1491 }
1492 
1493 /* Free a previously-allocated event ring id */
1494 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
1495 {
1496 	gsi->event_bitmap &= ~BIT(evt_ring_id);
1497 }
1498 
1499 /* Ring a channel doorbell, reporting the first un-filled entry */
1500 void gsi_channel_doorbell(struct gsi_channel *channel)
1501 {
1502 	struct gsi_ring *tre_ring = &channel->tre_ring;
1503 	u32 channel_id = gsi_channel_id(channel);
1504 	struct gsi *gsi = channel->gsi;
1505 	u32 val;
1506 
1507 	/* Note: index *must* be used modulo the ring count here */
1508 	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
1509 	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
1510 }
1511 
1512 /* Consult hardware, move newly completed transactions to completed state */
1513 void gsi_channel_update(struct gsi_channel *channel)
1514 {
1515 	u32 evt_ring_id = channel->evt_ring_id;
1516 	struct gsi *gsi = channel->gsi;
1517 	struct gsi_evt_ring *evt_ring;
1518 	struct gsi_trans *trans;
1519 	struct gsi_ring *ring;
1520 	u32 offset;
1521 	u32 index;
1522 
1523 	evt_ring = &gsi->evt_ring[evt_ring_id];
1524 	ring = &evt_ring->ring;
1525 
1526 	/* See if there's anything new to process; if not, we're done.  Note
1527 	 * that index always refers to an entry *within* the event ring.
1528 	 */
1529 	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
1530 	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
1531 	if (index == ring->index % ring->count)
1532 		return;
1533 
1534 	/* Get the transaction for the latest completed event. */
1535 	trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
1536 	if (!trans)
1537 		return;
1538 
1539 	/* For RX channels, update each completed transaction with the number
1540 	 * of bytes that were actually received.  For TX channels, report
1541 	 * the number of transactions and bytes this completion represents
1542 	 * up the network stack.
1543 	 */
1544 	gsi_evt_ring_update(gsi, evt_ring_id, index);
1545 }
1546 
1547 /**
1548  * gsi_channel_poll_one() - Return a single completed transaction on a channel
1549  * @channel:	Channel to be polled
1550  *
1551  * Return:	Transaction pointer, or null if none are available
1552  *
1553  * This function returns the first of a channel's completed transactions.
1554  * If no transactions are in completed state, the hardware is consulted to
1555  * determine whether any new transactions have completed.  If so, they're
1556  * moved to completed state and the first such transaction is returned.
1557  * If there are no more completed transactions, a null pointer is returned.
1558  */
1559 static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
1560 {
1561 	struct gsi_trans *trans;
1562 
1563 	/* Get the first completed transaction */
1564 	trans = gsi_channel_trans_complete(channel);
1565 	if (trans)
1566 		gsi_trans_move_polled(trans);
1567 
1568 	return trans;
1569 }
1570 
1571 /**
1572  * gsi_channel_poll() - NAPI poll function for a channel
1573  * @napi:	NAPI structure for the channel
1574  * @budget:	Budget supplied by NAPI core
1575  *
1576  * Return:	Number of items polled (<= budget)
1577  *
1578  * Single transactions completed by hardware are polled until either
1579  * the budget is exhausted, or there are no more.  Each transaction
1580  * polled is passed to gsi_trans_complete(), to perform remaining
1581  * completion processing and retire/free the transaction.
1582  */
1583 static int gsi_channel_poll(struct napi_struct *napi, int budget)
1584 {
1585 	struct gsi_channel *channel;
1586 	int count;
1587 
1588 	channel = container_of(napi, struct gsi_channel, napi);
1589 	for (count = 0; count < budget; count++) {
1590 		struct gsi_trans *trans;
1591 
1592 		trans = gsi_channel_poll_one(channel);
1593 		if (!trans)
1594 			break;
1595 		gsi_trans_complete(trans);
1596 	}
1597 
1598 	if (count < budget && napi_complete(napi))
1599 		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
1600 
1601 	return count;
1602 }
1603 
1604 /* The event bitmap represents which event ids are available for allocation.
1605  * Set bits are not available; clear bits can be used.  This function
1606  * initializes the map so all events supported by the hardware are available,
1607  * then precludes any reserved events from being allocated.
1608  */
1609 static u32 gsi_event_bitmap_init(u32 evt_ring_max)
1610 {
1611 	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
1612 
1613 	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
1614 
1615 	return event_bitmap;
1616 }
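
/* Example (ring count hypothetical): with evt_ring_max = 20,
 * GENMASK(BITS_PER_LONG - 1, 20) marks event ring ids 20 and above
 * unavailable, and the MHI range reserves ids 10..16, leaving ids
 * 0..9 and 17..19 free for gsi_evt_ring_id_alloc().
 */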
1617 
1618 /* Setup function for a single channel */
1619 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
1620 {
1621 	struct gsi_channel *channel = &gsi->channel[channel_id];
1622 	u32 evt_ring_id = channel->evt_ring_id;
1623 	int ret;
1624 
1625 	if (!gsi_channel_initialized(channel))
1626 		return 0;
1627 
1628 	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1629 	if (ret)
1630 		return ret;
1631 
1632 	gsi_evt_ring_program(gsi, evt_ring_id);
1633 
1634 	ret = gsi_channel_alloc_command(gsi, channel_id);
1635 	if (ret)
1636 		goto err_evt_ring_de_alloc;
1637 
1638 	gsi_channel_program(channel, true);
1639 
1640 	if (channel->toward_ipa)
1641 		netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
1642 				  gsi_channel_poll);
1643 	else
1644 		netif_napi_add(&gsi->dummy_dev, &channel->napi,
1645 			       gsi_channel_poll);
1646 
1647 	return 0;
1648 
1649 err_evt_ring_de_alloc:
1650 	/* We've done nothing with the event ring yet so don't reset */
1651 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1652 
1653 	return ret;
1654 }
1655 
1656 /* Inverse of gsi_channel_setup_one() */
1657 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1658 {
1659 	struct gsi_channel *channel = &gsi->channel[channel_id];
1660 	u32 evt_ring_id = channel->evt_ring_id;
1661 
1662 	if (!gsi_channel_initialized(channel))
1663 		return;
1664 
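	/* Stop NAPI polling before the channel and its event ring go away */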
1665 	netif_napi_del(&channel->napi);
1666 
1667 	gsi_channel_de_alloc_command(gsi, channel_id);
1668 	gsi_evt_ring_reset_command(gsi, evt_ring_id);
1669 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1670 }
1671 
1672 /* We use generic commands only to operate on modem channels.  We don't have
1673  * the ability to determine channel state for a modem channel, so we simply
1674  * issue the command and wait for it to complete.
1675  */
1676 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1677 			       enum gsi_generic_cmd_opcode opcode,
1678 			       u8 params)
1679 {
1680 	bool timeout;
1681 	u32 val;
1682 
1683 	/* The error global interrupt type is always enabled (until we tear
1684 	 * down), so we will keep it enabled.
1685 	 *
1686 	 * A generic EE command completes with a GSI global interrupt of
1687 	 * type GP_INT1.  We only perform one generic command at a time
1688 	 * (to allocate, halt, or enable/disable flow control on a modem
1689 	 * channel), and only from this function.  So we enable the GP_INT1
1690 	 * IRQ type here, and disable it again after the command completes.
1691 	 */
1692 	val = ERROR_INT | GP_INT1;
1693 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1694 
1695 	/* First zero the result code field */
1696 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1697 	val &= ~GENERIC_EE_RESULT_FMASK;
1698 	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1699 
1700 	/* Now issue the command */
1701 	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1702 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1703 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1704 	if (gsi->version >= IPA_VERSION_4_11)
1705 		val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
1706 
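	/* gsi_command() returns true only if the command completes, so
	 * a false return here means we gave up waiting for GP_INT1.
	 */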
1707 	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
1708 
1709 	/* Disable the GP_INT1 IRQ type again */
1710 	iowrite32(ERROR_INT, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1711 
1712 	if (!timeout)
1713 		return gsi->result;
1714 
1715 	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1716 		opcode, channel_id);
1717 
1718 	return -ETIMEDOUT;
1719 }
1720 
1721 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1722 {
1723 	return gsi_generic_command(gsi, channel_id,
1724 				   GSI_GENERIC_ALLOCATE_CHANNEL, 0);
1725 }
1726 
1727 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1728 {
1729 	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
1730 	int ret;
1731 
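	/* The command may complete with -EAGAIN; retry it up to
	 * GSI_CHANNEL_MODEM_HALT_RETRIES times before giving up.
	 */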
1732 	do
1733 		ret = gsi_generic_command(gsi, channel_id,
1734 					  GSI_GENERIC_HALT_CHANNEL, 0);
1735 	while (ret == -EAGAIN && retries--);
1736 
1737 	if (ret)
1738 		dev_err(gsi->dev, "error %d halting modem channel %u\n",
1739 			ret, channel_id);
1740 }
1741 
1742 /* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
1743 void
1744 gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
1745 {
1746 	u32 retries = 0;
1747 	u32 command;
1748 	int ret;
1749 
1750 	command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
1751 			 : GSI_GENERIC_DISABLE_FLOW_CONTROL;
1752 	/* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
1753 	 * is underway.  In this case we need to retry the command.
1754 	 */
1755 	if (!enable && gsi->version >= IPA_VERSION_4_11)
1756 		retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
1757 
1758 	do
1759 		ret = gsi_generic_command(gsi, channel_id, command, 0);
1760 	while (ret == -EAGAIN && retries--);
1761 
1762 	if (ret)
1763 		dev_err(gsi->dev,
1764 			"error %d %sabling modem channel %u flow control\n",
1765 			ret, enable ? "en" : "dis", channel_id);
1766 }
1767 
1768 /* Setup function for channels */
1769 static int gsi_channel_setup(struct gsi *gsi)
1770 {
1771 	u32 channel_id = 0;
1772 	u32 mask;
1773 	int ret;
1774 
1775 	gsi_irq_enable(gsi);
1776 
1777 	mutex_lock(&gsi->mutex);
1778 
1779 	do {
1780 		ret = gsi_channel_setup_one(gsi, channel_id);
1781 		if (ret)
1782 			goto err_unwind;
1783 	} while (++channel_id < gsi->channel_count);
1784 
1785 	/* Make sure no channels were defined that hardware does not support */
1786 	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1787 		struct gsi_channel *channel = &gsi->channel[channel_id++];
1788 
1789 		if (!gsi_channel_initialized(channel))
1790 			continue;
1791 
1792 		ret = -EINVAL;
1793 		dev_err(gsi->dev, "channel %u not supported by hardware\n",
1794 			channel_id - 1);
1795 		channel_id = gsi->channel_count;
1796 		goto err_unwind;
1797 	}
1798 
1799 	/* Allocate modem channels if necessary */
1800 	mask = gsi->modem_channel_bitmap;
1801 	while (mask) {
1802 		u32 modem_channel_id = __ffs(mask);
1803 
1804 		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1805 		if (ret)
1806 			goto err_unwind_modem;
1807 
1808 		/* Clear bit from mask only after success (for unwind) */
1809 		mask ^= BIT(modem_channel_id);
1810 	}
1811 
1812 	mutex_unlock(&gsi->mutex);
1813 
1814 	return 0;
1815 
1816 err_unwind_modem:
1817 	/* Compute which modem channels need to be deallocated */
1818 	mask ^= gsi->modem_channel_bitmap;
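	/* (Bits were cleared from mask as channels were allocated, so
	 * this XOR leaves set exactly those that must now be halted.)
	 */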
1819 	while (mask) {
1820 		channel_id = __fls(mask);
1821 
1822 		mask ^= BIT(channel_id);
1823 
1824 		gsi_modem_channel_halt(gsi, channel_id);
1825 	}
1826 
1827 err_unwind:
1828 	while (channel_id--)
1829 		gsi_channel_teardown_one(gsi, channel_id);
1830 
1831 	mutex_unlock(&gsi->mutex);
1832 
1833 	gsi_irq_disable(gsi);
1834 
1835 	return ret;
1836 }
1837 
1838 /* Inverse of gsi_channel_setup() */
1839 static void gsi_channel_teardown(struct gsi *gsi)
1840 {
1841 	u32 mask = gsi->modem_channel_bitmap;
1842 	u32 channel_id;
1843 
1844 	mutex_lock(&gsi->mutex);
1845 
1846 	while (mask) {
1847 		channel_id = __fls(mask);
1848 
1849 		mask ^= BIT(channel_id);
1850 
1851 		gsi_modem_channel_halt(gsi, channel_id);
1852 	}
1853 
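	/* Tear down the channels this driver set up, highest id first */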
1854 	channel_id = gsi->channel_count - 1;
1855 	do
1856 		gsi_channel_teardown_one(gsi, channel_id);
1857 	while (channel_id--);
1858 
1859 	mutex_unlock(&gsi->mutex);
1860 
1861 	gsi_irq_disable(gsi);
1862 }
1863 
1864 /* Disable all GSI interrupts initially, then install the IRQ handler */
1865 static int gsi_irq_setup(struct gsi *gsi)
1866 {
1867 	int ret;
1868 
1869 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1870 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1871 
1872 	/* Disable all interrupt types */
1873 	gsi_irq_type_update(gsi, 0);
1874 
1875 	/* Clear all type-specific interrupt masks */
1876 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
1877 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
1878 	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1879 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
1880 
1881 	/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
1882 	if (gsi->version > IPA_VERSION_3_1) {
1883 		u32 offset;
1884 
1885 		/* These registers are in the non-adjusted address range */
1886 		offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
1887 		iowrite32(0, gsi->virt_raw + offset);
1888 		offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
1889 		iowrite32(0, gsi->virt_raw + offset);
1890 	}
1891 
1892 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
1893 
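	/* All interrupt sources are now masked, so it's safe to install
	 * the handler.
	 */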
1894 	ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
1895 	if (ret)
1896 		dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
1897 
1898 	return ret;
1899 }
1900 
1901 static void gsi_irq_teardown(struct gsi *gsi)
1902 {
1903 	free_irq(gsi->irq, gsi);
1904 }
1905 
1906 /* Get # supported channels and event rings; there is no gsi_ring_teardown() */
1907 static int gsi_ring_setup(struct gsi *gsi)
1908 {
1909 	struct device *dev = gsi->dev;
1910 	u32 count;
1911 	u32 val;
1912 
1913 	if (gsi->version < IPA_VERSION_3_5_1) {
1914 		/* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
1915 		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1916 		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1917 
1918 		return 0;
1919 	}
1920 
1921 	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1922 
1923 	count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1924 	if (!count) {
1925 		dev_err(dev, "GSI reports zero channels supported\n");
1926 		return -EINVAL;
1927 	}
1928 	if (count > GSI_CHANNEL_COUNT_MAX) {
1929 		dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
1930 			 GSI_CHANNEL_COUNT_MAX, count);
1931 		count = GSI_CHANNEL_COUNT_MAX;
1932 	}
1933 	gsi->channel_count = count;
1934 
1935 	count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1936 	if (!count) {
1937 		dev_err(dev, "GSI reports zero event rings supported\n");
1938 		return -EINVAL;
1939 	}
1940 	if (count > GSI_EVT_RING_COUNT_MAX) {
1941 		dev_warn(dev,
1942 			 "limiting to %u event rings; hardware supports %u\n",
1943 			 GSI_EVT_RING_COUNT_MAX, count);
1944 		count = GSI_EVT_RING_COUNT_MAX;
1945 	}
1946 	gsi->evt_ring_count = count;
1947 
1948 	return 0;
1949 }
1950 
1951 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
1952 int gsi_setup(struct gsi *gsi)
1953 {
1954 	u32 val;
1955 	int ret;
1956 
1957 	/* Here is where we first touch the GSI hardware */
1958 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1959 	if (!(val & ENABLED_FMASK)) {
1960 		dev_err(gsi->dev, "GSI has not been enabled\n");
1961 		return -EIO;
1962 	}
1963 
1964 	ret = gsi_irq_setup(gsi);
1965 	if (ret)
1966 		return ret;
1967 
1968 	ret = gsi_ring_setup(gsi);	/* No matching teardown required */
1969 	if (ret)
1970 		goto err_irq_teardown;
1971 
1972 	/* Initialize the error log */
1973 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1974 
1975 	ret = gsi_channel_setup(gsi);
1976 	if (ret)
1977 		goto err_irq_teardown;
1978 
1979 	return 0;
1980 
1981 err_irq_teardown:
1982 	gsi_irq_teardown(gsi);
1983 
1984 	return ret;
1985 }
1986 
1987 /* Inverse of gsi_setup() */
1988 void gsi_teardown(struct gsi *gsi)
1989 {
1990 	gsi_channel_teardown(gsi);
1991 	gsi_irq_teardown(gsi);
1992 }
1993 
1994 /* Initialize a channel's event ring */
1995 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1996 {
1997 	struct gsi *gsi = channel->gsi;
1998 	struct gsi_evt_ring *evt_ring;
1999 	int ret;
2000 
2001 	ret = gsi_evt_ring_id_alloc(gsi);
2002 	if (ret < 0)
2003 		return ret;
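	/* A non-negative return from the allocator is the event ring id */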
2004 	channel->evt_ring_id = ret;
2005 
2006 	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
2007 	evt_ring->channel = channel;
2008 
2009 	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
2010 	if (!ret)
2011 		return 0;	/* Success! */
2012 
2013 	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
2014 		ret, gsi_channel_id(channel));
2015 
2016 	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
2017 
2018 	return ret;
2019 }
2020 
2021 /* Inverse of gsi_channel_evt_ring_init() */
2022 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
2023 {
2024 	u32 evt_ring_id = channel->evt_ring_id;
2025 	struct gsi *gsi = channel->gsi;
2026 	struct gsi_evt_ring *evt_ring;
2027 
2028 	evt_ring = &gsi->evt_ring[evt_ring_id];
2029 	gsi_ring_free(gsi, &evt_ring->ring);
2030 	gsi_evt_ring_id_free(gsi, evt_ring_id);
2031 }
2032 
2033 static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
2034 				   const struct ipa_gsi_endpoint_data *data)
2035 {
2036 	const struct gsi_channel_data *channel_data;
2037 	u32 channel_id = data->channel_id;
2038 	struct device *dev = gsi->dev;
2039 
2040 	/* Make sure channel ids are in the range driver supports */
2041 	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
2042 		dev_err(dev, "bad channel id %u; must be less than %u\n",
2043 			channel_id, GSI_CHANNEL_COUNT_MAX);
2044 		return false;
2045 	}
2046 
2047 	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
2048 		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
2049 		return false;
2050 	}
2051 
2052 	if (command && !data->toward_ipa) {
2053 		dev_err(dev, "command channel %u is not TX\n", channel_id);
2054 		return false;
2055 	}
2056 
2057 	channel_data = &data->channel;
2058 
2059 	if (!channel_data->tlv_count ||
2060 	    channel_data->tlv_count > GSI_TLV_MAX) {
2061 		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
2062 			channel_id, channel_data->tlv_count, GSI_TLV_MAX);
2063 		return false;
2064 	}
2065 
2066 	if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
2067 		dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
2068 			channel_id, IPA_COMMAND_TRANS_TRE_MAX,
2069 			channel_data->tlv_count);
2070 		return false;
2071 	}
2072 
2073 	/* We have to allow at least one maximally-sized transaction to
2074 	 * be outstanding (which would use tlv_count TREs).  Given how
2075 	 * gsi_channel_tre_max() is computed, tre_count has to be almost
2076 	 * twice the TLV FIFO size to satisfy this requirement.
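 * For example, a hypothetical tlv_count of 16 would require a
 * tre_count of at least 31.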
2077 	 */
2078 	if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
2079 		dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
2080 			channel_id, channel_data->tre_count,
2081 			channel_data->tlv_count);
2082 		return false;
2083 	}
2084 
2085 	if (!is_power_of_2(channel_data->tre_count)) {
2086 		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
2087 			channel_id, channel_data->tre_count);
2088 		return false;
2089 	}
2090 
2091 	if (!is_power_of_2(channel_data->event_count)) {
2092 		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
2093 			channel_id, channel_data->event_count);
2094 		return false;
2095 	}
2096 
2097 	return true;
2098 }
2099 
2100 /* Init function for a single channel */
2101 static int gsi_channel_init_one(struct gsi *gsi,
2102 				const struct ipa_gsi_endpoint_data *data,
2103 				bool command)
2104 {
2105 	struct gsi_channel *channel;
2106 	u32 tre_count;
2107 	int ret;
2108 
2109 	if (!gsi_channel_data_valid(gsi, command, data))
2110 		return -EINVAL;
2111 
2112 	/* Worst case we need an event for every outstanding TRE */
2113 	if (data->channel.tre_count > data->channel.event_count) {
2114 		tre_count = data->channel.event_count;
2115 		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
2116 			 data->channel_id, tre_count);
2117 	} else {
2118 		tre_count = data->channel.tre_count;
2119 	}
2120 
2121 	channel = &gsi->channel[data->channel_id];
2122 	memset(channel, 0, sizeof(*channel));
2123 
2124 	channel->gsi = gsi;
2125 	channel->toward_ipa = data->toward_ipa;
2126 	channel->command = command;
2127 	channel->trans_tre_max = data->channel.tlv_count;
2128 	channel->tre_count = tre_count;
2129 	channel->event_count = data->channel.event_count;
2130 
2131 	ret = gsi_channel_evt_ring_init(channel);
2132 	if (ret)
2133 		goto err_clear_gsi;
2134 
2135 	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
2136 	if (ret) {
2137 		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
2138 			ret, data->channel_id);
2139 		goto err_channel_evt_ring_exit;
2140 	}
2141 
2142 	ret = gsi_channel_trans_init(gsi, data->channel_id);
2143 	if (ret)
2144 		goto err_ring_free;
2145 
2146 	if (command) {
2147 		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2148 
2149 		ret = ipa_cmd_pool_init(channel, tre_max);
2150 	}
2151 	if (!ret)
2152 		return 0;	/* Success! */
2153 
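	/* Error unwind: undo the steps above in reverse order */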
2154 	gsi_channel_trans_exit(channel);
2155 err_ring_free:
2156 	gsi_ring_free(gsi, &channel->tre_ring);
2157 err_channel_evt_ring_exit:
2158 	gsi_channel_evt_ring_exit(channel);
2159 err_clear_gsi:
2160 	channel->gsi = NULL;	/* Mark it not (fully) initialized */
2161 
2162 	return ret;
2163 }
2164 
2165 /* Inverse of gsi_channel_init_one() */
2166 static void gsi_channel_exit_one(struct gsi_channel *channel)
2167 {
2168 	if (!gsi_channel_initialized(channel))
2169 		return;
2170 
2171 	if (channel->command)
2172 		ipa_cmd_pool_exit(channel);
2173 	gsi_channel_trans_exit(channel);
2174 	gsi_ring_free(channel->gsi, &channel->tre_ring);
2175 	gsi_channel_evt_ring_exit(channel);
2176 }
2177 
2178 /* Init function for channels */
2179 static int gsi_channel_init(struct gsi *gsi, u32 count,
2180 			    const struct ipa_gsi_endpoint_data *data)
2181 {
2182 	bool modem_alloc;
2183 	int ret = 0;
2184 	u32 i;
2185 
2186 	/* IPA v4.2 requires the AP to allocate channels for the modem */
2187 	modem_alloc = gsi->version == IPA_VERSION_4_2;
2188 
2189 	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
2190 	gsi->ieob_enabled_bitmap = 0;
2191 
2192 	/* The endpoint data array is indexed by endpoint name */
2193 	for (i = 0; i < count; i++) {
2194 		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
2195 
2196 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2197 			continue;	/* Skip over empty slots */
2198 
2199 		/* Mark modem channels to be allocated (hardware workaround) */
2200 		if (data[i].ee_id == GSI_EE_MODEM) {
2201 			if (modem_alloc)
2202 				gsi->modem_channel_bitmap |=
2203 						BIT(data[i].channel_id);
2204 			continue;
2205 		}
2206 
2207 		ret = gsi_channel_init_one(gsi, &data[i], command);
2208 		if (ret)
2209 			goto err_unwind;
2210 	}
2211 
2212 	return ret;
2213 
2214 err_unwind:
2215 	while (i--) {
2216 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2217 			continue;
2218 		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2219 			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2220 			continue;
2221 		}
2222 		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2223 	}
2224 
2225 	return ret;
2226 }
2227 
2228 /* Inverse of gsi_channel_init() */
2229 static void gsi_channel_exit(struct gsi *gsi)
2230 {
2231 	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2232 
2233 	do
2234 		gsi_channel_exit_one(&gsi->channel[channel_id]);
2235 	while (channel_id--);
2236 	gsi->modem_channel_bitmap = 0;
2237 }
2238 
2239 /* Init function for GSI.  GSI hardware does not need to be "ready" */
2240 int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2241 	     enum ipa_version version, u32 count,
2242 	     const struct ipa_gsi_endpoint_data *data)
2243 {
2244 	struct device *dev = &pdev->dev;
2245 	struct resource *res;
2246 	resource_size_t size;
2247 	u32 adjust;
2248 	int ret;
2249 
2250 	gsi_validate_build();
2251 
2252 	gsi->dev = dev;
2253 	gsi->version = version;
2254 
2255 	/* GSI uses NAPI on all channels.  Create a dummy network device
2256 	 * for the channel NAPI contexts to be associated with.
2257 	 */
2258 	init_dummy_netdev(&gsi->dummy_dev);
2259 
2260 	/* Get GSI memory range and map it */
2261 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2262 	if (!res) {
2263 		dev_err(dev, "DT error getting \"gsi\" memory property\n");
2264 		return -ENODEV;
2265 	}
2266 
2267 	size = resource_size(res);
2268 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
2269 		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2270 		return -EINVAL;
2271 	}
2272 
2273 	/* Make sure we can make our pointer adjustment if necessary */
2274 	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
2275 	if (res->start < adjust) {
2276 		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
2277 			adjust);
2278 		return -EINVAL;
2279 	}
2280 
2281 	gsi->virt_raw = ioremap(res->start, size);
2282 	if (!gsi->virt_raw) {
2283 		dev_err(dev, "unable to remap \"gsi\" memory\n");
2284 		return -ENOMEM;
2285 	}
2286 	/* Most registers are accessed using an adjusted register range */
2287 	gsi->virt = gsi->virt_raw - adjust;
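	/* An access to (gsi->virt + offset) thus resolves to the mapped
	 * address (gsi->virt_raw + offset - adjust).
	 */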
2288 
2289 	init_completion(&gsi->completion);
2290 
2291 	ret = gsi_irq_init(gsi, pdev);	/* No matching exit required */
2292 	if (ret)
2293 		goto err_iounmap;
2294 
2295 	ret = gsi_channel_init(gsi, count, data);
2296 	if (ret)
2297 		goto err_iounmap;
2298 
2299 	mutex_init(&gsi->mutex);
2300 
2301 	return 0;
2302 
2303 err_iounmap:
2304 	iounmap(gsi->virt_raw);
2305 
2306 	return ret;
2307 }
2308 
2309 /* Inverse of gsi_init() */
2310 void gsi_exit(struct gsi *gsi)
2311 {
2312 	mutex_destroy(&gsi->mutex);
2313 	gsi_channel_exit(gsi);
2314 	iounmap(gsi->virt_raw);
2315 }
2316 
2317 /* The maximum number of outstanding TREs on a channel.  This limits
2318  * a channel's maximum number of transactions outstanding (worst case
2319  * is one TRE per transaction).
2320  *
2321  * The absolute limit is the number of TREs in the channel's TRE ring,
2322  * and in theory we should be able to use all of them.  But in practice,
2323  * doing that led to the hardware reporting exhaustion of event ring
2324  * slots for writing completion information.  So the hardware limit
2325  * would be (tre_count - 1).
2326  *
2327  * We reduce it a bit further though.  Transaction resource pools are
2328  * sized to be a little larger than this maximum, to allow resource
2329  * allocations to always be contiguous.  The number of entries in a
2330  * TRE ring buffer is a power of 2, and the extra resources in a pool
2331  * tends to nearly double the memory allocated for it.  Reducing the
2332  * tend to nearly double the memory allocated for it.  Reducing the
2333  * a pool to avoid crossing that power-of-2 boundary, and this can
2334  * substantially reduce pool memory requirements.  The number we
2335  * reduce it by matches the number added in gsi_trans_pool_init().
2336  */
2337 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2338 {
2339 	struct gsi_channel *channel = &gsi->channel[channel_id];
2340 
2341 	/* Hardware limit is channel->tre_count - 1 */
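	/* With hypothetical values tre_count = 256 and trans_tre_max = 16,
	 * this allows 256 - 15 = 241 TREs to be outstanding.
	 */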
2342 	return channel->tre_count - (channel->trans_tre_max - 1);
2343 }
2344