xref: /openbmc/linux/drivers/net/ipa/ipa_endpoint.c (revision 8b0adbe3e38dbe5aae9edf6f5159ffdca7cfbdf1)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2019-2021 Linaro Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/slab.h>
10 #include <linux/bitfield.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/dma-direction.h>
13 
14 #include "gsi.h"
15 #include "gsi_trans.h"
16 #include "ipa.h"
17 #include "ipa_data.h"
18 #include "ipa_endpoint.h"
19 #include "ipa_cmd.h"
20 #include "ipa_mem.h"
21 #include "ipa_modem.h"
22 #include "ipa_table.h"
23 #include "ipa_gsi.h"
24 #include "ipa_clock.h"
25 
26 #define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
27 
28 #define IPA_REPLENISH_BATCH	16
29 
30 /* RX buffer is 1 page (or a power-of-2 contiguous pages) */
31 #define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */
32 
33 /* The amount of RX buffer space consumed by standard skb overhead */
34 #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
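
/* Illustrative expansion of the overhead above (a sketch, assuming the
 * usual definitions of SKB_MAX_ORDER() and SKB_WITH_OVERHEAD() from
 * <linux/skbuff.h>):
 *
 *	IPA_RX_BUFFER_OVERHEAD
 *	  = PAGE_SIZE - SKB_WITH_OVERHEAD(PAGE_SIZE - NET_SKB_PAD)
 *	  = NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * That is, the overhead is the reserved headroom plus the shared info
 * structure that build_skb() places at the end of the buffer.
 */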
35 
36 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
37 #define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
38 
39 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
40 #define IPA_AGGR_TIME_LIMIT			500	/* microseconds */
41 
42 /** enum ipa_status_opcode - status element opcode hardware values */
43 enum ipa_status_opcode {
44 	IPA_STATUS_OPCODE_PACKET		= 0x01,
45 	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
46 	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
47 	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
48 };
49 
50 /** enum ipa_status_exception - status element exception type */
51 enum ipa_status_exception {
52 	/* 0 means no exception */
53 	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
54 };
55 
56 /* Status element provided by hardware */
57 struct ipa_status {
58 	u8 opcode;		/* enum ipa_status_opcode */
59 	u8 exception;		/* enum ipa_status_exception */
60 	__le16 mask;
61 	__le16 pkt_len;
62 	u8 endp_src_idx;
63 	u8 endp_dst_idx;
64 	__le32 metadata;
65 	__le32 flags1;
66 	__le64 flags2;
67 	__le32 flags3;
68 	__le32 flags4;
69 };
70 
71 /* Field masks for struct ipa_status structure fields */
72 #define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
73 #define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
74 #define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
75 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
76 #define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
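
/* Example of extracting fields from a status element using the masks
 * above (a minimal sketch; the real users are the status parsing
 * helpers later in this file):
 *
 *	const struct ipa_status *status = data;
 *	u32 src = u8_get_bits(status->endp_src_idx, IPA_STATUS_SRC_IDX_FMASK);
 *	u32 dst = u8_get_bits(status->endp_dst_idx, IPA_STATUS_DST_IDX_FMASK);
 *	bool tagged = le16_get_bits(status->mask,
 *				    IPA_STATUS_MASK_TAG_VALID_FMASK);
 *	u32 rule_id = le32_get_bits(status->flags1,
 *				    IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
 */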
77 
78 #ifdef IPA_VALIDATE
79 
80 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
81 			    const struct ipa_gsi_endpoint_data *all_data,
82 			    const struct ipa_gsi_endpoint_data *data)
83 {
84 	const struct ipa_gsi_endpoint_data *other_data;
85 	struct device *dev = &ipa->pdev->dev;
86 	enum ipa_endpoint_name other_name;
87 
88 	if (ipa_gsi_endpoint_data_empty(data))
89 		return true;
90 
91 	if (!data->toward_ipa) {
92 		if (data->endpoint.filter_support) {
93 			dev_err(dev, "filtering not supported for "
94 					"RX endpoint %u\n",
95 				data->endpoint_id);
96 			return false;
97 		}
98 
99 		return true;	/* Nothing more to check for RX */
100 	}
101 
102 	if (data->endpoint.config.status_enable) {
103 		other_name = data->endpoint.config.tx.status_endpoint;
104 		if (other_name >= count) {
105 			dev_err(dev, "status endpoint name %u out of range "
106 					"for endpoint %u\n",
107 				other_name, data->endpoint_id);
108 			return false;
109 		}
110 
111 		/* Status endpoint must be defined... */
112 		other_data = &all_data[other_name];
113 		if (ipa_gsi_endpoint_data_empty(other_data)) {
114 			dev_err(dev, "status endpoint name %u undefined "
115 					"for endpoint %u\n",
116 				other_name, data->endpoint_id);
117 			return false;
118 		}
119 
120 		/* ...and has to be an RX endpoint... */
121 		if (other_data->toward_ipa) {
122 			dev_err(dev,
123 				"status endpoint for endpoint %u not RX\n",
124 				data->endpoint_id);
125 			return false;
126 		}
127 
128 		/* ...and if it happens to be an AP endpoint... */
129 		if (other_data->ee_id == GSI_EE_AP) {
130 			/* ...make sure it has status enabled. */
131 			if (!other_data->endpoint.config.status_enable) {
132 				dev_err(dev,
133 					"status not enabled for endpoint %u\n",
134 					other_data->endpoint_id);
135 				return false;
136 			}
137 		}
138 	}
139 
140 	if (data->endpoint.config.dma_mode) {
141 		other_name = data->endpoint.config.dma_endpoint;
142 		if (other_name >= count) {
143 			dev_err(dev, "DMA endpoint name %u out of range "
144 					"for endpoint %u\n",
145 				other_name, data->endpoint_id);
146 			return false;
147 		}
148 
149 		other_data = &all_data[other_name];
150 		if (ipa_gsi_endpoint_data_empty(other_data)) {
151 			dev_err(dev, "DMA endpoint name %u undefined "
152 					"for endpoint %u\n",
153 				other_name, data->endpoint_id);
154 			return false;
155 		}
156 	}
157 
158 	return true;
159 }
160 
161 static u32 aggr_byte_limit_max(enum ipa_version version)
162 {
163 	if (version < IPA_VERSION_4_5)
164 		return field_max(aggr_byte_limit_fmask(true));
165 
166 	return field_max(aggr_byte_limit_fmask(false));
167 }
168 
169 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
170 				    const struct ipa_gsi_endpoint_data *data)
171 {
172 	const struct ipa_gsi_endpoint_data *dp = data;
173 	struct device *dev = &ipa->pdev->dev;
174 	enum ipa_endpoint_name name;
175 	u32 limit;
176 
177 	if (count > IPA_ENDPOINT_COUNT) {
178 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
179 			count, IPA_ENDPOINT_COUNT);
180 		return false;
181 	}
182 
183 	/* The aggregation byte limit defines the point at which an
184 	 * aggregation window will close.  It is programmed into the
185 	 * IPA hardware as a number of KB.  We don't use "hard byte
186 	 * limit" aggregation, which means that we need to supply
187 	 * enough space in a receive buffer to hold a complete MTU
188 	 * plus normal skb overhead *after* that aggregation byte
189 	 * limit has been crossed.
190 	 *
191 	 * This check ensures we don't define a receive buffer size
192 	 * that would exceed what we can represent in the field that
193 	 * is used to program its size.
194 	 */
195 	limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
196 	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
197 	if (limit < IPA_RX_BUFFER_SIZE) {
198 		dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
199 			IPA_RX_BUFFER_SIZE, limit);
200 		return false;
201 	}
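
	/* Worked example of the check above (a sketch with assumed values):
	 * if the aggregation byte limit field can hold at most 31, then
	 * limit is 31 * SZ_1K plus the MTU and skb overhead, which
	 * comfortably exceeds the 8192-byte IPA_RX_BUFFER_SIZE, so the
	 * check passes.  It would only fail if the receive buffer were
	 * made larger than the biggest byte limit we can program plus
	 * the MTU-sized "slop" we always leave after that limit.
	 */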
202 
203 	/* Make sure needed endpoints have defined data */
204 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
205 		dev_err(dev, "command TX endpoint not defined\n");
206 		return false;
207 	}
208 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
209 		dev_err(dev, "LAN RX endpoint not defined\n");
210 		return false;
211 	}
212 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
213 		dev_err(dev, "AP->modem TX endpoint not defined\n");
214 		return false;
215 	}
216 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
217 		dev_err(dev, "AP<-modem RX endpoint not defined\n");
218 		return false;
219 	}
220 
221 	for (name = 0; name < count; name++, dp++)
222 		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
223 			return false;
224 
225 	return true;
226 }
227 
228 #else /* !IPA_VALIDATE */
229 
230 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
231 				    const struct ipa_gsi_endpoint_data *data)
232 {
233 	return true;
234 }
235 
236 #endif /* !IPA_VALIDATE */
237 
238 /* Allocate a transaction to use on a non-command endpoint */
239 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
240 						  u32 tre_count)
241 {
242 	struct gsi *gsi = &endpoint->ipa->gsi;
243 	u32 channel_id = endpoint->channel_id;
244 	enum dma_data_direction direction;
245 
246 	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
247 
248 	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
249 }
250 
251 /* suspend_delay represents suspend for RX, delay for TX endpoints.
252  * Note that suspend is not supported starting with IPA v4.0.
253  */
254 static bool
255 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
256 {
257 	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
258 	struct ipa *ipa = endpoint->ipa;
259 	bool state;
260 	u32 mask;
261 	u32 val;
262 
263 	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
264 	 * correctly on IPA v4.2.
265 	 *
266 	 * if (endpoint->toward_ipa)
267 	 * 	assert(ipa->version != IPA_VERSION_4_2);
268 	 * else
269 	 *	assert(ipa->version < IPA_VERSION_4_0);
270 	 */
271 	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
272 
273 	val = ioread32(ipa->reg_virt + offset);
274 	/* Don't bother if it's already in the requested state */
275 	state = !!(val & mask);
276 	if (suspend_delay != state) {
277 		val ^= mask;
278 		iowrite32(val, ipa->reg_virt + offset);
279 	}
280 
281 	return state;
282 }
283 
284 /* We currently don't care what the previous state was for delay mode */
285 static void
286 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
287 {
288 	/* assert(endpoint->toward_ipa); */
289 
290 	/* Delay mode doesn't work properly for IPA v4.2 */
291 	if (endpoint->ipa->version != IPA_VERSION_4_2)
292 		(void)ipa_endpoint_init_ctrl(endpoint, enable);
293 }
294 
295 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
296 {
297 	u32 mask = BIT(endpoint->endpoint_id);
298 	struct ipa *ipa = endpoint->ipa;
299 	u32 offset;
300 	u32 val;
301 
302 	/* assert(mask & ipa->available); */
303 	offset = ipa_reg_state_aggr_active_offset(ipa->version);
304 	val = ioread32(ipa->reg_virt + offset);
305 
306 	return !!(val & mask);
307 }
308 
309 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
310 {
311 	u32 mask = BIT(endpoint->endpoint_id);
312 	struct ipa *ipa = endpoint->ipa;
313 
314 	/* assert(mask & ipa->available); */
315 	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
316 }
317 
318 /**
319  * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
320  * @endpoint:	Endpoint on which to emulate a suspend
321  *
322  * Emulate a suspend IPA interrupt to unsuspend an endpoint suspended
323  * with an open aggregation frame.  This works around a hardware issue
324  * in IPA version 3.5.1 where the suspend interrupt will not be
325  * generated when it should be.
326  */
327 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
328 {
329 	struct ipa *ipa = endpoint->ipa;
330 
331 	if (!endpoint->data->aggregation)
332 		return;
333 
334 	/* Nothing to do if the endpoint doesn't have aggregation open */
335 	if (!ipa_endpoint_aggr_active(endpoint))
336 		return;
337 
338 	/* Force close aggregation */
339 	ipa_endpoint_force_close(endpoint);
340 
341 	ipa_interrupt_simulate_suspend(ipa->interrupt);
342 }
343 
344 /* Returns previous suspend state (true means suspend was enabled) */
345 static bool
346 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
347 {
348 	bool suspended;
349 
350 	if (endpoint->ipa->version >= IPA_VERSION_4_0)
351 		return enable;	/* For IPA v4.0+, no change made */
352 
353 	/* assert(!endpoint->toward_ipa); */
354 
355 	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
356 
357 	/* A client suspended with an open aggregation frame will not
358 	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
359 	 * ipa_endpoint_suspend_aggr() handle this.
360 	 */
361 	if (enable && !suspended)
362 		ipa_endpoint_suspend_aggr(endpoint);
363 
364 	return suspended;
365 }
366 
367 /* Enable or disable delay or suspend mode on all modem endpoints */
368 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
369 {
370 	u32 endpoint_id;
371 
372 	/* DELAY mode doesn't work correctly on IPA v4.2 */
373 	if (ipa->version == IPA_VERSION_4_2)
374 		return;
375 
376 	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
377 		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
378 
379 		if (endpoint->ee_id != GSI_EE_MODEM)
380 			continue;
381 
382 		/* Set TX delay mode or RX suspend mode */
383 		if (endpoint->toward_ipa)
384 			ipa_endpoint_program_delay(endpoint, enable);
385 		else
386 			(void)ipa_endpoint_program_suspend(endpoint, enable);
387 	}
388 }
389 
390 /* Reset all modem endpoints to use the default exception endpoint */
391 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
392 {
393 	u32 initialized = ipa->initialized;
394 	struct gsi_trans *trans;
395 	u32 count;
396 
397 	/* We need one command per modem TX endpoint.  We can get an upper
398 	 * bound on that by assuming all initialized endpoints are modem->IPA.
399 	 * That won't happen, and we could be more precise, but this is fine
400 	 * for now.  We also need to end the transaction with a pipeline clear.
401 	 */
402 	count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
403 	trans = ipa_cmd_trans_alloc(ipa, count);
404 	if (!trans) {
405 		dev_err(&ipa->pdev->dev,
406 			"no transaction to reset modem exception endpoints\n");
407 		return -EBUSY;
408 	}
409 
410 	while (initialized) {
411 		u32 endpoint_id = __ffs(initialized);
412 		struct ipa_endpoint *endpoint;
413 		u32 offset;
414 
415 		initialized ^= BIT(endpoint_id);
416 
417 		/* We only reset modem TX endpoints */
418 		endpoint = &ipa->endpoint[endpoint_id];
419 		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
420 			continue;
421 
422 		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
423 
424 		/* Value written is 0, and all bits are updated.  That
425 		 * means status is disabled on the endpoint, and as a
426 		 * result all other fields in the register are ignored.
427 		 */
428 		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
429 	}
430 
431 	ipa_cmd_pipeline_clear_add(trans);
432 
433 	/* XXX This should have a 1 second timeout */
434 	gsi_trans_commit_wait(trans);
435 
436 	ipa_cmd_pipeline_clear_wait(ipa);
437 
438 	return 0;
439 }
440 
441 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
442 {
443 	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
444 	u32 val = 0;
445 
446 	/* FRAG_OFFLOAD_EN is 0 */
447 	if (endpoint->data->checksum) {
448 		if (endpoint->toward_ipa) {
449 			u32 checksum_offset;
450 
451 			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
452 					       CS_OFFLOAD_EN_FMASK);
453 			/* Checksum header offset is in 4-byte units */
454 			checksum_offset = sizeof(struct rmnet_map_header);
455 			checksum_offset /= sizeof(u32);
456 			val |= u32_encode_bits(checksum_offset,
457 					       CS_METADATA_HDR_OFFSET_FMASK);
458 		} else {
459 			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
460 					       CS_OFFLOAD_EN_FMASK);
461 		}
462 	} else {
463 		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
464 				       CS_OFFLOAD_EN_FMASK);
465 	}
466 	/* CS_GEN_QMB_MASTER_SEL is 0 */
467 
468 	iowrite32(val, endpoint->ipa->reg_virt + offset);
469 }
470 
471 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
472 {
473 	u32 offset;
474 	u32 val;
475 
476 	if (!endpoint->toward_ipa)
477 		return;
478 
479 	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
480 	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
481 
482 	iowrite32(val, endpoint->ipa->reg_virt + offset);
483 }
484 
485 /**
486  * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
487  * @endpoint:	Endpoint pointer
488  *
489  * We program QMAP endpoints so each packet received is preceded by a QMAP
490  * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
491  * packet size field, and we have the IPA hardware populate both for each
492  * received packet.  The header is configured (in the HDR_EXT register)
493  * to use big endian format.
494  *
495  * The packet size is written into the QMAP header's pkt_len field.  That
496  * location is defined here using the HDR_OFST_PKT_SIZE field.
497  *
498  * The mux_id comes from a 4-byte metadata value supplied with each packet
499  * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
500  * value that we want, in its low-order byte.  A bitmask defined in the
501  * endpoint's METADATA_MASK register defines which byte within the modem
502  * metadata contains the mux_id.  And the OFST_METADATA field programmed
503  * here indicates where the extracted byte should be placed within the QMAP
504  * header.
505  */
506 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
507 {
508 	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
509 	struct ipa *ipa = endpoint->ipa;
510 	u32 val = 0;
511 
512 	if (endpoint->data->qmap) {
513 		size_t header_size = sizeof(struct rmnet_map_header);
514 		enum ipa_version version = ipa->version;
515 
516 		/* We might supply a checksum header after the QMAP header */
517 		if (endpoint->toward_ipa && endpoint->data->checksum)
518 			header_size += sizeof(struct rmnet_map_ul_csum_header);
519 		val |= ipa_header_size_encoded(version, header_size);
520 
521 		/* Define how to fill fields in a received QMAP header */
522 		if (!endpoint->toward_ipa) {
523 			u32 offset;	/* Field offset within header */
524 
525 			/* Where IPA will write the metadata value */
526 			offset = offsetof(struct rmnet_map_header, mux_id);
527 			val |= ipa_metadata_offset_encoded(version, offset);
528 
529 			/* Where IPA will write the length */
530 			offset = offsetof(struct rmnet_map_header, pkt_len);
531 			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
532 			if (version >= IPA_VERSION_4_5)
533 				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
534 
535 			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
536 			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
537 		}
538 		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
539 		val |= HDR_OFST_METADATA_VALID_FMASK;
540 
541 		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
542 		/* HDR_A5_MUX is 0 */
543 		/* HDR_LEN_INC_DEAGG_HDR is 0 */
544 		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
545 	}
546 
547 	iowrite32(val, ipa->reg_virt + offset);
548 }
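
/* Resulting RX QMAP header this programs (a sketch, assuming the standard
 * struct rmnet_map_header layout from <linux/if_rmnet.h>):
 *
 *	byte 0:		flags/pad length (left 0 by IPA)
 *	byte 1:		mux_id (IPA writes the masked metadata byte here)
 *	bytes 2-3:	pkt_len (IPA writes the packet length, big endian
 *			per the HDR_EXT endianness setting)
 *
 * With offsetof(mux_id) == 1 and offsetof(pkt_len) == 2, those are the
 * values encoded into OFST_METADATA and HDR_OFST_PKT_SIZE above.
 */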
549 
550 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
551 {
552 	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
553 	u32 pad_align = endpoint->data->rx.pad_align;
554 	struct ipa *ipa = endpoint->ipa;
555 	u32 val = 0;
556 
557 	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
558 
559 	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
560 	 * driver assumes this field is meaningful in packets it receives,
561 	 * and assumes the header's payload length includes that padding.
562 	 * The RMNet driver does *not* pad packets it sends, however, so
563 	 * the pad field (although 0) should be ignored.
564 	 */
565 	if (endpoint->data->qmap && !endpoint->toward_ipa) {
566 		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
567 		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
568 		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
569 		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
570 	}
571 
572 	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
573 	if (!endpoint->toward_ipa)
574 		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
575 
576 	/* IPA v4.5 adds some most-significant bits to a few fields,
577 	 * two of which are defined in the HDR (not HDR_EXT) register.
578 	 */
579 	if (ipa->version >= IPA_VERSION_4_5) {
580 		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
581 		if (endpoint->data->qmap && !endpoint->toward_ipa) {
582 			u32 offset;
583 
584 			offset = offsetof(struct rmnet_map_header, pkt_len);
585 			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
586 			val |= u32_encode_bits(offset,
587 					       HDR_OFST_PKT_SIZE_MSB_FMASK);
588 			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
589 		}
590 	}
591 	iowrite32(val, ipa->reg_virt + offset);
592 }
593 
594 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
595 {
596 	u32 endpoint_id = endpoint->endpoint_id;
597 	u32 val = 0;
598 	u32 offset;
599 
600 	if (endpoint->toward_ipa)
601 		return;		/* Register not valid for TX endpoints */
602 
603 	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
604 
605 	/* Note that HDR_ENDIANNESS indicates big endian header fields */
606 	if (endpoint->data->qmap)
607 		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
608 
609 	iowrite32(val, endpoint->ipa->reg_virt + offset);
610 }
611 
612 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
613 {
614 	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
615 	u32 val;
616 
617 	if (!endpoint->toward_ipa)
618 		return;		/* Register not valid for RX endpoints */
619 
620 	if (endpoint->data->dma_mode) {
621 		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
622 		u32 dma_endpoint_id;
623 
624 		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
625 
626 		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
627 		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
628 	} else {
629 		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
630 	}
631 	/* All other bits unspecified (and 0) */
632 
633 	iowrite32(val, endpoint->ipa->reg_virt + offset);
634 }
635 
636 /* Compute the aggregation size value to use for a given buffer size */
637 static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
638 {
639 	/* We don't use "hard byte limit" aggregation, so we define the
640 	 * aggregation limit such that our buffer has enough space *after*
641 	 * that limit to receive a full MTU of data, plus overhead.
642 	 */
643 	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
644 
645 	return rx_buffer_size / SZ_1K;
646 }
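
/* Worked example (a sketch with assumed values): with the 8192-byte RX
 * buffer, an IPA_MTU of 1500, and a few hundred bytes of skb overhead,
 * rx_buffer_size ends up a bit over 6 KB, so this returns 6 and a 6 KB
 * aggregation byte limit gets programmed.  Once 6 KB of data have been
 * aggregated the frame closes, and the remaining ~2 KB of the buffer is
 * guaranteed to hold one more full MTU of data plus skb overhead.
 */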
647 
648 /* Encoded values for AGGR endpoint register fields */
649 static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
650 {
651 	if (version < IPA_VERSION_4_5)
652 		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
653 
654 	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
655 }
656 
657 /* Encode the aggregation timer limit (microseconds) based on IPA version */
658 static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
659 {
660 	u32 gran_sel;
661 	u32 fmask;
662 	u32 val;
663 
664 	if (version < IPA_VERSION_4_5) {
665 		/* We set aggregation granularity in ipa_hardware_config() */
666 		limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
667 
668 		return u32_encode_bits(limit, aggr_time_limit_fmask(true));
669 	}
670 
671 	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
672 	 * pulse generators 0 and 1 available, which were configured
673 	 * in ipa_qtime_config() to have granularity 100 usec and
674 	 * 1 msec, respectively.  Use pulse generator 0 if possible,
675 	 * otherwise fall back to pulse generator 1.
676 	 */
677 	fmask = aggr_time_limit_fmask(false);
678 	val = DIV_ROUND_CLOSEST(limit, 100);
679 	if (val > field_max(fmask)) {
680 		/* Have to use pulse generator 1 (millisecond granularity) */
681 		gran_sel = AGGR_GRAN_SEL_FMASK;
682 		val = DIV_ROUND_CLOSEST(limit, 1000);
683 	} else {
684 		/* We can use pulse generator 0 (100 usec granularity) */
685 		gran_sel = 0;
686 	}
687 
688 	return gran_sel | u32_encode_bits(val, fmask);
689 }
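
/* Worked example (a sketch): for the 500 microsecond IPA_AGGR_TIME_LIMIT
 * used below, IPA v4.5+ computes DIV_ROUND_CLOSEST(500, 100) = 5.
 * Assuming 5 fits in the time limit field (it normally does), pulse
 * generator 0 is selected (gran_sel is 0) and the encoded value is 5,
 * meaning 5 ticks of 100 microseconds.  A limit too large for that
 * field would instead be expressed in 1 millisecond ticks of pulse
 * generator 1, with AGGR_GRAN_SEL set.
 */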
690 
691 static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
692 {
693 	u32 val = enabled ? 1 : 0;
694 
695 	if (version < IPA_VERSION_4_5)
696 		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
697 
698 	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
699 }
700 
701 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
702 {
703 	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
704 	enum ipa_version version = endpoint->ipa->version;
705 	u32 val = 0;
706 
707 	if (endpoint->data->aggregation) {
708 		if (!endpoint->toward_ipa) {
709 			bool close_eof;
710 			u32 limit;
711 
712 			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
713 			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
714 
715 			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
716 			val |= aggr_byte_limit_encoded(version, limit);
717 
718 			limit = IPA_AGGR_TIME_LIMIT;
719 			val |= aggr_time_limit_encoded(version, limit);
720 
721 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
722 
723 			close_eof = endpoint->data->rx.aggr_close_eof;
724 			val |= aggr_sw_eof_active_encoded(version, close_eof);
725 
726 			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
727 		} else {
728 			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
729 					       AGGR_EN_FMASK);
730 			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
731 			/* other fields ignored */
732 		}
733 		/* AGGR_FORCE_CLOSE is 0 */
734 		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
735 	} else {
736 		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
737 		/* other fields ignored */
738 	}
739 
740 	iowrite32(val, endpoint->ipa->reg_virt + offset);
741 }
742 
743 /* Return the Qtime-based head-of-line blocking timer value that
744  * represents the given number of microseconds.  The result
745  * includes both the timer value and the selected timer granularity.
746  */
747 static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
748 {
749 	u32 gran_sel;
750 	u32 val;
751 
752 	/* IPA v4.5 expresses time limits using Qtime.  The AP has
753 	 * pulse generators 0 and 1 available, which were configured
754 	 * in ipa_qtime_config() to have granularity 100 usec and
755 	 * 1 msec, respectively.  Use pulse generator 0 if possible,
756 	 * otherwise fall back to pulse generator 1.
757 	 */
758 	val = DIV_ROUND_CLOSEST(microseconds, 100);
759 	if (val > field_max(TIME_LIMIT_FMASK)) {
760 		/* Have to use pulse generator 1 (millisecond granularity) */
761 		gran_sel = GRAN_SEL_FMASK;
762 		val = DIV_ROUND_CLOSEST(microseconds, 1000);
763 	} else {
764 		/* We can use pulse generator 0 (100 usec granularity) */
765 		gran_sel = 0;
766 	}
767 
768 	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
769 }
770 
771 /* The head-of-line blocking timer is defined as a tick count.  For
772  * IPA version 4.5 the tick count is based on the Qtimer, which is
773  * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
774  * each tick represents 128 cycles of the IPA core clock.
775  *
776  * Return the encoded value to be written to the timer register to
777  * represent the provided timeout period.  For IPA v4.2 this
778  * encodes a base and scale value, while for earlier versions the
779  * value is a simple tick count.
780  */
781 static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
782 {
783 	u32 width;
784 	u32 scale;
785 	u64 ticks;
786 	u64 rate;
787 	u32 high;
788 	u32 val;
789 
790 	if (!microseconds)
791 		return 0;	/* Nothing to compute if timer period is 0 */
792 
793 	if (ipa->version >= IPA_VERSION_4_5)
794 		return hol_block_timer_qtime_val(ipa, microseconds);
795 
796 	/* Use 64 bit arithmetic to avoid overflow... */
797 	rate = ipa_clock_rate(ipa);
798 	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
799 	/* ...but we still need to fit into a 32-bit register */
800 	WARN_ON(ticks > U32_MAX);
801 
802 	/* IPA v3.5.1 through v4.1 just record the tick count */
803 	if (ipa->version < IPA_VERSION_4_2)
804 		return (u32)ticks;
805 
806 	/* For IPA v4.2, the tick count is represented by base and
807 	 * scale fields within the 32-bit timer register, where:
808 	 *     ticks = base << scale;
809 	 * The best precision is achieved when the base value is as
810 	 * large as possible.  Find the highest set bit in the tick
811 	 * count, and extract the number of bits in the base field
812 	 * such that that high bit is included.
813 	 */
814 	high = fls(ticks);		/* 1..32 */
815 	width = HWEIGHT32(BASE_VALUE_FMASK);
816 	scale = high > width ? high - width : 0;
817 	if (scale) {
818 		/* If we're scaling, round up to get a closer result */
819 		ticks += 1 << (scale - 1);
820 		/* High bit was set, so rounding might have affected it */
821 		if (fls(ticks) != high)
822 			scale++;
823 	}
824 
825 	val = u32_encode_bits(scale, SCALE_FMASK);
826 	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
827 
828 	return val;
829 }
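
/* Worked example (a sketch using a hypothetical field width): suppose
 * the computed tick count is 1000 (fls() == 10) and BASE_VALUE is a
 * 5 bit field, so width == 5 and scale == 5.  Rounding adds
 * 1 << 4 == 16, giving 1016; fls() is still 10, so scale stays 5 and
 * we program base == 1016 >> 5 == 31 with scale == 5, which the
 * hardware expands back to roughly 31 << 5 == 992 ticks.
 */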
830 
831 /* If microseconds is 0, timeout is immediate */
832 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
833 					      u32 microseconds)
834 {
835 	u32 endpoint_id = endpoint->endpoint_id;
836 	struct ipa *ipa = endpoint->ipa;
837 	u32 offset;
838 	u32 val;
839 
840 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
841 	val = hol_block_timer_val(ipa, microseconds);
842 	iowrite32(val, ipa->reg_virt + offset);
843 }
844 
845 static void
846 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
847 {
848 	u32 endpoint_id = endpoint->endpoint_id;
849 	u32 offset;
850 	u32 val;
851 
852 	val = enable ? HOL_BLOCK_EN_FMASK : 0;
853 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
854 	iowrite32(val, endpoint->ipa->reg_virt + offset);
855 }
856 
857 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
858 {
859 	u32 i;
860 
861 	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
862 		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
863 
864 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
865 			continue;
866 
867 		ipa_endpoint_init_hol_block_timer(endpoint, 0);
868 		ipa_endpoint_init_hol_block_enable(endpoint, true);
869 	}
870 }
871 
872 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
873 {
874 	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
875 	u32 val = 0;
876 
877 	if (!endpoint->toward_ipa)
878 		return;		/* Register not valid for RX endpoints */
879 
880 	/* DEAGGR_HDR_LEN is 0 */
881 	/* PACKET_OFFSET_VALID is 0 */
882 	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
883 	/* MAX_PACKET_LEN is 0 (not enforced) */
884 
885 	iowrite32(val, endpoint->ipa->reg_virt + offset);
886 }
887 
888 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
889 {
890 	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
891 	struct ipa *ipa = endpoint->ipa;
892 	u32 val;
893 
894 	val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
895 	iowrite32(val, ipa->reg_virt + offset);
896 }
897 
898 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
899 {
900 	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
901 	u32 val = 0;
902 
903 	if (!endpoint->toward_ipa)
904 		return;		/* Register not valid for RX endpoints */
905 
906 	/* Low-order byte configures primary packet processing */
907 	val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);
908 
909 	/* Second byte configures replicated packet processing */
910 	val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
911 			       SEQ_REP_TYPE_FMASK);
912 
913 	iowrite32(val, endpoint->ipa->reg_virt + offset);
914 }
915 
916 /**
917  * ipa_endpoint_skb_tx() - Transmit a socket buffer
918  * @endpoint:	Endpoint pointer
919  * @skb:	Socket buffer to send
920  *
921  * Returns:	0 if successful, or a negative error code
922  */
923 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
924 {
925 	struct gsi_trans *trans;
926 	u32 nr_frags;
927 	int ret;
928 
929 	/* Make sure source endpoint's TLV FIFO has enough entries to
930 	 * hold the linear portion of the skb and all its fragments.
931 	 * If not, see if we can linearize it before giving up.
932 	 */
933 	nr_frags = skb_shinfo(skb)->nr_frags;
934 	if (1 + nr_frags > endpoint->trans_tre_max) {
935 		if (skb_linearize(skb))
936 			return -E2BIG;
937 		nr_frags = 0;
938 	}
939 
940 	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
941 	if (!trans)
942 		return -EBUSY;
943 
944 	ret = gsi_trans_skb_add(trans, skb);
945 	if (ret)
946 		goto err_trans_free;
947 	trans->data = skb;	/* transaction owns skb now */
948 
949 	gsi_trans_commit(trans, !netdev_xmit_more());
950 
951 	return 0;
952 
953 err_trans_free:
954 	gsi_trans_free(trans);
955 
956 	return -ENOMEM;
957 }
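
/* Worked example (a sketch with an assumed TLV FIFO size): if the
 * endpoint allows 8 TREs per transaction and an skb arrives with a
 * linear area plus 9 fragments (10 TREs needed), skb_linearize()
 * collapses it so a single TRE suffices; only if that fails is the
 * packet rejected with -E2BIG.
 */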
958 
959 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
960 {
961 	u32 endpoint_id = endpoint->endpoint_id;
962 	struct ipa *ipa = endpoint->ipa;
963 	u32 val = 0;
964 	u32 offset;
965 
966 	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
967 
968 	if (endpoint->data->status_enable) {
969 		val |= STATUS_EN_FMASK;
970 		if (endpoint->toward_ipa) {
971 			enum ipa_endpoint_name name;
972 			u32 status_endpoint_id;
973 
974 			name = endpoint->data->tx.status_endpoint;
975 			status_endpoint_id = ipa->name_map[name]->endpoint_id;
976 
977 			val |= u32_encode_bits(status_endpoint_id,
978 					       STATUS_ENDP_FMASK);
979 		}
980 		/* STATUS_LOCATION is 0, meaning status element precedes
981 		 * packet (not present for IPA v4.5)
982 		 */
983 		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
984 	}
985 
986 	iowrite32(val, ipa->reg_virt + offset);
987 }
988 
989 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
990 {
991 	struct gsi_trans *trans;
992 	bool doorbell = false;
993 	struct page *page;
994 	u32 offset;
995 	u32 len;
996 	int ret;
997 
998 	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
999 	if (!page)
1000 		return -ENOMEM;
1001 
1002 	trans = ipa_endpoint_trans_alloc(endpoint, 1);
1003 	if (!trans)
1004 		goto err_free_pages;
1005 
1006 	/* Offset the buffer to make space for skb headroom */
1007 	offset = NET_SKB_PAD;
1008 	len = IPA_RX_BUFFER_SIZE - offset;
1009 
1010 	ret = gsi_trans_page_add(trans, page, len, offset);
1011 	if (ret)
1012 		goto err_trans_free;
1013 	trans->data = page;	/* transaction owns page now */
1014 
1015 	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
1016 		doorbell = true;
1017 		endpoint->replenish_ready = 0;
1018 	}
1019 
1020 	gsi_trans_commit(trans, doorbell);
1021 
1022 	return 0;
1023 
1024 err_trans_free:
1025 	gsi_trans_free(trans);
1026 err_free_pages:
1027 	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1028 
1029 	return -ENOMEM;
1030 }
1031 
1032 /**
1033  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1034  * @endpoint:	Endpoint to be replenished
1035  * @add_one:	Whether this is replacing a just-consumed buffer
1036  *
1037  * The IPA hardware can hold a fixed number of receive buffers for an RX
1038  * endpoint, based on the number of entries in the underlying channel ring
1039  * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1040  * more receive buffers can be supplied to the hardware.  Replenishing for
1041  * an endpoint can be disabled, in which case requests to replenish a
1042  * buffer are "saved", and transferred to the backlog once it is re-enabled
1043  * again.
1044  */
1045 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
1046 {
1047 	struct gsi *gsi;
1048 	u32 backlog;
1049 
1050 	if (!endpoint->replenish_enabled) {
1051 		if (add_one)
1052 			atomic_inc(&endpoint->replenish_saved);
1053 		return;
1054 	}
1055 
1056 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
1057 		if (ipa_endpoint_replenish_one(endpoint))
1058 			goto try_again_later;
1059 	if (add_one)
1060 		atomic_inc(&endpoint->replenish_backlog);
1061 
1062 	return;
1063 
1064 try_again_later:
1065 	/* The last one didn't succeed, so fix the backlog */
1066 	backlog = atomic_inc_return(&endpoint->replenish_backlog);
1067 
1068 	if (add_one)
1069 		atomic_inc(&endpoint->replenish_backlog);
1070 
1071 	/* Whenever a receive buffer transaction completes we'll try to
1072 	 * replenish again.  It's unlikely, but if we fail to supply even
1073 	 * one buffer, nothing will trigger another replenish attempt.
1074 	 * Receive buffer transactions use one TRE, so schedule work to
1075 	 * try replenishing again if our backlog is *all* available TREs.
1076 	 */
1077 	gsi = &endpoint->ipa->gsi;
1078 	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
1079 		schedule_delayed_work(&endpoint->replenish_work,
1080 				      msecs_to_jiffies(1));
1081 }
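
/* Illustrative flow (a sketch): when a receive completes,
 * ipa_endpoint_rx_complete() calls this with add_one true.  If
 * replenishing is enabled we first drain the backlog, queueing one
 * page-sized buffer per outstanding credit, then credit the buffer
 * that was just consumed.  If replenishing is disabled (around
 * suspend or teardown, for example), the credit is parked in
 * replenish_saved and moved back to the backlog later by
 * ipa_endpoint_replenish_enable().
 */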
1082 
1083 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1084 {
1085 	struct gsi *gsi = &endpoint->ipa->gsi;
1086 	u32 max_backlog;
1087 	u32 saved;
1088 
1089 	endpoint->replenish_enabled = true;
1090 	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
1091 		atomic_add(saved, &endpoint->replenish_backlog);
1092 
1093 	/* Start replenishing if hardware currently has no buffers */
1094 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
1095 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
1096 		ipa_endpoint_replenish(endpoint, false);
1097 }
1098 
1099 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1100 {
1101 	u32 backlog;
1102 
1103 	endpoint->replenish_enabled = false;
1104 	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
1105 		atomic_add(backlog, &endpoint->replenish_saved);
1106 }
1107 
1108 static void ipa_endpoint_replenish_work(struct work_struct *work)
1109 {
1110 	struct delayed_work *dwork = to_delayed_work(work);
1111 	struct ipa_endpoint *endpoint;
1112 
1113 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1114 
1115 	ipa_endpoint_replenish(endpoint, false);
1116 }
1117 
1118 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1119 				  void *data, u32 len, u32 extra)
1120 {
1121 	struct sk_buff *skb;
1122 
1123 	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1124 	if (skb) {
1125 		skb_put(skb, len);
1126 		memcpy(skb->data, data, len);
1127 		skb->truesize += extra;
1128 	}
1129 
1130 	/* Now receive it, or drop it if there's no netdev */
1131 	if (endpoint->netdev)
1132 		ipa_modem_skb_rx(endpoint->netdev, skb);
1133 	else if (skb)
1134 		dev_kfree_skb_any(skb);
1135 }
1136 
1137 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1138 				   struct page *page, u32 len)
1139 {
1140 	struct sk_buff *skb;
1141 
1142 	/* Nothing to do if there's no netdev */
1143 	if (!endpoint->netdev)
1144 		return false;
1145 
1146 	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
1147 	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
1148 	if (skb) {
1149 		/* Reserve the headroom and account for the data */
1150 		skb_reserve(skb, NET_SKB_PAD);
1151 		skb_put(skb, len);
1152 	}
1153 
1154 	/* Receive the buffer (or record drop if unable to build it) */
1155 	ipa_modem_skb_rx(endpoint->netdev, skb);
1156 
1157 	return skb != NULL;
1158 }
1159 
1160 /* The format of a packet status element is the same for several status
1161  * types (opcodes).  Other types aren't currently supported.
1162  */
1163 static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1164 {
1165 	switch (opcode) {
1166 	case IPA_STATUS_OPCODE_PACKET:
1167 	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1168 	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1169 	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1170 		return true;
1171 	default:
1172 		return false;
1173 	}
1174 }
1175 
1176 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1177 				     const struct ipa_status *status)
1178 {
1179 	u32 endpoint_id;
1180 
1181 	if (!ipa_status_format_packet(status->opcode))
1182 		return true;
1183 	if (!status->pkt_len)
1184 		return true;
1185 	endpoint_id = u8_get_bits(status->endp_dst_idx,
1186 				  IPA_STATUS_DST_IDX_FMASK);
1187 	if (endpoint_id != endpoint->endpoint_id)
1188 		return true;
1189 
1190 	return false;	/* Don't skip this packet, process it */
1191 }
1192 
1193 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
1194 				    const struct ipa_status *status)
1195 {
1196 	struct ipa_endpoint *command_endpoint;
1197 	struct ipa *ipa = endpoint->ipa;
1198 	u32 endpoint_id;
1199 
1200 	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1201 		return false;	/* No valid tag */
1202 
1203 	/* The status contains a valid tag.  We know the packet was sent to
1204 	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1205 	 * If the packet came from the AP->command TX endpoint we know
1206 	 * this packet was sent as part of the pipeline clear process.
1207 	 */
1208 	endpoint_id = u8_get_bits(status->endp_src_idx,
1209 				  IPA_STATUS_SRC_IDX_FMASK);
1210 	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1211 	if (endpoint_id == command_endpoint->endpoint_id) {
1212 		complete(&ipa->completion);
1213 	} else {
1214 		dev_err(&ipa->pdev->dev,
1215 			"unexpected tagged packet from endpoint %u\n",
1216 			endpoint_id);
1217 	}
1218 
1219 	return true;
1220 }
1221 
1222 /* Return whether the status indicates the packet should be dropped */
1223 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
1224 				     const struct ipa_status *status)
1225 {
1226 	u32 val;
1227 
1228 	/* If the status indicates a tagged transfer, we'll drop the packet */
1229 	if (ipa_endpoint_status_tag(endpoint, status))
1230 		return true;
1231 
1232 	/* Deaggregation exceptions we drop; all other types we consume */
1233 	if (status->exception)
1234 		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1235 
1236 	/* Drop the packet if it fails to match a routing rule; otherwise no */
1237 	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1238 
1239 	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1240 }
1241 
1242 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1243 				      struct page *page, u32 total_len)
1244 {
1245 	void *data = page_address(page) + NET_SKB_PAD;
1246 	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
1247 	u32 resid = total_len;
1248 
1249 	while (resid) {
1250 		const struct ipa_status *status = data;
1251 		u32 align;
1252 		u32 len;
1253 
1254 		if (resid < sizeof(*status)) {
1255 			dev_err(&endpoint->ipa->pdev->dev,
1256 				"short message (%u bytes < %zu byte status)\n",
1257 				resid, sizeof(*status));
1258 			break;
1259 		}
1260 
1261 		/* Skip over status packets that lack packet data */
1262 		if (ipa_endpoint_status_skip(endpoint, status)) {
1263 			data += sizeof(*status);
1264 			resid -= sizeof(*status);
1265 			continue;
1266 		}
1267 
1268 		/* Compute the amount of buffer space consumed by the packet,
1269 		 * including the status element.  If the hardware is configured
1270 		 * to pad packet data to an aligned boundary, account for that.
1271 		 * And if checksum offload is enabled a trailer containing
1272 		 * computed checksum information will be appended.
1273 		 */
1274 		align = endpoint->data->rx.pad_align ? : 1;
1275 		len = le16_to_cpu(status->pkt_len);
1276 		len = sizeof(*status) + ALIGN(len, align);
1277 		if (endpoint->data->checksum)
1278 			len += sizeof(struct rmnet_map_dl_csum_trailer);
1279 
1280 		if (!ipa_endpoint_status_drop(endpoint, status)) {
1281 			void *data2;
1282 			u32 extra;
1283 			u32 len2;
1284 
1285 			/* Client receives only packet data (no status) */
1286 			data2 = data + sizeof(*status);
1287 			len2 = le16_to_cpu(status->pkt_len);
1288 
1289 			/* Have the true size reflect the extra unused space in
1290 			 * the original receive buffer.  Distribute the "cost"
1291 			 * proportionately across all aggregated packets in the
1292 			 * buffer.
1293 			 */
1294 			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1295 			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1296 		}
1297 
1298 		/* Consume status and the full packet it describes */
1299 		data += len;
1300 		resid -= len;
1301 	}
1302 }
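
/* Layout of a status-enabled RX buffer as parsed above (a sketch):
 *
 *	+--------+-----------------+-----+--------------+--------+---- ...
 *	| status | packet data     | pad | csum trailer | status | ...
 *	|        | (pkt_len bytes) |     |              |        |
 *	+--------+-----------------+-----+--------------+--------+---- ...
 *
 * The pad bytes appear only if rx.pad_align is nonzero, and the
 * checksum trailer only if checksum offload is enabled; "len" in the
 * loop accounts for whichever of these are present.
 */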
1303 
1304 /* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
1305 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1306 				     struct gsi_trans *trans)
1307 {
1308 }
1309 
1310 /* Complete transaction initiated in ipa_endpoint_replenish_one() */
1311 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1312 				     struct gsi_trans *trans)
1313 {
1314 	struct page *page;
1315 
1316 	ipa_endpoint_replenish(endpoint, true);
1317 
1318 	if (trans->cancelled)
1319 		return;
1320 
1321 	/* Parse or build a socket buffer using the actual received length */
1322 	page = trans->data;
1323 	if (endpoint->data->status_enable)
1324 		ipa_endpoint_status_parse(endpoint, page, trans->len);
1325 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1326 		trans->data = NULL;	/* Pages have been consumed */
1327 }
1328 
1329 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1330 				 struct gsi_trans *trans)
1331 {
1332 	if (endpoint->toward_ipa)
1333 		ipa_endpoint_tx_complete(endpoint, trans);
1334 	else
1335 		ipa_endpoint_rx_complete(endpoint, trans);
1336 }
1337 
1338 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1339 				struct gsi_trans *trans)
1340 {
1341 	if (endpoint->toward_ipa) {
1342 		struct ipa *ipa = endpoint->ipa;
1343 
1344 		/* Nothing to do for command transactions */
1345 		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1346 			struct sk_buff *skb = trans->data;
1347 
1348 			if (skb)
1349 				dev_kfree_skb_any(skb);
1350 		}
1351 	} else {
1352 		struct page *page = trans->data;
1353 
1354 		if (page)
1355 			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1356 	}
1357 }
1358 
1359 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1360 {
1361 	u32 val;
1362 
1363 	/* ROUTE_DIS is 0 */
1364 	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1365 	val |= ROUTE_DEF_HDR_TABLE_FMASK;
1366 	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1367 	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1368 	val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1369 
1370 	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1371 }
1372 
1373 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1374 {
1375 	ipa_endpoint_default_route_set(ipa, 0);
1376 }
1377 
1378 /**
1379  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1380  * @endpoint:	Endpoint to be reset
1381  *
1382  * If aggregation is active on an RX endpoint when a reset is performed
1383  * on its underlying GSI channel, a special sequence of actions must be
1384  * taken to ensure the IPA pipeline is properly cleared.
1385  *
1386  * Return:	0 if successful, or a negative error code
1387  */
1388 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1389 {
1390 	struct device *dev = &endpoint->ipa->pdev->dev;
1391 	struct ipa *ipa = endpoint->ipa;
1392 	struct gsi *gsi = &ipa->gsi;
1393 	bool suspended = false;
1394 	dma_addr_t addr;
1395 	u32 retries;
1396 	u32 len = 1;
1397 	void *virt;
1398 	int ret;
1399 
1400 	virt = kzalloc(len, GFP_KERNEL);
1401 	if (!virt)
1402 		return -ENOMEM;
1403 
1404 	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1405 	if (dma_mapping_error(dev, addr)) {
1406 		ret = -ENOMEM;
1407 		goto out_kfree;
1408 	}
1409 
1410 	/* Force close aggregation before issuing the reset */
1411 	ipa_endpoint_force_close(endpoint);
1412 
1413 	/* Reset and reconfigure the channel with the doorbell engine
1414 	 * disabled.  Then poll until we know aggregation is no longer
1415 	 * active.  We'll re-enable the doorbell (if appropriate) when
1416 	 * we reset again below.
1417 	 */
1418 	gsi_channel_reset(gsi, endpoint->channel_id, false);
1419 
1420 	/* Make sure the channel isn't suspended */
1421 	suspended = ipa_endpoint_program_suspend(endpoint, false);
1422 
1423 	/* Start channel and do a 1 byte read */
1424 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1425 	if (ret)
1426 		goto out_suspend_again;
1427 
1428 	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1429 	if (ret)
1430 		goto err_endpoint_stop;
1431 
1432 	/* Wait for aggregation to be closed on the channel */
1433 	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1434 	do {
1435 		if (!ipa_endpoint_aggr_active(endpoint))
1436 			break;
1437 		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1438 	} while (retries--);
1439 
1440 	/* Check one last time */
1441 	if (ipa_endpoint_aggr_active(endpoint))
1442 		dev_err(dev, "endpoint %u still active during reset\n",
1443 			endpoint->endpoint_id);
1444 
1445 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1446 
1447 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1448 	if (ret)
1449 		goto out_suspend_again;
1450 
1451 	/* Finally, reset and reconfigure the channel again (re-enabling the
1452 	 * doorbell engine if appropriate).  Sleep for 1 millisecond to
1453 	 * complete the channel reset sequence.  Finish by suspending the
1454 	 * channel again (if necessary).
1455 	 */
1456 	gsi_channel_reset(gsi, endpoint->channel_id, true);
1457 
1458 	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1459 
1460 	goto out_suspend_again;
1461 
1462 err_endpoint_stop:
1463 	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1464 out_suspend_again:
1465 	if (suspended)
1466 		(void)ipa_endpoint_program_suspend(endpoint, true);
1467 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1468 out_kfree:
1469 	kfree(virt);
1470 
1471 	return ret;
1472 }
1473 
1474 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1475 {
1476 	u32 channel_id = endpoint->channel_id;
1477 	struct ipa *ipa = endpoint->ipa;
1478 	bool special;
1479 	int ret = 0;
1480 
1481 	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1482 	 * is active, we need to handle things specially to recover.
1483 	 * All other cases just need to reset the underlying GSI channel.
1484 	 */
1485 	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1486 			endpoint->data->aggregation;
1487 	if (special && ipa_endpoint_aggr_active(endpoint))
1488 		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1489 	else
1490 		gsi_channel_reset(&ipa->gsi, channel_id, true);
1491 
1492 	if (ret)
1493 		dev_err(&ipa->pdev->dev,
1494 			"error %d resetting channel %u for endpoint %u\n",
1495 			ret, endpoint->channel_id, endpoint->endpoint_id);
1496 }
1497 
1498 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1499 {
1500 	if (endpoint->toward_ipa)
1501 		ipa_endpoint_program_delay(endpoint, false);
1502 	else
1503 		(void)ipa_endpoint_program_suspend(endpoint, false);
1504 	ipa_endpoint_init_cfg(endpoint);
1505 	ipa_endpoint_init_nat(endpoint);
1506 	ipa_endpoint_init_hdr(endpoint);
1507 	ipa_endpoint_init_hdr_ext(endpoint);
1508 	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1509 	ipa_endpoint_init_mode(endpoint);
1510 	ipa_endpoint_init_aggr(endpoint);
1511 	ipa_endpoint_init_deaggr(endpoint);
1512 	ipa_endpoint_init_rsrc_grp(endpoint);
1513 	ipa_endpoint_init_seq(endpoint);
1514 	ipa_endpoint_status(endpoint);
1515 }
1516 
1517 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1518 {
1519 	struct ipa *ipa = endpoint->ipa;
1520 	struct gsi *gsi = &ipa->gsi;
1521 	int ret;
1522 
1523 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1524 	if (ret) {
1525 		dev_err(&ipa->pdev->dev,
1526 			"error %d starting %cX channel %u for endpoint %u\n",
1527 			ret, endpoint->toward_ipa ? 'T' : 'R',
1528 			endpoint->channel_id, endpoint->endpoint_id);
1529 		return ret;
1530 	}
1531 
1532 	if (!endpoint->toward_ipa) {
1533 		ipa_interrupt_suspend_enable(ipa->interrupt,
1534 					     endpoint->endpoint_id);
1535 		ipa_endpoint_replenish_enable(endpoint);
1536 	}
1537 
1538 	ipa->enabled |= BIT(endpoint->endpoint_id);
1539 
1540 	return 0;
1541 }
1542 
1543 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1544 {
1545 	u32 mask = BIT(endpoint->endpoint_id);
1546 	struct ipa *ipa = endpoint->ipa;
1547 	struct gsi *gsi = &ipa->gsi;
1548 	int ret;
1549 
1550 	if (!(ipa->enabled & mask))
1551 		return;
1552 
1553 	ipa->enabled ^= mask;
1554 
1555 	if (!endpoint->toward_ipa) {
1556 		ipa_endpoint_replenish_disable(endpoint);
1557 		ipa_interrupt_suspend_disable(ipa->interrupt,
1558 					      endpoint->endpoint_id);
1559 	}
1560 
1561 	/* Note that if stop fails, the channel's state is not well-defined */
1562 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1563 	if (ret)
1564 		dev_err(&ipa->pdev->dev,
1565 			"error %d attempting to stop endpoint %u\n", ret,
1566 			endpoint->endpoint_id);
1567 }
1568 
1569 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1570 {
1571 	struct device *dev = &endpoint->ipa->pdev->dev;
1572 	struct gsi *gsi = &endpoint->ipa->gsi;
1573 	bool stop_channel;
1574 	int ret;
1575 
1576 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1577 		return;
1578 
1579 	if (!endpoint->toward_ipa) {
1580 		ipa_endpoint_replenish_disable(endpoint);
1581 		(void)ipa_endpoint_program_suspend(endpoint, true);
1582 	}
1583 
1584 	/* Starting with IPA v4.0, endpoints are suspended by stopping the
1585 	 * underlying GSI channel rather than using endpoint suspend mode.
1586 	 */
1587 	stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0;
1588 	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
1589 	if (ret)
1590 		dev_err(dev, "error %d suspending channel %u\n", ret,
1591 			endpoint->channel_id);
1592 }
1593 
1594 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1595 {
1596 	struct device *dev = &endpoint->ipa->pdev->dev;
1597 	struct gsi *gsi = &endpoint->ipa->gsi;
1598 	bool start_channel;
1599 	int ret;
1600 
1601 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1602 		return;
1603 
1604 	if (!endpoint->toward_ipa)
1605 		(void)ipa_endpoint_program_suspend(endpoint, false);
1606 
1607 	/* Starting with IPA v4.0, the underlying GSI channel must be
1608 	 * restarted for resume.
1609 	 */
1610 	start_channel = endpoint->ipa->version >= IPA_VERSION_4_0;
1611 	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
1612 	if (ret)
1613 		dev_err(dev, "error %d resuming channel %u\n", ret,
1614 			endpoint->channel_id);
1615 	else if (!endpoint->toward_ipa)
1616 		ipa_endpoint_replenish_enable(endpoint);
1617 }
1618 
1619 void ipa_endpoint_suspend(struct ipa *ipa)
1620 {
1621 	if (!ipa->setup_complete)
1622 		return;
1623 
1624 	if (ipa->modem_netdev)
1625 		ipa_modem_suspend(ipa->modem_netdev);
1626 
1627 	ipa_cmd_pipeline_clear(ipa);
1628 
1629 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1630 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1631 }
1632 
1633 void ipa_endpoint_resume(struct ipa *ipa)
1634 {
1635 	if (!ipa->setup_complete)
1636 		return;
1637 
1638 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1639 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1640 
1641 	if (ipa->modem_netdev)
1642 		ipa_modem_resume(ipa->modem_netdev);
1643 }
1644 
1645 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1646 {
1647 	struct gsi *gsi = &endpoint->ipa->gsi;
1648 	u32 channel_id = endpoint->channel_id;
1649 
1650 	/* Only AP endpoints get set up */
1651 	if (endpoint->ee_id != GSI_EE_AP)
1652 		return;
1653 
1654 	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
1655 	if (!endpoint->toward_ipa) {
1656 		/* RX transactions require a single TRE, so the maximum
1657 		 * backlog is the same as the maximum outstanding TREs.
1658 		 */
1659 		endpoint->replenish_enabled = false;
1660 		atomic_set(&endpoint->replenish_saved,
1661 			   gsi_channel_tre_max(gsi, endpoint->channel_id));
1662 		atomic_set(&endpoint->replenish_backlog, 0);
1663 		INIT_DELAYED_WORK(&endpoint->replenish_work,
1664 				  ipa_endpoint_replenish_work);
1665 	}
1666 
1667 	ipa_endpoint_program(endpoint);
1668 
1669 	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1670 }
1671 
1672 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1673 {
1674 	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1675 
1676 	if (!endpoint->toward_ipa)
1677 		cancel_delayed_work_sync(&endpoint->replenish_work);
1678 
1679 	ipa_endpoint_reset(endpoint);
1680 }
1681 
1682 void ipa_endpoint_setup(struct ipa *ipa)
1683 {
1684 	u32 initialized = ipa->initialized;
1685 
1686 	ipa->set_up = 0;
1687 	while (initialized) {
1688 		u32 endpoint_id = __ffs(initialized);
1689 
1690 		initialized ^= BIT(endpoint_id);
1691 
1692 		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1693 	}
1694 }
1695 
1696 void ipa_endpoint_teardown(struct ipa *ipa)
1697 {
1698 	u32 set_up = ipa->set_up;
1699 
1700 	while (set_up) {
1701 		u32 endpoint_id = __fls(set_up);
1702 
1703 		set_up ^= BIT(endpoint_id);
1704 
1705 		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1706 	}
1707 	ipa->set_up = 0;
1708 }
1709 
1710 int ipa_endpoint_config(struct ipa *ipa)
1711 {
1712 	struct device *dev = &ipa->pdev->dev;
1713 	u32 initialized;
1714 	u32 rx_base;
1715 	u32 rx_mask;
1716 	u32 tx_mask;
1717 	int ret = 0;
1718 	u32 max;
1719 	u32 val;
1720 
1721 	/* Find out about the endpoints supplied by the hardware, and ensure
1722 	 * the highest one doesn't exceed the number we support.
1723 	 */
1724 	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1725 
1726 	/* Our RX is an IPA producer */
1727 	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
1728 	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
1729 	if (max > IPA_ENDPOINT_MAX) {
1730 		dev_err(dev, "too many endpoints (%u > %u)\n",
1731 			max, IPA_ENDPOINT_MAX);
1732 		return -EINVAL;
1733 	}
1734 	rx_mask = GENMASK(max - 1, rx_base);
1735 
1736 	/* Our TX is an IPA consumer */
1737 	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
1738 	tx_mask = GENMASK(max - 1, 0);
1739 
1740 	ipa->available = rx_mask | tx_mask;
1741 
1742 	/* Check for initialized endpoints not supported by the hardware */
1743 	if (ipa->initialized & ~ipa->available) {
1744 		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1745 			ipa->initialized & ~ipa->available);
1746 		ret = -EINVAL;		/* Report other errors too */
1747 	}
1748 
1749 	initialized = ipa->initialized;
1750 	while (initialized) {
1751 		u32 endpoint_id = __ffs(initialized);
1752 		struct ipa_endpoint *endpoint;
1753 
1754 		initialized ^= BIT(endpoint_id);
1755 
1756 		/* Make sure it's pointing in the right direction */
1757 		endpoint = &ipa->endpoint[endpoint_id];
1758 		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
1759 			dev_err(dev, "endpoint id %u wrong direction\n",
1760 				endpoint_id);
1761 			ret = -EINVAL;
1762 		}
1763 	}
1764 
1765 	return ret;
1766 }
1767 
1768 void ipa_endpoint_deconfig(struct ipa *ipa)
1769 {
1770 	ipa->available = 0;	/* Nothing more to do */
1771 }
1772 
1773 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1774 				  const struct ipa_gsi_endpoint_data *data)
1775 {
1776 	struct ipa_endpoint *endpoint;
1777 
1778 	endpoint = &ipa->endpoint[data->endpoint_id];
1779 
1780 	if (data->ee_id == GSI_EE_AP)
1781 		ipa->channel_map[data->channel_id] = endpoint;
1782 	ipa->name_map[name] = endpoint;
1783 
1784 	endpoint->ipa = ipa;
1785 	endpoint->ee_id = data->ee_id;
1786 	endpoint->channel_id = data->channel_id;
1787 	endpoint->endpoint_id = data->endpoint_id;
1788 	endpoint->toward_ipa = data->toward_ipa;
1789 	endpoint->data = &data->endpoint.config;
1790 
1791 	ipa->initialized |= BIT(endpoint->endpoint_id);
1792 }
1793 
1794 void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1795 {
1796 	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1797 
1798 	memset(endpoint, 0, sizeof(*endpoint));
1799 }
1800 
1801 void ipa_endpoint_exit(struct ipa *ipa)
1802 {
1803 	u32 initialized = ipa->initialized;
1804 
1805 	while (initialized) {
1806 		u32 endpoint_id = __fls(initialized);
1807 
1808 		initialized ^= BIT(endpoint_id);
1809 
1810 		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1811 	}
1812 	memset(ipa->name_map, 0, sizeof(ipa->name_map));
1813 	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1814 }
1815 
1816 /* Returns a bitmask of endpoints that support filtering, or 0 on error */
1817 u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1818 		      const struct ipa_gsi_endpoint_data *data)
1819 {
1820 	enum ipa_endpoint_name name;
1821 	u32 filter_map;
1822 
1823 	if (!ipa_endpoint_data_valid(ipa, count, data))
1824 		return 0;	/* Error */
1825 
1826 	ipa->initialized = 0;
1827 
1828 	filter_map = 0;
1829 	for (name = 0; name < count; name++, data++) {
1830 		if (ipa_gsi_endpoint_data_empty(data))
1831 			continue;	/* Skip over empty slots */
1832 
1833 		ipa_endpoint_init_one(ipa, name, data);
1834 
1835 		if (data->endpoint.filter_support)
1836 			filter_map |= BIT(data->endpoint_id);
1837 	}
1838 
1839 	if (!ipa_filter_map_valid(ipa, filter_map))
1840 		goto err_endpoint_exit;
1841 
1842 	return filter_map;	/* Non-zero bitmask */
1843 
1844 err_endpoint_exit:
1845 	ipa_endpoint_exit(ipa);
1846 
1847 	return 0;	/* Error */
1848 }
1849