xref: /openbmc/linux/drivers/net/ipa/ipa_endpoint.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1  // SPDX-License-Identifier: GPL-2.0
2  
3  /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4   * Copyright (C) 2019-2023 Linaro Ltd.
5   */
6  
7  #include <linux/types.h>
8  #include <linux/device.h>
9  #include <linux/slab.h>
10  #include <linux/bitfield.h>
11  #include <linux/if_rmnet.h>
12  #include <linux/dma-direction.h>
13  
14  #include "gsi.h"
15  #include "gsi_trans.h"
16  #include "ipa.h"
17  #include "ipa_data.h"
18  #include "ipa_endpoint.h"
19  #include "ipa_cmd.h"
20  #include "ipa_mem.h"
21  #include "ipa_modem.h"
22  #include "ipa_table.h"
23  #include "ipa_gsi.h"
24  #include "ipa_power.h"
25  
26  /* Hardware is told about receive buffers once a "batch" has been queued */
27  #define IPA_REPLENISH_BATCH	16		/* Must be non-zero */
28  
29  /* The amount of RX buffer space consumed by standard skb overhead */
30  #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
31  
32  /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
33  #define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
34  
35  #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
36  
37  /** enum ipa_status_opcode - IPA status opcode field hardware values */
38  enum ipa_status_opcode {				/* *Not* a bitmask */
39  	IPA_STATUS_OPCODE_PACKET		= 1,
40  	IPA_STATUS_OPCODE_NEW_RULE_PACKET	= 2,
41  	IPA_STATUS_OPCODE_DROPPED_PACKET	= 4,
42  	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 8,
43  	IPA_STATUS_OPCODE_LOG			= 16,
44  	IPA_STATUS_OPCODE_DCMP			= 32,
45  	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 64,
46  };
47  
48  /** enum ipa_status_exception - IPA status exception field hardware values */
49  enum ipa_status_exception {				/* *Not* a bitmask */
50  	/* 0 means no exception */
51  	IPA_STATUS_EXCEPTION_DEAGGR		= 1,
52  	IPA_STATUS_EXCEPTION_IPTYPE		= 4,
53  	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 8,
54  	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 16,
55  	IPA_STATUS_EXCEPTION_SW_FILTER		= 32,
56  	IPA_STATUS_EXCEPTION_NAT		= 64,		/* IPv4 */
57  	IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK	= 64,		/* IPv6 */
58  	IPA_STATUS_EXCEPTION_UC			= 128,
59  	IPA_STATUS_EXCEPTION_INVALID_ENDPOINT	= 129,
60  	IPA_STATUS_EXCEPTION_HEADER_INSERT	= 136,
61  	IPA_STATUS_EXCEPTION_CHEKCSUM		= 229,
62  };
63  
64  /** enum ipa_status_mask - IPA status mask field bitmask hardware values */
65  enum ipa_status_mask {
66  	IPA_STATUS_MASK_FRAG_PROCESS		= BIT(0),
67  	IPA_STATUS_MASK_FILT_PROCESS		= BIT(1),
68  	IPA_STATUS_MASK_NAT_PROCESS		= BIT(2),
69  	IPA_STATUS_MASK_ROUTE_PROCESS		= BIT(3),
70  	IPA_STATUS_MASK_TAG_VALID		= BIT(4),
71  	IPA_STATUS_MASK_FRAGMENT		= BIT(5),
72  	IPA_STATUS_MASK_FIRST_FRAGMENT		= BIT(6),
73  	IPA_STATUS_MASK_V4			= BIT(7),
74  	IPA_STATUS_MASK_CKSUM_PROCESS		= BIT(8),
75  	IPA_STATUS_MASK_AGGR_PROCESS		= BIT(9),
76  	IPA_STATUS_MASK_DEST_EOT		= BIT(10),
77  	IPA_STATUS_MASK_DEAGGR_PROCESS		= BIT(11),
78  	IPA_STATUS_MASK_DEAGG_FIRST		= BIT(12),
79  	IPA_STATUS_MASK_SRC_EOT			= BIT(13),
80  	IPA_STATUS_MASK_PREV_EOT		= BIT(14),
81  	IPA_STATUS_MASK_BYTE_LIMIT		= BIT(15),
82  };
83  
84  /* Special IPA filter/router rule field value indicating "rule miss" */
85  #define IPA_STATUS_RULE_MISS	0x3ff	/* 10-bit filter/router rule fields */
86  
87  /** The IPA status nat_type field uses enum ipa_nat_type hardware values */
88  
89  /* enum ipa_status_field_id - IPA packet status structure field identifiers */
90  enum ipa_status_field_id {
91  	STATUS_OPCODE,			/* enum ipa_status_opcode */
92  	STATUS_EXCEPTION,		/* enum ipa_status_exception */
93  	STATUS_MASK,			/* enum ipa_status_mask (bitmask) */
94  	STATUS_LENGTH,
95  	STATUS_SRC_ENDPOINT,
96  	STATUS_DST_ENDPOINT,
97  	STATUS_METADATA,
98  	STATUS_FILTER_LOCAL,		/* Boolean */
99  	STATUS_FILTER_HASH,		/* Boolean */
100  	STATUS_FILTER_GLOBAL,		/* Boolean */
101  	STATUS_FILTER_RETAIN,		/* Boolean */
102  	STATUS_FILTER_RULE_INDEX,
103  	STATUS_ROUTER_LOCAL,		/* Boolean */
104  	STATUS_ROUTER_HASH,		/* Boolean */
105  	STATUS_UCP,			/* Boolean */
106  	STATUS_ROUTER_TABLE,
107  	STATUS_ROUTER_RULE_INDEX,
108  	STATUS_NAT_HIT,			/* Boolean */
109  	STATUS_NAT_INDEX,
110  	STATUS_NAT_TYPE,		/* enum ipa_nat_type */
111  	STATUS_TAG_LOW32,		/* Low-order 32 bits of 48-bit tag */
112  	STATUS_TAG_HIGH16,		/* High-order 16 bits of 48-bit tag */
113  	STATUS_SEQUENCE,
114  	STATUS_TIME_OF_DAY,
115  	STATUS_HEADER_LOCAL,		/* Boolean */
116  	STATUS_HEADER_OFFSET,
117  	STATUS_FRAG_HIT,		/* Boolean */
118  	STATUS_FRAG_RULE_INDEX,
119  };
120  
121  /* Size in bytes of an IPA packet status structure */
122  #define IPA_STATUS_SIZE			sizeof(__le32[8])
123  
124  /* IPA status structure decoder; looks up field values for a structure */
125  static u32 ipa_status_extract(struct ipa *ipa, const void *data,
126  			      enum ipa_status_field_id field)
127  {
128  	enum ipa_version version = ipa->version;
129  	const __le32 *word = data;
130  
131  	switch (field) {
132  	case STATUS_OPCODE:
133  		return le32_get_bits(word[0], GENMASK(7, 0));
134  	case STATUS_EXCEPTION:
135  		return le32_get_bits(word[0], GENMASK(15, 8));
136  	case STATUS_MASK:
137  		return le32_get_bits(word[0], GENMASK(31, 16));
138  	case STATUS_LENGTH:
139  		return le32_get_bits(word[1], GENMASK(15, 0));
140  	case STATUS_SRC_ENDPOINT:
141  		if (version < IPA_VERSION_5_0)
142  			return le32_get_bits(word[1], GENMASK(20, 16));
143  		return le32_get_bits(word[1], GENMASK(23, 16));
144  	/* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
145  	/* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
146  	case STATUS_DST_ENDPOINT:
147  		if (version < IPA_VERSION_5_0)
148  			return le32_get_bits(word[1], GENMASK(28, 24));
149  		return le32_get_bits(word[7], GENMASK(23, 16));
150  	/* Status word 1, bits 29-31 are reserved */
151  	case STATUS_METADATA:
152  		return le32_to_cpu(word[2]);
153  	case STATUS_FILTER_LOCAL:
154  		return le32_get_bits(word[3], GENMASK(0, 0));
155  	case STATUS_FILTER_HASH:
156  		return le32_get_bits(word[3], GENMASK(1, 1));
157  	case STATUS_FILTER_GLOBAL:
158  		return le32_get_bits(word[3], GENMASK(2, 2));
159  	case STATUS_FILTER_RETAIN:
160  		return le32_get_bits(word[3], GENMASK(3, 3));
161  	case STATUS_FILTER_RULE_INDEX:
162  		return le32_get_bits(word[3], GENMASK(13, 4));
163  	/* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
164  	case STATUS_ROUTER_LOCAL:
165  		if (version < IPA_VERSION_5_0)
166  			return le32_get_bits(word[3], GENMASK(14, 14));
167  		return le32_get_bits(word[1], GENMASK(27, 27));
168  	case STATUS_ROUTER_HASH:
169  		if (version < IPA_VERSION_5_0)
170  			return le32_get_bits(word[3], GENMASK(15, 15));
171  		return le32_get_bits(word[1], GENMASK(28, 28));
172  	case STATUS_UCP:
173  		if (version < IPA_VERSION_5_0)
174  			return le32_get_bits(word[3], GENMASK(16, 16));
175  		return le32_get_bits(word[7], GENMASK(31, 31));
176  	case STATUS_ROUTER_TABLE:
177  		if (version < IPA_VERSION_5_0)
178  			return le32_get_bits(word[3], GENMASK(21, 17));
179  		return le32_get_bits(word[3], GENMASK(21, 14));
180  	case STATUS_ROUTER_RULE_INDEX:
181  		return le32_get_bits(word[3], GENMASK(31, 22));
182  	case STATUS_NAT_HIT:
183  		return le32_get_bits(word[4], GENMASK(0, 0));
184  	case STATUS_NAT_INDEX:
185  		return le32_get_bits(word[4], GENMASK(13, 1));
186  	case STATUS_NAT_TYPE:
187  		return le32_get_bits(word[4], GENMASK(15, 14));
188  	case STATUS_TAG_LOW32:
189  		return le32_get_bits(word[4], GENMASK(31, 16)) |
190  			(le32_get_bits(word[5], GENMASK(15, 0)) << 16);
191  	case STATUS_TAG_HIGH16:
192  		return le32_get_bits(word[5], GENMASK(31, 16));
193  	case STATUS_SEQUENCE:
194  		return le32_get_bits(word[6], GENMASK(7, 0));
195  	case STATUS_TIME_OF_DAY:
196  		return le32_get_bits(word[6], GENMASK(31, 8));
197  	case STATUS_HEADER_LOCAL:
198  		return le32_get_bits(word[7], GENMASK(0, 0));
199  	case STATUS_HEADER_OFFSET:
200  		return le32_get_bits(word[7], GENMASK(10, 1));
201  	case STATUS_FRAG_HIT:
202  		return le32_get_bits(word[7], GENMASK(11, 11));
203  	case STATUS_FRAG_RULE_INDEX:
204  		return le32_get_bits(word[7], GENMASK(15, 12));
205  	/* Status word 7, bits 16-30 are reserved */
206  	/* Status word 7, bit 31 is reserved (not IPA v5.0+) */
207  	default:
208  		WARN(true, "%s: bad field_id %u\n", __func__, field);
209  		return 0;
210  	}
211  }
212  
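/* A minimal, assumed usage sketch (not part of the driver): callers hand
 * ipa_status_extract() a pointer to one IPA_STATUS_SIZE-byte status element
 * and name the field they want; the version-specific bit layout stays hidden
 * inside the helper.
 *
 *	u32 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
 *	u32 length = ipa_status_extract(ipa, data, STATUS_LENGTH);
 *	u32 dst_ep = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
 */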
213  /* Compute the aggregation size value to use for a given buffer size */
214  static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
215  {
216  	/* A hard aggregation limit will not be crossed; aggregation closes
217  	 * if saving incoming data would cross the hard byte limit boundary.
218  	 *
219  	 * With a soft limit, aggregation closes *after* the size boundary
220  	 * has been crossed.  In that case the limit must leave enough space
221  	 * after that limit to receive a full MTU of data plus overhead.
222  	 */
223  	if (!aggr_hard_limit)
224  		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
225  
226  	/* The byte limit is encoded as a number of kilobytes */
227  
228  	return rx_buffer_size / SZ_1K;
229  }
230  
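/* Worked example (sketch, with assumed sizes): for a 32768 byte receive
 * buffer and a *soft* aggregation limit, the value returned is
 * (32768 - IPA_MTU - IPA_RX_BUFFER_OVERHEAD) / SZ_1K, roughly 30, so the
 * hardware still has a full MTU plus overhead of space left after the
 * byte limit is crossed.  With a *hard* limit the full 32768 / SZ_1K = 32
 * is returned, since aggregation closes before the boundary is crossed.
 */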
231  static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
232  			    const struct ipa_gsi_endpoint_data *all_data,
233  			    const struct ipa_gsi_endpoint_data *data)
234  {
235  	const struct ipa_gsi_endpoint_data *other_data;
236  	struct device *dev = &ipa->pdev->dev;
237  	enum ipa_endpoint_name other_name;
238  
239  	if (ipa_gsi_endpoint_data_empty(data))
240  		return true;
241  
242  	if (!data->toward_ipa) {
243  		const struct ipa_endpoint_rx *rx_config;
244  		const struct reg *reg;
245  		u32 buffer_size;
246  		u32 aggr_size;
247  		u32 limit;
248  
249  		if (data->endpoint.filter_support) {
250  			dev_err(dev, "filtering not supported for "
251  					"RX endpoint %u\n",
252  				data->endpoint_id);
253  			return false;
254  		}
255  
256  		/* Nothing more to check for non-AP RX */
257  		if (data->ee_id != GSI_EE_AP)
258  			return true;
259  
260  		rx_config = &data->endpoint.config.rx;
261  
262  		/* The buffer size must hold an MTU plus overhead */
263  		buffer_size = rx_config->buffer_size;
264  		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
265  		if (buffer_size < limit) {
266  			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
267  				data->endpoint_id, buffer_size, limit);
268  			return false;
269  		}
270  
271  		if (!data->endpoint.config.aggregation) {
272  			bool result = true;
273  
274  			/* No aggregation; check for bogus aggregation data */
275  			if (rx_config->aggr_time_limit) {
276  				dev_err(dev,
277  					"time limit with no aggregation for RX endpoint %u\n",
278  					data->endpoint_id);
279  				result = false;
280  			}
281  
282  			if (rx_config->aggr_hard_limit) {
283  				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
284  					data->endpoint_id);
285  				result = false;
286  			}
287  
288  			if (rx_config->aggr_close_eof) {
289  				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
290  					data->endpoint_id);
291  				result = false;
292  			}
293  
294  			return result;	/* Nothing more to check */
295  		}
296  
297  		/* For an endpoint supporting receive aggregation, the byte
298  		 * limit defines the point at which aggregation closes.  This
299  		 * check ensures the receive buffer size doesn't result in a
300  		 * limit that exceeds what's representable in the aggregation
301  		 * byte limit field.
302  		 */
303  		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
304  					     rx_config->aggr_hard_limit);
305  		reg = ipa_reg(ipa, ENDP_INIT_AGGR);
306  
307  		limit = reg_field_max(reg, BYTE_LIMIT);
308  		if (aggr_size > limit) {
309  			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
310  				data->endpoint_id, aggr_size, limit);
311  
312  			return false;
313  		}
314  
315  		return true;	/* Nothing more to check for RX */
316  	}
317  
318  	/* Starting with IPA v4.5 sequencer replication is obsolete */
319  	if (ipa->version >= IPA_VERSION_4_5) {
320  		if (data->endpoint.config.tx.seq_rep_type) {
321  			dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
322  				data->endpoint_id);
323  			return false;
324  		}
325  	}
326  
327  	if (data->endpoint.config.status_enable) {
328  		other_name = data->endpoint.config.tx.status_endpoint;
329  		if (other_name >= count) {
330  			dev_err(dev, "status endpoint name %u out of range "
331  					"for endpoint %u\n",
332  				other_name, data->endpoint_id);
333  			return false;
334  		}
335  
336  		/* Status endpoint must be defined... */
337  		other_data = &all_data[other_name];
338  		if (ipa_gsi_endpoint_data_empty(other_data)) {
339  			dev_err(dev, "DMA endpoint name %u undefined "
340  					"for endpoint %u\n",
341  				other_name, data->endpoint_id);
342  			return false;
343  		}
344  
345  		/* ...and has to be an RX endpoint... */
346  		if (other_data->toward_ipa) {
347  			dev_err(dev,
348  				"status endpoint for endpoint %u not RX\n",
349  				data->endpoint_id);
350  			return false;
351  		}
352  
353  		/* ...and if it's to be an AP endpoint... */
354  		if (other_data->ee_id == GSI_EE_AP) {
355  			/* ...make sure it has status enabled. */
356  			if (!other_data->endpoint.config.status_enable) {
357  				dev_err(dev,
358  					"status not enabled for endpoint %u\n",
359  					other_data->endpoint_id);
360  				return false;
361  			}
362  		}
363  	}
364  
365  	if (data->endpoint.config.dma_mode) {
366  		other_name = data->endpoint.config.dma_endpoint;
367  		if (other_name >= count) {
368  			dev_err(dev, "DMA endpoint name %u out of range "
369  					"for endpoint %u\n",
370  				other_name, data->endpoint_id);
371  			return false;
372  		}
373  
374  		other_data = &all_data[other_name];
375  		if (ipa_gsi_endpoint_data_empty(other_data)) {
376  			dev_err(dev, "DMA endpoint name %u undefined "
377  					"for endpoint %u\n",
378  				other_name, data->endpoint_id);
379  			return false;
380  		}
381  	}
382  
383  	return true;
384  }
385  
386  /* Validate endpoint configuration data.  Return max defined endpoint ID */
387  static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
388  			    const struct ipa_gsi_endpoint_data *data)
389  {
390  	const struct ipa_gsi_endpoint_data *dp = data;
391  	struct device *dev = &ipa->pdev->dev;
392  	enum ipa_endpoint_name name;
393  	u32 max;
394  
395  	if (count > IPA_ENDPOINT_COUNT) {
396  		dev_err(dev, "too many endpoints specified (%u > %u)\n",
397  			count, IPA_ENDPOINT_COUNT);
398  		return 0;
399  	}
400  
401  	/* Make sure needed endpoints have defined data */
402  	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
403  		dev_err(dev, "command TX endpoint not defined\n");
404  		return 0;
405  	}
406  	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
407  		dev_err(dev, "LAN RX endpoint not defined\n");
408  		return 0;
409  	}
410  	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
411  		dev_err(dev, "AP->modem TX endpoint not defined\n");
412  		return 0;
413  	}
414  	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
415  		dev_err(dev, "AP<-modem RX endpoint not defined\n");
416  		return 0;
417  	}
418  
419  	max = 0;
420  	for (name = 0; name < count; name++, dp++) {
421  		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
422  			return 0;
423  		max = max_t(u32, max, dp->endpoint_id);
424  	}
425  
426  	return max;
427  }
428  
429  /* Allocate a transaction to use on a non-command endpoint */
430  static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
431  						  u32 tre_count)
432  {
433  	struct gsi *gsi = &endpoint->ipa->gsi;
434  	u32 channel_id = endpoint->channel_id;
435  	enum dma_data_direction direction;
436  
437  	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
438  
439  	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
440  }
441  
442  /* suspend_delay represents suspend for RX, delay for TX endpoints.
443   * Note that suspend is not supported starting with IPA v4.0, and
444   * delay mode should not be used starting with IPA v4.2.
445   */
446  static bool
447  ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
448  {
449  	struct ipa *ipa = endpoint->ipa;
450  	const struct reg *reg;
451  	u32 field_id;
452  	u32 offset;
453  	bool state;
454  	u32 mask;
455  	u32 val;
456  
457  	if (endpoint->toward_ipa)
458  		WARN_ON(ipa->version >= IPA_VERSION_4_2);
459  	else
460  		WARN_ON(ipa->version >= IPA_VERSION_4_0);
461  
462  	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
463  	offset = reg_n_offset(reg, endpoint->endpoint_id);
464  	val = ioread32(ipa->reg_virt + offset);
465  
466  	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
467  	mask = reg_bit(reg, field_id);
468  
469  	state = !!(val & mask);
470  
471  	/* Don't bother if it's already in the requested state */
472  	if (suspend_delay != state) {
473  		val ^= mask;
474  		iowrite32(val, ipa->reg_virt + offset);
475  	}
476  
477  	return state;
478  }
479  
480  /* We don't care what the previous state was for delay mode */
481  static void
482  ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
483  {
484  	/* Delay mode should not be used for IPA v4.2+ */
485  	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
486  	WARN_ON(!endpoint->toward_ipa);
487  
488  	(void)ipa_endpoint_init_ctrl(endpoint, enable);
489  }
490  
491  static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
492  {
493  	u32 endpoint_id = endpoint->endpoint_id;
494  	struct ipa *ipa = endpoint->ipa;
495  	u32 unit = endpoint_id / 32;
496  	const struct reg *reg;
497  	u32 val;
498  
499  	WARN_ON(!test_bit(endpoint_id, ipa->available));
500  
501  	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
502  	val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
503  
504  	return !!(val & BIT(endpoint_id % 32));
505  }
506  
507  static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
508  {
509  	u32 endpoint_id = endpoint->endpoint_id;
510  	u32 mask = BIT(endpoint_id % 32);
511  	struct ipa *ipa = endpoint->ipa;
512  	u32 unit = endpoint_id / 32;
513  	const struct reg *reg;
514  
515  	WARN_ON(!test_bit(endpoint_id, ipa->available));
516  
517  	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
518  	iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
519  }
520  
521  /**
522   * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
523   * @endpoint:	Endpoint on which to emulate a suspend
524   *
525   *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
526   *  with an open aggregation frame.  This is to work around a hardware
527   *  issue in IPA version 3.5.1 where the suspend interrupt will not be
528   *  generated when it should be.
529   */
530  static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
531  {
532  	struct ipa *ipa = endpoint->ipa;
533  
534  	if (!endpoint->config.aggregation)
535  		return;
536  
537  	/* Nothing to do if the endpoint doesn't have aggregation open */
538  	if (!ipa_endpoint_aggr_active(endpoint))
539  		return;
540  
541  	/* Force close aggregation */
542  	ipa_endpoint_force_close(endpoint);
543  
544  	ipa_interrupt_simulate_suspend(ipa->interrupt);
545  }
546  
547  /* Returns previous suspend state (true means suspend was enabled) */
548  static bool
549  ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
550  {
551  	bool suspended;
552  
553  	if (endpoint->ipa->version >= IPA_VERSION_4_0)
554  		return enable;	/* For IPA v4.0+, no change made */
555  
556  	WARN_ON(endpoint->toward_ipa);
557  
558  	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
559  
560  	/* A client suspended with an open aggregation frame will not
561  	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
562  	 * ipa_endpoint_suspend_aggr() handle this.
563  	 */
564  	if (enable && !suspended)
565  		ipa_endpoint_suspend_aggr(endpoint);
566  
567  	return suspended;
568  }
569  
570  /* Put all modem RX endpoints into suspend mode, and stop transmission
571   * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
572   * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
573   * control instead.
574   */
575  void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
576  {
577  	u32 endpoint_id = 0;
578  
579  	while (endpoint_id < ipa->endpoint_count) {
580  		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
581  
582  		if (endpoint->ee_id != GSI_EE_MODEM)
583  			continue;
584  
585  		if (!endpoint->toward_ipa)
586  			(void)ipa_endpoint_program_suspend(endpoint, enable);
587  		else if (ipa->version < IPA_VERSION_4_2)
588  			ipa_endpoint_program_delay(endpoint, enable);
589  		else
590  			gsi_modem_channel_flow_control(&ipa->gsi,
591  						       endpoint->channel_id,
592  						       enable);
593  	}
594  }
595  
596  /* Reset all modem endpoints to use the default exception endpoint */
597  int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
598  {
599  	struct gsi_trans *trans;
600  	u32 endpoint_id;
601  	u32 count;
602  
603  	/* We need one command per modem TX endpoint, plus the commands
604  	 * that clear the pipeline.
605  	 */
606  	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
607  	trans = ipa_cmd_trans_alloc(ipa, count);
608  	if (!trans) {
609  		dev_err(&ipa->pdev->dev,
610  			"no transaction to reset modem exception endpoints\n");
611  		return -EBUSY;
612  	}
613  
614  	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
615  		struct ipa_endpoint *endpoint;
616  		const struct reg *reg;
617  		u32 offset;
618  
619  		/* We only reset modem TX endpoints */
620  		endpoint = &ipa->endpoint[endpoint_id];
621  		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
622  			continue;
623  
624  		reg = ipa_reg(ipa, ENDP_STATUS);
625  		offset = reg_n_offset(reg, endpoint_id);
626  
627  		/* Value written is 0, and all bits are updated.  That
628  		 * means status is disabled on the endpoint, and as a
629  		 * result all other fields in the register are ignored.
630  		 */
631  		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
632  	}
633  
634  	ipa_cmd_pipeline_clear_add(trans);
635  
636  	gsi_trans_commit_wait(trans);
637  
638  	ipa_cmd_pipeline_clear_wait(ipa);
639  
640  	return 0;
641  }
642  
643  static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
644  {
645  	u32 endpoint_id = endpoint->endpoint_id;
646  	struct ipa *ipa = endpoint->ipa;
647  	enum ipa_cs_offload_en enabled;
648  	const struct reg *reg;
649  	u32 val = 0;
650  
651  	reg = ipa_reg(ipa, ENDP_INIT_CFG);
652  	/* FRAG_OFFLOAD_EN is 0 */
653  	if (endpoint->config.checksum) {
654  		enum ipa_version version = ipa->version;
655  
656  		if (endpoint->toward_ipa) {
657  			u32 off;
658  
659  			/* Checksum header offset is in 4-byte units */
660  			off = sizeof(struct rmnet_map_header) / sizeof(u32);
661  			val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
662  
663  			enabled = version < IPA_VERSION_4_5
664  					? IPA_CS_OFFLOAD_UL
665  					: IPA_CS_OFFLOAD_INLINE;
666  		} else {
667  			enabled = version < IPA_VERSION_4_5
668  					? IPA_CS_OFFLOAD_DL
669  					: IPA_CS_OFFLOAD_INLINE;
670  		}
671  	} else {
672  		enabled = IPA_CS_OFFLOAD_NONE;
673  	}
674  	val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
675  	/* CS_GEN_QMB_MASTER_SEL is 0 */
676  
677  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
678  }
679  
680  static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
681  {
682  	u32 endpoint_id = endpoint->endpoint_id;
683  	struct ipa *ipa = endpoint->ipa;
684  	const struct reg *reg;
685  	u32 val;
686  
687  	if (!endpoint->toward_ipa)
688  		return;
689  
690  	reg = ipa_reg(ipa, ENDP_INIT_NAT);
691  	val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
692  
693  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
694  }
695  
696  static u32
697  ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
698  {
699  	u32 header_size = sizeof(struct rmnet_map_header);
700  
701  	/* Without checksum offload, we just have the MAP header */
702  	if (!endpoint->config.checksum)
703  		return header_size;
704  
705  	if (version < IPA_VERSION_4_5) {
706  		/* Checksum header inserted for AP TX endpoints only */
707  		if (endpoint->toward_ipa)
708  			header_size += sizeof(struct rmnet_map_ul_csum_header);
709  	} else {
710  		/* Checksum header is used in both directions */
711  		header_size += sizeof(struct rmnet_map_v5_csum_header);
712  	}
713  
714  	return header_size;
715  }
716  
717  /* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
718  static u32 ipa_header_size_encode(enum ipa_version version,
719  				  const struct reg *reg, u32 header_size)
720  {
721  	u32 field_max = reg_field_max(reg, HDR_LEN);
722  	u32 val;
723  
724  	/* We know field_max can be used as a mask (2^n - 1) */
725  	val = reg_encode(reg, HDR_LEN, header_size & field_max);
726  	if (version < IPA_VERSION_4_5) {
727  		WARN_ON(header_size > field_max);
728  		return val;
729  	}
730  
731  	/* IPA v4.5 adds a few more most-significant bits */
732  	header_size >>= hweight32(field_max);
733  	WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
734  	val |= reg_encode(reg, HDR_LEN_MSB, header_size);
735  
736  	return val;
737  }
738  
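/* Illustration (assumed field width): if HDR_LEN were a 6-bit field, a
 * header_size of 8 is fully encoded in HDR_LEN.  For IPA v4.5+ a larger
 * value, say 70, is split: the low bits (70 & 0x3f = 6) go in HDR_LEN and
 * the remainder (70 >> 6 = 1) goes in HDR_LEN_MSB.
 */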
739  /* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
740  static u32 ipa_metadata_offset_encode(enum ipa_version version,
741  				      const struct reg *reg, u32 offset)
742  {
743  	u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
744  	u32 val;
745  
746  	/* We know field_max can be used as a mask (2^n - 1) */
747  	val = reg_encode(reg, HDR_OFST_METADATA, offset);
748  	if (version < IPA_VERSION_4_5) {
749  		WARN_ON(offset > field_max);
750  		return val;
751  	}
752  
753  	/* IPA v4.5 adds a few more most-significant bits */
754  	offset >>= hweight32(field_max);
755  	WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
756  	val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
757  
758  	return val;
759  }
760  
761  /**
762   * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
763   * @endpoint:	Endpoint pointer
764   *
765   * We program QMAP endpoints so each packet received is preceded by a QMAP
766   * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
767   * packet size field, and we have the IPA hardware populate both for each
768   * received packet.  The header is configured (in the HDR_EXT register)
769   * to use big endian format.
770   *
771   * The packet size is written into the QMAP header's pkt_len field.  That
772   * location is defined here using the HDR_OFST_PKT_SIZE field.
773   *
774   * The mux_id comes from a 4-byte metadata value supplied with each packet
775   * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
776   * value that we want, in its low-order byte.  A bitmask defined in the
777   * endpoint's METADATA_MASK register defines which byte within the modem
778   * metadata contains the mux_id.  And the OFST_METADATA field programmed
779   * here indicates where the extracted byte should be placed within the QMAP
780   * header.
781   */
782  static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
783  {
784  	u32 endpoint_id = endpoint->endpoint_id;
785  	struct ipa *ipa = endpoint->ipa;
786  	const struct reg *reg;
787  	u32 val = 0;
788  
789  	reg = ipa_reg(ipa, ENDP_INIT_HDR);
790  	if (endpoint->config.qmap) {
791  		enum ipa_version version = ipa->version;
792  		size_t header_size;
793  
794  		header_size = ipa_qmap_header_size(version, endpoint);
795  		val = ipa_header_size_encode(version, reg, header_size);
796  
797  		/* Define how to fill fields in a received QMAP header */
798  		if (!endpoint->toward_ipa) {
799  			u32 off;     /* Field offset within header */
800  
801  			/* Where IPA will write the metadata value */
802  			off = offsetof(struct rmnet_map_header, mux_id);
803  			val |= ipa_metadata_offset_encode(version, reg, off);
804  
805  			/* Where IPA will write the length */
806  			off = offsetof(struct rmnet_map_header, pkt_len);
807  			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
808  			if (version >= IPA_VERSION_4_5)
809  				off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
810  
811  			val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
812  			val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
813  		}
814  		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
815  		val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
816  
817  		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
818  		/* HDR_A5_MUX is 0 */
819  		/* HDR_LEN_INC_DEAGG_HDR is 0 */
820  		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
821  	}
822  
823  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
824  }
825  
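/* For reference, the 4-byte QMAP (rmnet MAP) header the code above points
 * the hardware at looks roughly like this (see <linux/if_rmnet.h>):
 *
 *	byte 0:    pad length (6 bits), next-header flag, command/data flag
 *	byte 1:    mux_id     <- HDR_OFST_METADATA points here
 *	bytes 2-3: pkt_len    <- HDR_OFST_PKT_SIZE points here (big endian)
 */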
826  static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
827  {
828  	u32 pad_align = endpoint->config.rx.pad_align;
829  	u32 endpoint_id = endpoint->endpoint_id;
830  	struct ipa *ipa = endpoint->ipa;
831  	const struct reg *reg;
832  	u32 val = 0;
833  
834  	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
835  	if (endpoint->config.qmap) {
836  		/* We have a header, so we must specify its endianness */
837  		val |= reg_bit(reg, HDR_ENDIANNESS);	/* big endian */
838  
839  		/* A QMAP header contains a 6 bit pad field at offset 0.
840  		 * The RMNet driver assumes this field is meaningful in
841  		 * packets it receives, and assumes the header's payload
842  		 * length includes that padding.  The RMNet driver does
843  		 * *not* pad packets it sends, however, so the pad field
844  		 * (although 0) should be ignored.
845  		 */
846  		if (!endpoint->toward_ipa) {
847  			val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
848  			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
849  			val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
850  			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
851  		}
852  	}
853  
854  	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
855  	if (!endpoint->toward_ipa)
856  		val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
857  
858  	/* IPA v4.5 adds some most-significant bits to a few fields,
859  	 * two of which are defined in the HDR (not HDR_EXT) register.
860  	 */
861  	if (ipa->version >= IPA_VERSION_4_5) {
862  		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
863  		if (endpoint->config.qmap && !endpoint->toward_ipa) {
864  			u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
865  			u32 off;     /* Field offset within header */
866  
867  			off = offsetof(struct rmnet_map_header, pkt_len);
868  			/* Low bits are in the ENDP_INIT_HDR register */
869  			off >>= hweight32(mask);
870  			val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
871  			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
872  		}
873  	}
874  
875  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
876  }
877  
878  static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
879  {
880  	u32 endpoint_id = endpoint->endpoint_id;
881  	struct ipa *ipa = endpoint->ipa;
882  	const struct reg *reg;
883  	u32 val = 0;
884  	u32 offset;
885  
886  	if (endpoint->toward_ipa)
887  		return;		/* Register not valid for TX endpoints */
888  
889  	reg = ipa_reg(ipa,  ENDP_INIT_HDR_METADATA_MASK);
890  	offset = reg_n_offset(reg, endpoint_id);
891  
892  	/* Note that HDR_ENDIANNESS indicates big endian header fields */
893  	if (endpoint->config.qmap)
894  		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
895  
896  	iowrite32(val, ipa->reg_virt + offset);
897  }
898  
899  static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
900  {
901  	struct ipa *ipa = endpoint->ipa;
902  	const struct reg *reg;
903  	u32 offset;
904  	u32 val;
905  
906  	if (!endpoint->toward_ipa)
907  		return;		/* Register not valid for RX endpoints */
908  
909  	reg = ipa_reg(ipa, ENDP_INIT_MODE);
910  	if (endpoint->config.dma_mode) {
911  		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
912  		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
913  
914  		val = reg_encode(reg, ENDP_MODE, IPA_DMA);
915  		val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
916  	} else {
917  		val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
918  	}
919  	/* All other bits unspecified (and 0) */
920  
921  	offset = reg_n_offset(reg, endpoint->endpoint_id);
922  	iowrite32(val, ipa->reg_virt + offset);
923  }
924  
925  /* For IPA v4.5+, times are expressed using Qtime.  A time is represented
926   * at one of several available granularities, which are configured in
927   * ipa_qtime_config().  Three (or, starting with IPA v5.0, four) pulse
928   * generators are set up with different "tick" periods.  A Qtime value
929   * encodes a tick count along with an indication of a pulse generator
930   * (which has a fixed tick period).  Two pulse generators are always
931   * available to the AP; a third is available starting with IPA v5.0.
932   * This function determines which pulse generator most accurately
933   * represents the time period provided, and returns the tick count to
934   * use to represent that time.
935   */
936  static u32
937  ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
938  {
939  	u32 which = 0;
940  	u32 ticks;
941  
942  	/* Pulse generator 0 has 100 microsecond granularity */
943  	ticks = DIV_ROUND_CLOSEST(microseconds, 100);
944  	if (ticks <= max)
945  		goto out;
946  
947  	/* Pulse generator 1 has millisecond granularity */
948  	which = 1;
949  	ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
950  	if (ticks <= max)
951  		goto out;
952  
953  	if (ipa->version >= IPA_VERSION_5_0) {
954  		/* Pulse generator 2 has 10 millisecond granularity */
955  		which = 2;
956  		ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
957  	}
958  	WARN_ON(ticks > max);
959  out:
960  	*select = which;
961  
962  	return ticks;
963  }
964  
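/* Worked example (sketch, assuming a 5-bit tick field, so max = 31): for
 * 2500 microseconds, pulse generator 0 gives 25 ticks (<= 31), so select 0
 * is used.  For 250000 microseconds, generator 0 would need 2500 ticks and
 * generator 1 would need 250, both too large; on IPA v5.0+ generator 2
 * (10 millisecond ticks) is chosen with 25 ticks, while on earlier versions
 * the WARN_ON() fires and the oversized count is returned as-is.
 */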
965  /* Encode the aggregation timer limit (microseconds) based on IPA version */
966  static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
967  				  u32 microseconds)
968  {
969  	u32 ticks;
970  	u32 max;
971  
972  	if (!microseconds)
973  		return 0;	/* Nothing to compute if time limit is 0 */
974  
975  	max = reg_field_max(reg, TIME_LIMIT);
976  	if (ipa->version >= IPA_VERSION_4_5) {
977  		u32 select;
978  
979  		ticks = ipa_qtime_val(ipa, microseconds, max, &select);
980  
981  		return reg_encode(reg, AGGR_GRAN_SEL, select) |
982  		       reg_encode(reg, TIME_LIMIT, ticks);
983  	}
984  
985  	/* We program aggregation granularity in ipa_hardware_config() */
986  	ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
987  	WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
988  	     microseconds, max * IPA_AGGR_GRANULARITY);
989  
990  	return reg_encode(reg, TIME_LIMIT, ticks);
991  }
992  
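/* Sketch of the pre-v4.5 path: with the aggregation granularity programmed
 * by ipa_hardware_config(), a time limit of T microseconds simply becomes
 * DIV_ROUND_CLOSEST(T, IPA_AGGR_GRANULARITY) ticks in the TIME_LIMIT field;
 * no granularity-select bit exists there, so AGGR_GRAN_SEL is encoded only
 * for IPA v4.5 and later.
 */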
993  static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
994  {
995  	u32 endpoint_id = endpoint->endpoint_id;
996  	struct ipa *ipa = endpoint->ipa;
997  	const struct reg *reg;
998  	u32 val = 0;
999  
1000  	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
1001  	if (endpoint->config.aggregation) {
1002  		if (!endpoint->toward_ipa) {
1003  			const struct ipa_endpoint_rx *rx_config;
1004  			u32 buffer_size;
1005  			u32 limit;
1006  
1007  			rx_config = &endpoint->config.rx;
1008  			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
1009  			val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
1010  
1011  			buffer_size = rx_config->buffer_size;
1012  			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
1013  						 rx_config->aggr_hard_limit);
1014  			val |= reg_encode(reg, BYTE_LIMIT, limit);
1015  
1016  			limit = rx_config->aggr_time_limit;
1017  			val |= aggr_time_limit_encode(ipa, reg, limit);
1018  
1019  			/* AGGR_PKT_LIMIT is 0 (unlimited) */
1020  
1021  			if (rx_config->aggr_close_eof)
1022  				val |= reg_bit(reg, SW_EOF_ACTIVE);
1023  		} else {
1024  			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
1025  			val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
1026  			/* other fields ignored */
1027  		}
1028  		/* AGGR_FORCE_CLOSE is 0 */
1029  		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
1030  	} else {
1031  		val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
1032  		/* other fields ignored */
1033  	}
1034  
1035  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1036  }
1037  
1038  /* The head-of-line blocking timer is defined as a tick count.  For
1039   * IPA version 4.5 the tick count is based on the Qtimer, which is
1040   * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
1041   * each tick represents 128 cycles of the IPA core clock.
1042   *
1043   * Return the encoded value representing the timeout period provided
1044   * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
1045   */
1046  static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
1047  				  u32 microseconds)
1048  {
1049  	u32 width;
1050  	u32 scale;
1051  	u64 ticks;
1052  	u64 rate;
1053  	u32 high;
1054  	u32 val;
1055  
1056  	if (!microseconds)
1057  		return 0;	/* Nothing to compute if timer period is 0 */
1058  
1059  	if (ipa->version >= IPA_VERSION_4_5) {
1060  		u32 max = reg_field_max(reg, TIMER_LIMIT);
1061  		u32 select;
1062  		u32 ticks;
1063  
1064  		ticks = ipa_qtime_val(ipa, microseconds, max, &select);
1065  
1066  		return reg_encode(reg, TIMER_GRAN_SEL, 1) |
1067  		       reg_encode(reg, TIMER_LIMIT, ticks);
1068  	}
1069  
1070  	/* Use 64 bit arithmetic to avoid overflow */
1071  	rate = ipa_core_clock_rate(ipa);
1072  	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
1073  
1074  	/* We still need the result to fit into the field */
1075  	WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
1076  
1077  	/* IPA v3.5.1 through v4.1 just record the tick count */
1078  	if (ipa->version < IPA_VERSION_4_2)
1079  		return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
1080  
1081  	/* For IPA v4.2, the tick count is represented by base and
1082  	 * scale fields within the 32-bit timer register, where:
1083  	 *     ticks = base << scale;
1084  	 * The best precision is achieved when the base value is as
1085  	 * large as possible.  Find the highest set bit in the tick
1086  	 * count, and extract the number of bits in the base field
1087  	 * such that high bit is included.
1088  	 */
1089  	high = fls(ticks);		/* 1..32 (or warning above) */
1090  	width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
1091  	scale = high > width ? high - width : 0;
1092  	if (scale) {
1093  		/* If we're scaling, round up to get a closer result */
1094  		ticks += 1 << (scale - 1);
1095  		/* High bit was set, so rounding might have affected it */
1096  		if (fls(ticks) != high)
1097  			scale++;
1098  	}
1099  
1100  	val = reg_encode(reg, TIMER_SCALE, scale);
1101  	val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
1102  
1103  	return val;
1104  }
1105  
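/* Worked example of the IPA v4.2 base/scale encoding (assuming a 5-bit
 * TIMER_BASE_VALUE field): for ticks = 300, fls() gives high = 9, so
 * scale = 9 - 5 = 4.  Rounding adds 1 << 3 = 8 (ticks = 308, high bit
 * unchanged), and the programmed base is 308 >> 4 = 19, i.e. the hardware
 * counts 19 << 4 = 304 ticks, within rounding of the requested period.
 */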
1106  /* If microseconds is 0, timeout is immediate */
1107  static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
1108  					      u32 microseconds)
1109  {
1110  	u32 endpoint_id = endpoint->endpoint_id;
1111  	struct ipa *ipa = endpoint->ipa;
1112  	const struct reg *reg;
1113  	u32 val;
1114  
1115  	/* This should only be changed when HOL_BLOCK_EN is disabled */
1116  	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
1117  	val = hol_block_timer_encode(ipa, reg, microseconds);
1118  
1119  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1120  }
1121  
1122  static void
1123  ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
1124  {
1125  	u32 endpoint_id = endpoint->endpoint_id;
1126  	struct ipa *ipa = endpoint->ipa;
1127  	const struct reg *reg;
1128  	u32 offset;
1129  	u32 val;
1130  
1131  	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
1132  	offset = reg_n_offset(reg, endpoint_id);
1133  	val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
1134  
1135  	iowrite32(val, ipa->reg_virt + offset);
1136  
1137  	/* When enabling, the register must be written twice for IPA v4.5+ */
1138  	if (enable && ipa->version >= IPA_VERSION_4_5)
1139  		iowrite32(val, ipa->reg_virt + offset);
1140  }
1141  
1142  /* Assumes HOL_BLOCK is in disabled state */
1143  static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
1144  					       u32 microseconds)
1145  {
1146  	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
1147  	ipa_endpoint_init_hol_block_en(endpoint, true);
1148  }
1149  
1150  static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
1151  {
1152  	ipa_endpoint_init_hol_block_en(endpoint, false);
1153  }
1154  
1155  void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1156  {
1157  	u32 endpoint_id = 0;
1158  
1159  	while (endpoint_id < ipa->endpoint_count) {
1160  		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1161  
1162  		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1163  			continue;
1164  
1165  		ipa_endpoint_init_hol_block_disable(endpoint);
1166  		ipa_endpoint_init_hol_block_enable(endpoint, 0);
1167  	}
1168  }
1169  
1170  static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1171  {
1172  	u32 endpoint_id = endpoint->endpoint_id;
1173  	struct ipa *ipa = endpoint->ipa;
1174  	const struct reg *reg;
1175  	u32 val = 0;
1176  
1177  	if (!endpoint->toward_ipa)
1178  		return;		/* Register not valid for RX endpoints */
1179  
1180  	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1181  	/* DEAGGR_HDR_LEN is 0 */
1182  	/* PACKET_OFFSET_VALID is 0 */
1183  	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
1184  	/* MAX_PACKET_LEN is 0 (not enforced) */
1185  
1186  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1187  }
1188  
1189  static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1190  {
1191  	u32 resource_group = endpoint->config.resource_group;
1192  	u32 endpoint_id = endpoint->endpoint_id;
1193  	struct ipa *ipa = endpoint->ipa;
1194  	const struct reg *reg;
1195  	u32 val;
1196  
1197  	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1198  	val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
1199  
1200  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1201  }
1202  
1203  static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1204  {
1205  	u32 endpoint_id = endpoint->endpoint_id;
1206  	struct ipa *ipa = endpoint->ipa;
1207  	const struct reg *reg;
1208  	u32 val;
1209  
1210  	if (!endpoint->toward_ipa)
1211  		return;		/* Register not valid for RX endpoints */
1212  
1213  	reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1214  
1215  	/* Low-order byte configures primary packet processing */
1216  	val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1217  
1218  	/* Second byte (if supported) configures replicated packet processing */
1219  	if (ipa->version < IPA_VERSION_4_5)
1220  		val |= reg_encode(reg, SEQ_REP_TYPE,
1221  				  endpoint->config.tx.seq_rep_type);
1222  
1223  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1224  }
1225  
1226  /**
1227   * ipa_endpoint_skb_tx() - Transmit a socket buffer
1228   * @endpoint:	Endpoint pointer
1229   * @skb:	Socket buffer to send
1230   *
1231   * Returns:	0 if successful, or a negative error code
1232   */
1233  int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1234  {
1235  	struct gsi_trans *trans;
1236  	u32 nr_frags;
1237  	int ret;
1238  
1239  	/* Make sure source endpoint's TLV FIFO has enough entries to
1240  	 * hold the linear portion of the skb and all its fragments.
1241  	 * If not, see if we can linearize it before giving up.
1242  	 */
1243  	nr_frags = skb_shinfo(skb)->nr_frags;
1244  	if (nr_frags > endpoint->skb_frag_max) {
1245  		if (skb_linearize(skb))
1246  			return -E2BIG;
1247  		nr_frags = 0;
1248  	}
1249  
1250  	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1251  	if (!trans)
1252  		return -EBUSY;
1253  
1254  	ret = gsi_trans_skb_add(trans, skb);
1255  	if (ret)
1256  		goto err_trans_free;
1257  	trans->data = skb;	/* transaction owns skb now */
1258  
1259  	gsi_trans_commit(trans, !netdev_xmit_more());
1260  
1261  	return 0;
1262  
1263  err_trans_free:
1264  	gsi_trans_free(trans);
1265  
1266  	return -ENOMEM;
1267  }
1268  
1269  static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1270  {
1271  	u32 endpoint_id = endpoint->endpoint_id;
1272  	struct ipa *ipa = endpoint->ipa;
1273  	const struct reg *reg;
1274  	u32 val = 0;
1275  
1276  	reg = ipa_reg(ipa, ENDP_STATUS);
1277  	if (endpoint->config.status_enable) {
1278  		val |= reg_bit(reg, STATUS_EN);
1279  		if (endpoint->toward_ipa) {
1280  			enum ipa_endpoint_name name;
1281  			u32 status_endpoint_id;
1282  
1283  			name = endpoint->config.tx.status_endpoint;
1284  			status_endpoint_id = ipa->name_map[name]->endpoint_id;
1285  
1286  			val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
1287  		}
1288  		/* STATUS_LOCATION is 0, meaning IPA packet status
1289  		 * precedes the packet (not present for IPA v4.5+)
1290  		 */
1291  		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
1292  	}
1293  
1294  	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1295  }
1296  
1297  static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1298  				      struct gsi_trans *trans)
1299  {
1300  	struct page *page;
1301  	u32 buffer_size;
1302  	u32 offset;
1303  	u32 len;
1304  	int ret;
1305  
1306  	buffer_size = endpoint->config.rx.buffer_size;
1307  	page = dev_alloc_pages(get_order(buffer_size));
1308  	if (!page)
1309  		return -ENOMEM;
1310  
1311  	/* Offset the buffer to make space for skb headroom */
1312  	offset = NET_SKB_PAD;
1313  	len = buffer_size - offset;
1314  
1315  	ret = gsi_trans_page_add(trans, page, len, offset);
1316  	if (ret)
1317  		put_page(page);
1318  	else
1319  		trans->data = page;	/* transaction owns page now */
1320  
1321  	return ret;
1322  }
1323  
1324  /**
1325   * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1326   * @endpoint:	Endpoint to be replenished
1327   *
1328   * The IPA hardware can hold a fixed number of receive buffers for an RX
1329   * endpoint, based on the number of entries in the underlying channel ring
1330   * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1331   * more receive buffers can be supplied to the hardware.  Replenishing for
1332   * an endpoint can be disabled, in which case buffers are not queued to
1333   * the hardware.
1334   */
1335  static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1336  {
1337  	struct gsi_trans *trans;
1338  
1339  	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1340  		return;
1341  
1342  	/* Skip it if it's already active */
1343  	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1344  		return;
1345  
1346  	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1347  		bool doorbell;
1348  
1349  		if (ipa_endpoint_replenish_one(endpoint, trans))
1350  			goto try_again_later;
1351  
1352  
1353  		/* Ring the doorbell if we've got a full batch */
1354  		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1355  		gsi_trans_commit(trans, doorbell);
1356  	}
1357  
1358  	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1359  
1360  	return;
1361  
1362  try_again_later:
1363  	gsi_trans_free(trans);
1364  	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1365  
1366  	/* Whenever a receive buffer transaction completes we'll try to
1367  	 * replenish again.  It's unlikely, but if we fail to supply even
1368  	 * one buffer, nothing will trigger another replenish attempt.
1369  	 * If the hardware has no receive buffers queued, schedule work to
1370  	 * try replenishing again.
1371  	 */
1372  	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1373  		schedule_delayed_work(&endpoint->replenish_work,
1374  				      msecs_to_jiffies(1));
1375  }
1376  
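/* Replenish flow sketch: each pass allocates a one-TRE transaction, attaches
 * a freshly allocated page, and commits it, ringing the channel doorbell
 * only on every IPA_REPLENISH_BATCH-th (16th) buffer.  If a page allocation
 * fails while the channel has no buffers queued at all, the delayed work
 * retries the replenish about a millisecond later.
 */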
1377  static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1378  {
1379  	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1380  
1381  	/* Start replenishing if hardware currently has no buffers */
1382  	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1383  		ipa_endpoint_replenish(endpoint);
1384  }
1385  
1386  static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1387  {
1388  	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1389  }
1390  
1391  static void ipa_endpoint_replenish_work(struct work_struct *work)
1392  {
1393  	struct delayed_work *dwork = to_delayed_work(work);
1394  	struct ipa_endpoint *endpoint;
1395  
1396  	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1397  
1398  	ipa_endpoint_replenish(endpoint);
1399  }
1400  
1401  static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1402  				  void *data, u32 len, u32 extra)
1403  {
1404  	struct sk_buff *skb;
1405  
1406  	if (!endpoint->netdev)
1407  		return;
1408  
1409  	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1410  	if (skb) {
1411  		/* Copy the data into the socket buffer and receive it */
1412  		skb_put(skb, len);
1413  		memcpy(skb->data, data, len);
1414  		skb->truesize += extra;
1415  	}
1416  
1417  	ipa_modem_skb_rx(endpoint->netdev, skb);
1418  }
1419  
1420  static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1421  				   struct page *page, u32 len)
1422  {
1423  	u32 buffer_size = endpoint->config.rx.buffer_size;
1424  	struct sk_buff *skb;
1425  
1426  	/* Nothing to do if there's no netdev */
1427  	if (!endpoint->netdev)
1428  		return false;
1429  
1430  	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1431  
1432  	skb = build_skb(page_address(page), buffer_size);
1433  	if (skb) {
1434  		/* Reserve the headroom and account for the data */
1435  		skb_reserve(skb, NET_SKB_PAD);
1436  		skb_put(skb, len);
1437  	}
1438  
1439  	/* Receive the buffer (or record drop if unable to build it) */
1440  	ipa_modem_skb_rx(endpoint->netdev, skb);
1441  
1442  	return skb != NULL;
1443  }
1444  
1445  /* The format of an IPA packet status structure is the same for several
1446   * status types (opcodes).  Other types aren't currently supported.
1447   */
1448  static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1449  {
1450  	switch (opcode) {
1451  	case IPA_STATUS_OPCODE_PACKET:
1452  	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1453  	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1454  	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1455  		return true;
1456  	default:
1457  		return false;
1458  	}
1459  }
1460  
1461  static bool
1462  ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
1463  {
1464  	struct ipa *ipa = endpoint->ipa;
1465  	enum ipa_status_opcode opcode;
1466  	u32 endpoint_id;
1467  
1468  	opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
1469  	if (!ipa_status_format_packet(opcode))
1470  		return true;
1471  
1472  	endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
1473  	if (endpoint_id != endpoint->endpoint_id)
1474  		return true;
1475  
1476  	return false;	/* Don't skip this packet, process it */
1477  }
1478  
1479  static bool
1480  ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
1481  {
1482  	struct ipa_endpoint *command_endpoint;
1483  	enum ipa_status_mask status_mask;
1484  	struct ipa *ipa = endpoint->ipa;
1485  	u32 endpoint_id;
1486  
1487  	status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
1488  	if (!status_mask)
1489  		return false;	/* No valid tag */
1490  
1491  	/* The status contains a valid tag.  We know the packet was sent to
1492  	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1493  	 * If the packet came from the AP->command TX endpoint we know
1494  	 * this packet was sent as part of the pipeline clear process.
1495  	 */
1496  	endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
1497  	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1498  	if (endpoint_id == command_endpoint->endpoint_id) {
1499  		complete(&ipa->completion);
1500  	} else {
1501  		dev_err(&ipa->pdev->dev,
1502  			"unexpected tagged packet from endpoint %u\n",
1503  			endpoint_id);
1504  	}
1505  
1506  	return true;
1507  }
1508  
1509  /* Return whether the status indicates the packet should be dropped */
1510  static bool
1511  ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
1512  {
1513  	enum ipa_status_exception exception;
1514  	struct ipa *ipa = endpoint->ipa;
1515  	u32 rule;
1516  
1517  	/* If the status indicates a tagged transfer, we'll drop the packet */
1518  	if (ipa_endpoint_status_tag_valid(endpoint, data))
1519  		return true;
1520  
1521  	/* Deaggregation exceptions we drop; all other types we consume */
1522  	exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
1523  	if (exception)
1524  		return exception == IPA_STATUS_EXCEPTION_DEAGGR;
1525  
1526  	/* Drop the packet if it fails to match a routing rule; otherwise don't */
1527  	rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
1528  
1529  	return rule == IPA_STATUS_RULE_MISS;
1530  }
1531  
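/* Each iteration of the loop below consumes one status record plus the
 * packet (if any) it describes.  Illustrative layout only (not to scale):
 *
 *	| status (IPA_STATUS_SIZE) | packet data (length) | pad | csum trailer |
 *
 * The pad bytes appear only if the endpoint pads to an alignment boundary,
 * and the checksum trailer only if checksum offload is enabled.
 */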
1532  static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1533  				      struct page *page, u32 total_len)
1534  {
1535  	u32 buffer_size = endpoint->config.rx.buffer_size;
1536  	void *data = page_address(page) + NET_SKB_PAD;
1537  	u32 unused = buffer_size - total_len;
1538  	struct ipa *ipa = endpoint->ipa;
1539  	u32 resid = total_len;
1540  
1541  	while (resid) {
1542  		u32 length;
1543  		u32 align;
1544  		u32 len;
1545  
1546  		if (resid < IPA_STATUS_SIZE) {
1547  			dev_err(&endpoint->ipa->pdev->dev,
1548  				"short message (%u bytes < %zu byte status)\n",
1549  				resid, IPA_STATUS_SIZE);
1550  			break;
1551  		}
1552  
1553  		/* Skip over status packets that lack packet data */
1554  		length = ipa_status_extract(ipa, data, STATUS_LENGTH);
1555  		if (!length || ipa_endpoint_status_skip(endpoint, data)) {
1556  			data += IPA_STATUS_SIZE;
1557  			resid -= IPA_STATUS_SIZE;
1558  			continue;
1559  		}
1560  
1561  		/* Compute the amount of buffer space consumed by the packet,
1562  		 * including the status.  If the hardware is configured to
1563  		 * pad packet data to an aligned boundary, account for that.
1564  		 * And if checksum offload is enabled a trailer containing
1565  		 * computed checksum information will be appended.
1566  		 */
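		/* A pad_align of 0 means no padding; treat it as 1-byte alignment */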
1567  		align = endpoint->config.rx.pad_align ? : 1;
1568  		len = IPA_STATUS_SIZE + ALIGN(length, align);
1569  		if (endpoint->config.checksum)
1570  			len += sizeof(struct rmnet_map_dl_csum_trailer);
1571  
1572  		if (!ipa_endpoint_status_drop(endpoint, data)) {
1573  			void *data2;
1574  			u32 extra;
1575  
1576  			/* Client receives only packet data (no status) */
1577  			data2 = data + IPA_STATUS_SIZE;
1578  
1579  			/* Have the true size reflect the extra unused space in
1580  			 * the original receive buffer.  Distribute the "cost"
1581  			 * proportionately across all aggregated packets in the
1582  			 * buffer.
1583  			 */
1584  			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1585  			ipa_endpoint_skb_copy(endpoint, data2, length, extra);
1586  		}
1587  
1588  		/* Consume status and the full packet it describes */
1589  		data += len;
1590  		resid -= len;
1591  	}
1592  }
1593  
1594  void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1595  				 struct gsi_trans *trans)
1596  {
1597  	struct page *page;
1598  
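	/* Completed TX transactions need no processing here; only RX
	 * transactions carry received data to parse or pass upstream.
	 */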
1599  	if (endpoint->toward_ipa)
1600  		return;
1601  
1602  	if (trans->cancelled)
1603  		goto done;
1604  
1605  	/* Parse or build a socket buffer using the actual received length */
1606  	page = trans->data;
1607  	if (endpoint->config.status_enable)
1608  		ipa_endpoint_status_parse(endpoint, page, trans->len);
1609  	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1610  		trans->data = NULL;	/* Pages have been consumed */
1611  done:
1612  	ipa_endpoint_replenish(endpoint);
1613  }
1614  
1615  void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1616  				struct gsi_trans *trans)
1617  {
1618  	if (endpoint->toward_ipa) {
1619  		struct ipa *ipa = endpoint->ipa;
1620  
1621  		/* Nothing to do for command transactions */
1622  		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1623  			struct sk_buff *skb = trans->data;
1624  
1625  			if (skb)
1626  				dev_kfree_skb_any(skb);
1627  		}
1628  	} else {
1629  		struct page *page = trans->data;
1630  
1631  		if (page)
1632  			put_page(page);
1633  	}
1634  }
1635  
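/* Program the ROUTE register: point its default and fragment pipes at the
 * given endpoint and retain headers on default-routed packets.
 */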
1636  void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1637  {
1638  	const struct reg *reg;
1639  	u32 val;
1640  
1641  	reg = ipa_reg(ipa, ROUTE);
1642  	/* ROUTE_DIS is 0 */
1643  	val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
1644  	val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
1645  	/* ROUTE_DEF_HDR_OFST is 0 */
1646  	val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
1647  	val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
1648  
1649  	iowrite32(val, ipa->reg_virt + reg_offset(reg));
1650  }
1651  
1652  void ipa_endpoint_default_route_clear(struct ipa *ipa)
1653  {
1654  	ipa_endpoint_default_route_set(ipa, 0);
1655  }
1656  
1657  /**
1658   * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1659   * @endpoint:	Endpoint to be reset
1660   *
1661   * If aggregation is active on an RX endpoint when a reset is performed
1662   * on its underlying GSI channel, a special sequence of actions must be
1663   * taken to ensure the IPA pipeline is properly cleared.
1664   *
1665   * Return:	0 if successful, or a negative error code
1666   */
1667  static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1668  {
1669  	struct device *dev = &endpoint->ipa->pdev->dev;
1670  	struct ipa *ipa = endpoint->ipa;
1671  	struct gsi *gsi = &ipa->gsi;
1672  	bool suspended = false;
1673  	dma_addr_t addr;
1674  	u32 retries;
1675  	u32 len = 1;
1676  	void *virt;
1677  	int ret;
1678  
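	/* Allocate and map a minimal (1 byte) receive buffer; the single-byte
	 * read issued below is assumed to be what lets the hardware drain the
	 * pipeline so the open aggregation frame can close.
	 */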
1679  	virt = kzalloc(len, GFP_KERNEL);
1680  	if (!virt)
1681  		return -ENOMEM;
1682  
1683  	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1684  	if (dma_mapping_error(dev, addr)) {
1685  		ret = -ENOMEM;
1686  		goto out_kfree;
1687  	}
1688  
1689  	/* Force close aggregation before issuing the reset */
1690  	ipa_endpoint_force_close(endpoint);
1691  
1692  	/* Reset and reconfigure the channel with the doorbell engine
1693  	 * disabled.  Then poll until we know aggregation is no longer
1694  	 * active.  We'll re-enable the doorbell (if appropriate) when
1695  	 * we reset again below.
1696  	 */
1697  	gsi_channel_reset(gsi, endpoint->channel_id, false);
1698  
1699  	/* Make sure the channel isn't suspended */
1700  	suspended = ipa_endpoint_program_suspend(endpoint, false);
1701  
1702  	/* Start channel and do a 1 byte read */
1703  	ret = gsi_channel_start(gsi, endpoint->channel_id);
1704  	if (ret)
1705  		goto out_suspend_again;
1706  
1707  	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1708  	if (ret)
1709  		goto err_endpoint_stop;
1710  
1711  	/* Wait for aggregation to be closed on the channel */
1712  	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1713  	do {
1714  		if (!ipa_endpoint_aggr_active(endpoint))
1715  			break;
1716  		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1717  	} while (retries--);
1718  
1719  	/* Check one last time */
1720  	if (ipa_endpoint_aggr_active(endpoint))
1721  		dev_err(dev, "endpoint %u still active during reset\n",
1722  			endpoint->endpoint_id);
1723  
1724  	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1725  
1726  	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1727  	if (ret)
1728  		goto out_suspend_again;
1729  
1730  	/* Finally, reset and reconfigure the channel again (re-enabling
1731  	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1732  	 * complete the channel reset sequence.  Finish by suspending the
1733  	 * channel again (if necessary).
1734  	 */
1735  	gsi_channel_reset(gsi, endpoint->channel_id, true);
1736  
1737  	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1738  
1739  	goto out_suspend_again;
1740  
1741  err_endpoint_stop:
1742  	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1743  out_suspend_again:
1744  	if (suspended)
1745  		(void)ipa_endpoint_program_suspend(endpoint, true);
1746  	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1747  out_kfree:
1748  	kfree(virt);
1749  
1750  	return ret;
1751  }
1752  
1753  static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1754  {
1755  	u32 channel_id = endpoint->channel_id;
1756  	struct ipa *ipa = endpoint->ipa;
1757  	bool special;
1758  	int ret = 0;
1759  
1760  	/* Prior to IPA v4.0 (e.g. IPA v3.5.1), if an RX endpoint is reset
1761  	 * while aggregation is active, we must take special steps to recover.
1762  	 * All other cases just need to reset the underlying GSI channel.
1763  	 */
1764  	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1765  			endpoint->config.aggregation;
1766  	if (special && ipa_endpoint_aggr_active(endpoint))
1767  		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1768  	else
1769  		gsi_channel_reset(&ipa->gsi, channel_id, true);
1770  
1771  	if (ret)
1772  		dev_err(&ipa->pdev->dev,
1773  			"error %d resetting channel %u for endpoint %u\n",
1774  			ret, endpoint->channel_id, endpoint->endpoint_id);
1775  }
1776  
1777  static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1778  {
1779  	if (endpoint->toward_ipa) {
1780  		/* Newer versions of IPA use GSI channel flow control
1781  		 * instead of endpoint DELAY mode to prevent sending data.
1782  		 * Flow control is disabled for newly-allocated channels,
1783  		 * and we can assume flow control is not (ever) enabled
1784  		 * for AP TX channels.
1785  		 */
1786  		if (endpoint->ipa->version < IPA_VERSION_4_2)
1787  			ipa_endpoint_program_delay(endpoint, false);
1788  	} else {
1789  		/* Ensure suspend mode is off on all AP RX endpoints */
1790  		(void)ipa_endpoint_program_suspend(endpoint, false);
1791  	}
1792  	ipa_endpoint_init_cfg(endpoint);
1793  	ipa_endpoint_init_nat(endpoint);
1794  	ipa_endpoint_init_hdr(endpoint);
1795  	ipa_endpoint_init_hdr_ext(endpoint);
1796  	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1797  	ipa_endpoint_init_mode(endpoint);
1798  	ipa_endpoint_init_aggr(endpoint);
1799  	if (!endpoint->toward_ipa) {
1800  		if (endpoint->config.rx.holb_drop)
1801  			ipa_endpoint_init_hol_block_enable(endpoint, 0);
1802  		else
1803  			ipa_endpoint_init_hol_block_disable(endpoint);
1804  	}
1805  	ipa_endpoint_init_deaggr(endpoint);
1806  	ipa_endpoint_init_rsrc_grp(endpoint);
1807  	ipa_endpoint_init_seq(endpoint);
1808  	ipa_endpoint_status(endpoint);
1809  }
1810  
1811  int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1812  {
1813  	u32 endpoint_id = endpoint->endpoint_id;
1814  	struct ipa *ipa = endpoint->ipa;
1815  	struct gsi *gsi = &ipa->gsi;
1816  	int ret;
1817  
1818  	ret = gsi_channel_start(gsi, endpoint->channel_id);
1819  	if (ret) {
1820  		dev_err(&ipa->pdev->dev,
1821  			"error %d starting %cX channel %u for endpoint %u\n",
1822  			ret, endpoint->toward_ipa ? 'T' : 'R',
1823  			endpoint->channel_id, endpoint_id);
1824  		return ret;
1825  	}
1826  
1827  	if (!endpoint->toward_ipa) {
1828  		ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
1829  		ipa_endpoint_replenish_enable(endpoint);
1830  	}
1831  
1832  	__set_bit(endpoint_id, ipa->enabled);
1833  
1834  	return 0;
1835  }
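
/* Illustrative only: a caller (for example the modem netdev open path) is
 * assumed to enable endpoints by name and unwind on failure, roughly:
 *
 *	ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
 *	if (!ret) {
 *		ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
 *		if (ret)
 *			ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
 *	}
 */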
1836  
1837  void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1838  {
1839  	u32 endpoint_id = endpoint->endpoint_id;
1840  	struct ipa *ipa = endpoint->ipa;
1841  	struct gsi *gsi = &ipa->gsi;
1842  	int ret;
1843  
1844  	if (!test_bit(endpoint_id, ipa->enabled))
1845  		return;
1846  
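	/* Mark the endpoint disabled before quiescing its RX state and channel */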
1847  	__clear_bit(endpoint_id, endpoint->ipa->enabled);
1848  
1849  	if (!endpoint->toward_ipa) {
1850  		ipa_endpoint_replenish_disable(endpoint);
1851  		ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
1852  	}
1853  
1854  	/* Note that if stop fails, the channel's state is not well-defined */
1855  	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1856  	if (ret)
1857  		dev_err(&ipa->pdev->dev,
1858  			"error %d attempting to stop endpoint %u\n", ret,
1859  			endpoint_id);
1860  }
1861  
1862  void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1863  {
1864  	struct device *dev = &endpoint->ipa->pdev->dev;
1865  	struct gsi *gsi = &endpoint->ipa->gsi;
1866  	int ret;
1867  
1868  	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1869  		return;
1870  
1871  	if (!endpoint->toward_ipa) {
1872  		ipa_endpoint_replenish_disable(endpoint);
1873  		(void)ipa_endpoint_program_suspend(endpoint, true);
1874  	}
1875  
1876  	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1877  	if (ret)
1878  		dev_err(dev, "error %d suspending channel %u\n", ret,
1879  			endpoint->channel_id);
1880  }
1881  
1882  void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1883  {
1884  	struct device *dev = &endpoint->ipa->pdev->dev;
1885  	struct gsi *gsi = &endpoint->ipa->gsi;
1886  	int ret;
1887  
1888  	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1889  		return;
1890  
1891  	if (!endpoint->toward_ipa)
1892  		(void)ipa_endpoint_program_suspend(endpoint, false);
1893  
1894  	ret = gsi_channel_resume(gsi, endpoint->channel_id);
1895  	if (ret)
1896  		dev_err(dev, "error %d resuming channel %u\n", ret,
1897  			endpoint->channel_id);
1898  	else if (!endpoint->toward_ipa)
1899  		ipa_endpoint_replenish_enable(endpoint);
1900  }
1901  
1902  void ipa_endpoint_suspend(struct ipa *ipa)
1903  {
1904  	if (!ipa->setup_complete)
1905  		return;
1906  
1907  	if (ipa->modem_netdev)
1908  		ipa_modem_suspend(ipa->modem_netdev);
1909  
1910  	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1911  	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1912  }
1913  
1914  void ipa_endpoint_resume(struct ipa *ipa)
1915  {
1916  	if (!ipa->setup_complete)
1917  		return;
1918  
1919  	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1920  	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1921  
1922  	if (ipa->modem_netdev)
1923  		ipa_modem_resume(ipa->modem_netdev);
1924  }
1925  
1926  static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1927  {
1928  	struct gsi *gsi = &endpoint->ipa->gsi;
1929  	u32 channel_id = endpoint->channel_id;
1930  
1931  	/* Only AP endpoints get set up */
1932  	if (endpoint->ee_id != GSI_EE_AP)
1933  		return;
1934  
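	/* A TX transaction uses one TRE for the linear skb data; the rest
	 * bound the number of page fragments an skb may carry before the
	 * TX path must linearize it.
	 */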
1935  	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1936  	if (!endpoint->toward_ipa) {
1937  		/* RX transactions require a single TRE, so the maximum
1938  		 * backlog is the same as the maximum outstanding TREs.
1939  		 */
1940  		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1941  		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1942  		INIT_DELAYED_WORK(&endpoint->replenish_work,
1943  				  ipa_endpoint_replenish_work);
1944  	}
1945  
1946  	ipa_endpoint_program(endpoint);
1947  
1948  	__set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1949  }
1950  
1951  static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1952  {
1953  	__clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1954  
1955  	if (!endpoint->toward_ipa)
1956  		cancel_delayed_work_sync(&endpoint->replenish_work);
1957  
1958  	ipa_endpoint_reset(endpoint);
1959  }
1960  
1961  void ipa_endpoint_setup(struct ipa *ipa)
1962  {
1963  	u32 endpoint_id;
1964  
1965  	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1966  		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1967  }
1968  
1969  void ipa_endpoint_teardown(struct ipa *ipa)
1970  {
1971  	u32 endpoint_id;
1972  
1973  	for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
1974  		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1975  }
1976  
1977  void ipa_endpoint_deconfig(struct ipa *ipa)
1978  {
1979  	ipa->available_count = 0;
1980  	bitmap_free(ipa->available);
1981  	ipa->available = NULL;
1982  }
1983  
1984  int ipa_endpoint_config(struct ipa *ipa)
1985  {
1986  	struct device *dev = &ipa->pdev->dev;
1987  	const struct reg *reg;
1988  	u32 endpoint_id;
1989  	u32 hw_limit;
1990  	u32 tx_count;
1991  	u32 rx_count;
1992  	u32 rx_base;
1993  	u32 limit;
1994  	u32 val;
1995  
1996  	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1997  	 * Furthermore, the endpoints were not grouped such that TX
1998  	 * endpoint numbers started with 0 and RX endpoints had numbers
1999  	 * higher than all TX endpoints, so we can't do the simple
2000  	 * direction check used for newer hardware below.
2001  	 *
2002  	 * For hardware that doesn't support the FLAVOR_0 register,
2003  	 * just set the available mask to support any endpoint, and
2004  	 * assume the configuration is valid.
2005  	 */
2006  	if (ipa->version < IPA_VERSION_3_5) {
2007  		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
2008  		if (!ipa->available)
2009  			return -ENOMEM;
2010  		ipa->available_count = IPA_ENDPOINT_MAX;
2011  
2012  		bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
2013  
2014  		return 0;
2015  	}
2016  
2017  	/* Find out about the endpoints supplied by the hardware, and ensure
2018  	 * the highest one doesn't exceed the number supported by software.
2019  	 */
2020  	reg = ipa_reg(ipa, FLAVOR_0);
2021  	val = ioread32(ipa->reg_virt + reg_offset(reg));
2022  
2023  	/* Our RX is an IPA producer; our TX is an IPA consumer. */
2024  	tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
2025  	rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
2026  	rx_base = reg_decode(reg, PROD_LOWEST, val);
2027  
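	/* TX endpoints are numbered 0..tx_count - 1; RX endpoints occupy
	 * IDs rx_base..rx_base + rx_count - 1.
	 */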
2028  	limit = rx_base + rx_count;
2029  	if (limit > IPA_ENDPOINT_MAX) {
2030  		dev_err(dev, "too many endpoints, %u > %u\n",
2031  			limit, IPA_ENDPOINT_MAX);
2032  		return -EINVAL;
2033  	}
2034  
2035  	/* Until IPA v5.0, hardware supported at most 32 endpoints */
2036  	hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
2037  	if (limit > hw_limit) {
2038  		dev_err(dev, "unexpected endpoint count, %u > %u\n",
2039  			limit, hw_limit);
2040  		return -EINVAL;
2041  	}
2042  
2043  	/* Allocate and initialize the available endpoint bitmap */
2044  	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
2045  	if (!ipa->available)
2046  		return -ENOMEM;
2047  	ipa->available_count = limit;
2048  
2049  	/* Mark all supported RX and TX endpoints as available */
2050  	bitmap_set(ipa->available, 0, tx_count);
2051  	bitmap_set(ipa->available, rx_base, rx_count);
2052  
2053  	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
2054  		struct ipa_endpoint *endpoint;
2055  
2056  		if (endpoint_id >= limit) {
2057  			dev_err(dev, "invalid endpoint id, %u > %u\n",
2058  				endpoint_id, limit - 1);
2059  			goto err_free_bitmap;
2060  		}
2061  
2062  		if (!test_bit(endpoint_id, ipa->available)) {
2063  			dev_err(dev, "unavailable endpoint id %u\n",
2064  				endpoint_id);
2065  			goto err_free_bitmap;
2066  		}
2067  
2068  		/* Make sure it's pointing in the right direction */
2069  		endpoint = &ipa->endpoint[endpoint_id];
2070  		if (endpoint->toward_ipa) {
2071  			if (endpoint_id < tx_count)
2072  				continue;
2073  		} else if (endpoint_id >= rx_base) {
2074  			continue;
2075  		}
2076  
2077  		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
2078  		goto err_free_bitmap;
2079  	}
2080  
2081  	return 0;
2082  
2083  err_free_bitmap:
2084  	ipa_endpoint_deconfig(ipa);
2085  
2086  	return -EINVAL;
2087  }
2088  
2089  static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
2090  				  const struct ipa_gsi_endpoint_data *data)
2091  {
2092  	struct ipa_endpoint *endpoint;
2093  
2094  	endpoint = &ipa->endpoint[data->endpoint_id];
2095  
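	/* Record the channel->endpoint mapping, but only for AP-owned channels */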
2096  	if (data->ee_id == GSI_EE_AP)
2097  		ipa->channel_map[data->channel_id] = endpoint;
2098  	ipa->name_map[name] = endpoint;
2099  
2100  	endpoint->ipa = ipa;
2101  	endpoint->ee_id = data->ee_id;
2102  	endpoint->channel_id = data->channel_id;
2103  	endpoint->endpoint_id = data->endpoint_id;
2104  	endpoint->toward_ipa = data->toward_ipa;
2105  	endpoint->config = data->endpoint.config;
2106  
2107  	__set_bit(endpoint->endpoint_id, ipa->defined);
2108  }
2109  
2110  static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
2111  {
2112  	__clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
2113  
2114  	memset(endpoint, 0, sizeof(*endpoint));
2115  }
2116  
2117  void ipa_endpoint_exit(struct ipa *ipa)
2118  {
2119  	u32 endpoint_id;
2120  
2121  	ipa->filtered = 0;
2122  
2123  	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
2124  		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
2125  
2126  	bitmap_free(ipa->enabled);
2127  	ipa->enabled = NULL;
2128  	bitmap_free(ipa->set_up);
2129  	ipa->set_up = NULL;
2130  	bitmap_free(ipa->defined);
2131  	ipa->defined = NULL;
2132  
2133  	memset(ipa->name_map, 0, sizeof(ipa->name_map));
2134  	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
2135  }
2136  
2137  /* Initialize endpoint state; returns 0 if successful, or a negative error code */
2138  int ipa_endpoint_init(struct ipa *ipa, u32 count,
2139  		      const struct ipa_gsi_endpoint_data *data)
2140  {
2141  	enum ipa_endpoint_name name;
2142  	u32 filtered;
2143  
2144  	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
2145  
2146  	/* Number of endpoints is one more than the maximum ID */
2147  	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
2148  	if (!ipa->endpoint_count)
2149  		return -EINVAL;
2150  
2151  	/* Initialize endpoint state bitmaps */
2152  	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2153  	if (!ipa->defined)
2154  		return -ENOMEM;
2155  
2156  	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2157  	if (!ipa->set_up)
2158  		goto err_free_defined;
2159  
2160  	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2161  	if (!ipa->enabled)
2162  		goto err_free_set_up;
2163  
2164  	filtered = 0;
2165  	for (name = 0; name < count; name++, data++) {
2166  		if (ipa_gsi_endpoint_data_empty(data))
2167  			continue;	/* Skip over empty slots */
2168  
2169  		ipa_endpoint_init_one(ipa, name, data);
2170  
2171  		if (data->endpoint.filter_support)
2172  			filtered |= BIT(data->endpoint_id);
2173  		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
2174  			ipa->modem_tx_count++;
2175  	}
2176  
2177  	/* Make sure the set of filtered endpoints is valid */
2178  	if (!ipa_filtered_valid(ipa, filtered)) {
2179  		ipa_endpoint_exit(ipa);
2180  
2181  		return -EINVAL;
2182  	}
2183  
2184  	ipa->filtered = filtered;
2185  
2186  	return 0;
2187  
2188  err_free_set_up:
2189  	bitmap_free(ipa->set_up);
2190  	ipa->set_up = NULL;
2191  err_free_defined:
2192  	bitmap_free(ipa->defined);
2193  	ipa->defined = NULL;
2194  
2195  	return -ENOMEM;
2196  }
2197