xref: /openbmc/linux/drivers/net/ipa/ipa_cmd.c (revision ce05a9f39607623362aea9147c3dfab7a9f94ccb)
1731c46edSAlex Elder // SPDX-License-Identifier: GPL-2.0
2731c46edSAlex Elder 
3731c46edSAlex Elder /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
42d65ed76SAlex Elder  * Copyright (C) 2019-2021 Linaro Ltd.
5731c46edSAlex Elder  */
6731c46edSAlex Elder 
7731c46edSAlex Elder #include <linux/types.h>
8731c46edSAlex Elder #include <linux/device.h>
9731c46edSAlex Elder #include <linux/slab.h>
10731c46edSAlex Elder #include <linux/bitfield.h>
11731c46edSAlex Elder #include <linux/dma-direction.h>
12731c46edSAlex Elder 
13731c46edSAlex Elder #include "gsi.h"
14731c46edSAlex Elder #include "gsi_trans.h"
15731c46edSAlex Elder #include "ipa.h"
16731c46edSAlex Elder #include "ipa_endpoint.h"
17731c46edSAlex Elder #include "ipa_table.h"
18731c46edSAlex Elder #include "ipa_cmd.h"
19731c46edSAlex Elder #include "ipa_mem.h"
20731c46edSAlex Elder 
21731c46edSAlex Elder /**
22731c46edSAlex Elder  * DOC:  IPA Immediate Commands
23731c46edSAlex Elder  *
24731c46edSAlex Elder  * The AP command TX endpoint is used to issue immediate commands to the IPA.
25731c46edSAlex Elder  * An immediate command is generally used to request that the IPA do
26731c46edSAlex Elder  * something other than transfer data to another endpoint.
27731c46edSAlex Elder  *
28731c46edSAlex Elder  * Immediate commands are represented by GSI transactions just like other
29731c46edSAlex Elder  * transfer requests, each occupying a single GSI TRE.  Each immediate
30731c46edSAlex Elder  * command has a well-defined format, having a payload of a known length.
31731c46edSAlex Elder  * This allows the transfer element's length field to be used to hold an
32731c46edSAlex Elder  * immediate command's opcode.  The payload for a command resides in DRAM
33731c46edSAlex Elder  * and is described by a single scatterlist entry in its transaction.
34731c46edSAlex Elder  * Commands do not require a transaction completion callback.  To commit
35731c46edSAlex Elder  * an immediate command transaction, either gsi_trans_commit_wait() or
36731c46edSAlex Elder  * gsi_trans_commit_wait_timeout() is used.
37731c46edSAlex Elder  */
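
/* Illustrative sketch (not part of the driver): the typical calling
 * pattern for issuing an immediate command.  A transaction is allocated
 * for the AP command TX endpoint, one or more of the *_add() helpers
 * defined below fill it with commands, and the transaction is committed
 * synchronously.  The function name below is hypothetical; it only uses
 * helpers declared in ipa_cmd.h and gsi_trans.h.
 */
#if 0	/* example only */
static void ipa_cmd_example_no_op_write(struct ipa *ipa)
{
	struct gsi_trans *trans;

	/* One TRE is enough for a single immediate command */
	trans = ipa_cmd_trans_alloc(ipa, 1);
	if (!trans)
		return;

	/* A register write with mask 0 modifies nothing; it is used here
	 * only to show how a command is added to a transaction.
	 */
	ipa_cmd_register_write_add(trans, 0, 0, 0, false);

	gsi_trans_commit_wait(trans);
}
#endif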
38731c46edSAlex Elder 
39731c46edSAlex Elder /* Some commands can wait until indicated pipeline stages are clear */
40731c46edSAlex Elder enum pipeline_clear_options {
418701cb00SAlex Elder 	pipeline_clear_hps		= 0x0,
428701cb00SAlex Elder 	pipeline_clear_src_grp		= 0x1,
438701cb00SAlex Elder 	pipeline_clear_full		= 0x2,
44731c46edSAlex Elder };
45731c46edSAlex Elder 
46731c46edSAlex Elder /* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
47731c46edSAlex Elder 
48731c46edSAlex Elder struct ipa_cmd_hw_ip_fltrt_init {
49731c46edSAlex Elder 	__le64 hash_rules_addr;
50731c46edSAlex Elder 	__le64 flags;
51731c46edSAlex Elder 	__le64 nhash_rules_addr;
52731c46edSAlex Elder };
53731c46edSAlex Elder 
54731c46edSAlex Elder /* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
55731c46edSAlex Elder #define IP_FLTRT_FLAGS_HASH_SIZE_FMASK			GENMASK_ULL(11, 0)
56731c46edSAlex Elder #define IP_FLTRT_FLAGS_HASH_ADDR_FMASK			GENMASK_ULL(27, 12)
57731c46edSAlex Elder #define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK			GENMASK_ULL(39, 28)
58731c46edSAlex Elder #define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK			GENMASK_ULL(55, 40)
59731c46edSAlex Elder 
60731c46edSAlex Elder /* IPA_CMD_HDR_INIT_LOCAL */
61731c46edSAlex Elder 
62731c46edSAlex Elder struct ipa_cmd_hw_hdr_init_local {
63731c46edSAlex Elder 	__le64 hdr_table_addr;
64731c46edSAlex Elder 	__le32 flags;
65731c46edSAlex Elder 	__le32 reserved;
66731c46edSAlex Elder };
67731c46edSAlex Elder 
68731c46edSAlex Elder /* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
69731c46edSAlex Elder #define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK		GENMASK(11, 0)
70731c46edSAlex Elder #define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK		GENMASK(27, 12)
71731c46edSAlex Elder 
72731c46edSAlex Elder /* IPA_CMD_REGISTER_WRITE */
73731c46edSAlex Elder 
74d7f3087bSAlex Elder /* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
75731c46edSAlex Elder #define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
76731c46edSAlex Elder #define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)
77731c46edSAlex Elder 
78731c46edSAlex Elder struct ipa_cmd_register_write {
79d7f3087bSAlex Elder 	__le16 flags;		/* Unused/reserved prior to IPA v4.0 */
80731c46edSAlex Elder 	__le16 offset;
81731c46edSAlex Elder 	__le32 value;
82731c46edSAlex Elder 	__le32 value_mask;
83731c46edSAlex Elder 	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
84731c46edSAlex Elder };
85731c46edSAlex Elder 
86731c46edSAlex Elder /* Field masks for ipa_cmd_register_write structure fields */
87d7f3087bSAlex Elder /* The next field is present for IPA v4.0+ */
88731c46edSAlex Elder #define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
89d7f3087bSAlex Elder /* The next field is not present for IPA v4.0+ */
90731c46edSAlex Elder #define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)
91731c46edSAlex Elder 
92d7f3087bSAlex Elder /* The next field and its values are not present for IPA v4.0+ */
93731c46edSAlex Elder #define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)
94731c46edSAlex Elder 
95731c46edSAlex Elder /* IPA_CMD_IP_PACKET_INIT */
96731c46edSAlex Elder 
97731c46edSAlex Elder struct ipa_cmd_ip_packet_init {
98731c46edSAlex Elder 	u8 dest_endpoint;
99731c46edSAlex Elder 	u8 reserved[7];
100731c46edSAlex Elder };
101731c46edSAlex Elder 
102731c46edSAlex Elder /* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
103731c46edSAlex Elder #define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)
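/* The 5-bit field above limits the destination to endpoint IDs 0 through 31 */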
104731c46edSAlex Elder 
105731c46edSAlex Elder /* IPA_CMD_DMA_SHARED_MEM */
106731c46edSAlex Elder 
107731c46edSAlex Elder /* For IPA v4.0+, this opcode gets modified with pipeline clear options */
108731c46edSAlex Elder 
109731c46edSAlex Elder #define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
110731c46edSAlex Elder #define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)
111731c46edSAlex Elder 
112731c46edSAlex Elder struct ipa_cmd_hw_dma_mem_mem {
113731c46edSAlex Elder 	__le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
114731c46edSAlex Elder 	__le16 size;
115731c46edSAlex Elder 	__le16 local_addr;
116731c46edSAlex Elder 	__le16 flags;
117731c46edSAlex Elder 	__le64 system_addr;
118731c46edSAlex Elder };
119731c46edSAlex Elder 
120731c46edSAlex Elder /* Flag allowing atomic clear of target region after reading data (v4.0+) */
121731c46edSAlex Elder #define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)
122731c46edSAlex Elder 
123731c46edSAlex Elder /* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
124731c46edSAlex Elder #define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
125d7f3087bSAlex Elder /* The next two fields are not present for IPA v4.0+ */
126731c46edSAlex Elder #define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
127731c46edSAlex Elder #define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)
128731c46edSAlex Elder 
129731c46edSAlex Elder /* IPA_CMD_IP_PACKET_TAG_STATUS */
130731c46edSAlex Elder 
131731c46edSAlex Elder struct ipa_cmd_ip_packet_tag_status {
132731c46edSAlex Elder 	__le64 tag;
133731c46edSAlex Elder };
134731c46edSAlex Elder 
135731c46edSAlex Elder #define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)
136731c46edSAlex Elder 
137731c46edSAlex Elder /* Immediate command payload */
138731c46edSAlex Elder union ipa_cmd_payload {
139731c46edSAlex Elder 	struct ipa_cmd_hw_ip_fltrt_init table_init;
140731c46edSAlex Elder 	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
141731c46edSAlex Elder 	struct ipa_cmd_register_write register_write;
142731c46edSAlex Elder 	struct ipa_cmd_ip_packet_init ip_packet_init;
143731c46edSAlex Elder 	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
144731c46edSAlex Elder 	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
145731c46edSAlex Elder };
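
/* A payload allocated from the command pool is large enough to hold any
 * of the structures above; ipa_cmd_pool_init() below sizes its DMA pool
 * elements using sizeof(union ipa_cmd_payload) for that reason.
 */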
146731c46edSAlex Elder 
147731c46edSAlex Elder static void ipa_cmd_validate_build(void)
148731c46edSAlex Elder {
149731c46edSAlex Elder 	/* The sizes of filter and route tables need to fit into fields
150731c46edSAlex Elder 	 * in the ipa_cmd_hw_ip_fltrt_init structure.  Although hashed tables
151731c46edSAlex Elder 	 * might not be used, non-hashed and hashed tables have the same
152731c46edSAlex Elder 	 * maximum size.  IPv4 and IPv6 filter tables have the same number
153731c46edSAlex Elder 	 * of entries, and IPv4 and IPv6 route tables have the same number
154731c46edSAlex Elder 	 * of entries.
155731c46edSAlex Elder 	 */
1564ea29143SAlex Elder #define TABLE_SIZE	(TABLE_COUNT_MAX * sizeof(__le64))
157731c46edSAlex Elder #define TABLE_COUNT_MAX	max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
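	/* Each size field above is 12 bits wide, so the largest table
	 * (in bytes) must not exceed field_max() of those masks (0xfff).
	 */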
158731c46edSAlex Elder 	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
159731c46edSAlex Elder 	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
160731c46edSAlex Elder #undef TABLE_COUNT_MAX
161731c46edSAlex Elder #undef TABLE_SIZE
162731c46edSAlex Elder }
163731c46edSAlex Elder 
164731c46edSAlex Elder #ifdef IPA_VALIDATE
165731c46edSAlex Elder 
166731c46edSAlex Elder /* Validate a memory region holding a table */
167731c46edSAlex Elder bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
168731c46edSAlex Elder 			 bool route, bool ipv6, bool hashed)
169731c46edSAlex Elder {
170731c46edSAlex Elder 	struct device *dev = &ipa->pdev->dev;
171731c46edSAlex Elder 	u32 offset_max;
172731c46edSAlex Elder 
173731c46edSAlex Elder 	offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
174731c46edSAlex Elder 			    : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
175731c46edSAlex Elder 	if (mem->offset > offset_max ||
176731c46edSAlex Elder 	    ipa->mem_offset > offset_max - mem->offset) {
177b4afd4b9SAlex Elder 		dev_err(dev, "IPv%c %s%s table region offset too large\n",
178731c46edSAlex Elder 			ipv6 ? '6' : '4', hashed ? "hashed " : "",
179b4afd4b9SAlex Elder 			route ? "route" : "filter");
180b4afd4b9SAlex Elder 		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
181731c46edSAlex Elder 			ipa->mem_offset, mem->offset, offset_max);
182b4afd4b9SAlex Elder 
183731c46edSAlex Elder 		return false;
184731c46edSAlex Elder 	}
185731c46edSAlex Elder 
186731c46edSAlex Elder 	if (mem->offset > ipa->mem_size ||
187731c46edSAlex Elder 	    mem->size > ipa->mem_size - mem->offset) {
188b4afd4b9SAlex Elder 		dev_err(dev, "IPv%c %s%s table region out of range\n",
189731c46edSAlex Elder 			ipv6 ? '6' : '4', hashed ? "hashed " : "",
190b4afd4b9SAlex Elder 			route ? "route" : "filter");
191b4afd4b9SAlex Elder 		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
192731c46edSAlex Elder 			mem->offset, mem->size, ipa->mem_size);
193b4afd4b9SAlex Elder 
194731c46edSAlex Elder 		return false;
195731c46edSAlex Elder 	}
196731c46edSAlex Elder 
197731c46edSAlex Elder 	return true;
198731c46edSAlex Elder }
199731c46edSAlex Elder 
200731c46edSAlex Elder /* Validate the memory region that holds headers */
201731c46edSAlex Elder static bool ipa_cmd_header_valid(struct ipa *ipa)
202731c46edSAlex Elder {
203731c46edSAlex Elder 	struct device *dev = &ipa->pdev->dev;
204*ce05a9f3SAlex Elder 	const struct ipa_mem *mem;
205731c46edSAlex Elder 	u32 offset_max;
206731c46edSAlex Elder 	u32 size_max;
207*ce05a9f3SAlex Elder 	u32 offset;
208731c46edSAlex Elder 	u32 size;
209731c46edSAlex Elder 
210*ce05a9f3SAlex Elder 	/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
211*ce05a9f3SAlex Elder 	 * the header table memory area in an immediate command.  Make sure
212*ce05a9f3SAlex Elder 	 * the offset and size fit in the fields that need to hold them, and
213*ce05a9f3SAlex Elder 	 * that the entire range is within the overall IPA memory range.
214b4afd4b9SAlex Elder 	 */
215731c46edSAlex Elder 	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
216*ce05a9f3SAlex Elder 	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
217*ce05a9f3SAlex Elder 
218*ce05a9f3SAlex Elder 	/* The header memory area contains both the modem and AP header
219*ce05a9f3SAlex Elder 	 * regions.  The modem portion defines the address of the region.
220*ce05a9f3SAlex Elder 	 */
221*ce05a9f3SAlex Elder 	mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
222*ce05a9f3SAlex Elder 	offset = mem->offset;
223*ce05a9f3SAlex Elder 	size = mem->size;
224*ce05a9f3SAlex Elder 
225*ce05a9f3SAlex Elder 	/* Make sure the offset fits in the IPA command */
226*ce05a9f3SAlex Elder 	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
227b4afd4b9SAlex Elder 		dev_err(dev, "header table region offset too large\n");
228b4afd4b9SAlex Elder 		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
229*ce05a9f3SAlex Elder 			ipa->mem_offset, offset, offset_max);
230b4afd4b9SAlex Elder 
231731c46edSAlex Elder 		return false;
232731c46edSAlex Elder 	}
233731c46edSAlex Elder 
234*ce05a9f3SAlex Elder 	/* Add the size of the AP portion to the combined size */
235731c46edSAlex Elder 	size += ipa->mem[IPA_MEM_AP_HEADER].size;
236b4afd4b9SAlex Elder 
237*ce05a9f3SAlex Elder 	/* Make sure the combined size fits in the IPA command */
238b4afd4b9SAlex Elder 	if (size > size_max) {
239b4afd4b9SAlex Elder 		dev_err(dev, "header table region size too large\n");
240b4afd4b9SAlex Elder 		dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);
241b4afd4b9SAlex Elder 
242b4afd4b9SAlex Elder 		return false;
243b4afd4b9SAlex Elder 	}
244*ce05a9f3SAlex Elder 
245*ce05a9f3SAlex Elder 	/* Make sure the entire combined area fits in IPA memory */
246*ce05a9f3SAlex Elder 	if (size > ipa->mem_size || offset > ipa->mem_size - size) {
247b4afd4b9SAlex Elder 		dev_err(dev, "header table region out of range\n");
248b4afd4b9SAlex Elder 		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
249*ce05a9f3SAlex Elder 			offset, size, ipa->mem_size);
250b4afd4b9SAlex Elder 
251731c46edSAlex Elder 		return false;
252731c46edSAlex Elder 	}
253731c46edSAlex Elder 
254731c46edSAlex Elder 	return true;
255731c46edSAlex Elder }
256731c46edSAlex Elder 
257731c46edSAlex Elder /* Indicate whether an offset can be used with a register_write command */
258731c46edSAlex Elder static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
259731c46edSAlex Elder 						const char *name, u32 offset)
260731c46edSAlex Elder {
261731c46edSAlex Elder 	struct ipa_cmd_register_write *payload;
262731c46edSAlex Elder 	struct device *dev = &ipa->pdev->dev;
263731c46edSAlex Elder 	u32 offset_max;
264731c46edSAlex Elder 	u32 bit_count;
265731c46edSAlex Elder 
266731c46edSAlex Elder 	/* The maximum offset in a register_write immediate command depends
267d7f3087bSAlex Elder 	 * on the version of IPA.  A 16-bit offset is always supported,
268d7f3087bSAlex Elder 	 * but starting with IPA v4.0 some additional high-order bits are
269d7f3087bSAlex Elder 	 * allowed.
270731c46edSAlex Elder 	 */
271731c46edSAlex Elder 	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
272d7f3087bSAlex Elder 	if (ipa->version >= IPA_VERSION_4_0)
273731c46edSAlex Elder 		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
274731c46edSAlex Elder 	BUILD_BUG_ON(bit_count > 32);
2752d65ed76SAlex Elder 	offset_max = ~0U >> (32 - bit_count);
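	/* Worked example: prior to IPA v4.0 this yields offset_max = 0xffff
	 * (a 16-bit offset); with the 4 extra high-order bits allowed by
	 * IPA v4.0+ it becomes offset_max = 0xfffff (20 bits).
	 */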
276731c46edSAlex Elder 
2772d65ed76SAlex Elder 	/* Make sure the offset can be represented by the field(s)
2782d65ed76SAlex Elder 	 * that holds it.  Also make sure the offset is not outside
2792d65ed76SAlex Elder 	 * the overall IPA memory range.
2802d65ed76SAlex Elder 	 */
281731c46edSAlex Elder 	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
282731c46edSAlex Elder 		dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
2832d65ed76SAlex Elder 			name, ipa->mem_offset, offset, offset_max);
284731c46edSAlex Elder 		return false;
285731c46edSAlex Elder 	}
286731c46edSAlex Elder 
287731c46edSAlex Elder 	return true;
288731c46edSAlex Elder }
289731c46edSAlex Elder 
290731c46edSAlex Elder /* Check whether offsets passed to register_write are valid */
291731c46edSAlex Elder static bool ipa_cmd_register_write_valid(struct ipa *ipa)
292731c46edSAlex Elder {
293731c46edSAlex Elder 	const char *name;
294731c46edSAlex Elder 	u32 offset;
295731c46edSAlex Elder 
2962d65ed76SAlex Elder 	/* If hashed tables are supported, ensure the hash flush register
2972d65ed76SAlex Elder 	 * offset will fit in a register write IPA immediate command.
2982d65ed76SAlex Elder 	 */
299a266ad6bSAlex Elder 	if (ipa_table_hash_support(ipa)) {
300731c46edSAlex Elder 		offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
301731c46edSAlex Elder 		name = "filter/route hash flush";
302731c46edSAlex Elder 		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
303731c46edSAlex Elder 			return false;
3042d65ed76SAlex Elder 	}
305731c46edSAlex Elder 
3062d65ed76SAlex Elder 	/* Each endpoint can have a status endpoint associated with it,
3072d65ed76SAlex Elder 	 * and this is recorded in an endpoint register.  If the modem
3082d65ed76SAlex Elder 	 * crashes, we reset the status endpoint for all modem endpoints
3092d65ed76SAlex Elder 	 * using a register write IPA immediate command.  Make sure the
3102d65ed76SAlex Elder 	 * worst case (highest endpoint number) offset of that endpoint
3112d65ed76SAlex Elder 	 * fits in the register write command field(s) that must hold it.
3122d65ed76SAlex Elder 	 */
3132d65ed76SAlex Elder 	offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
314731c46edSAlex Elder 	name = "maximal endpoint status";
315731c46edSAlex Elder 	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
316731c46edSAlex Elder 		return false;
317731c46edSAlex Elder 
318731c46edSAlex Elder 	return true;
319731c46edSAlex Elder }
320731c46edSAlex Elder 
321731c46edSAlex Elder bool ipa_cmd_data_valid(struct ipa *ipa)
322731c46edSAlex Elder {
323731c46edSAlex Elder 	if (!ipa_cmd_header_valid(ipa))
324731c46edSAlex Elder 		return false;
325731c46edSAlex Elder 
326731c46edSAlex Elder 	if (!ipa_cmd_register_write_valid(ipa))
327731c46edSAlex Elder 		return false;
328731c46edSAlex Elder 
329731c46edSAlex Elder 	return true;
330731c46edSAlex Elder }
331731c46edSAlex Elder 
332731c46edSAlex Elder #endif /* IPA_VALIDATE */
333731c46edSAlex Elder 
334731c46edSAlex Elder int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
335731c46edSAlex Elder {
336731c46edSAlex Elder 	struct gsi_trans_info *trans_info = &channel->trans_info;
337731c46edSAlex Elder 	struct device *dev = channel->gsi->dev;
338731c46edSAlex Elder 	int ret;
339731c46edSAlex Elder 
340731c46edSAlex Elder 	/* This is as good a place as any to validate build constants */
341731c46edSAlex Elder 	ipa_cmd_validate_build();
342731c46edSAlex Elder 
343731c46edSAlex Elder 	/* Even though command payloads are allocated one at a time,
344731c46edSAlex Elder 	 * a single transaction can require up to tlv_count of them,
345731c46edSAlex Elder 	 * so we treat them as if that many can be allocated at once.
346731c46edSAlex Elder 	 */
347731c46edSAlex Elder 	ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
348731c46edSAlex Elder 				      sizeof(union ipa_cmd_payload),
349731c46edSAlex Elder 				      tre_max, channel->tlv_count);
350731c46edSAlex Elder 	if (ret)
351731c46edSAlex Elder 		return ret;
352731c46edSAlex Elder 
353731c46edSAlex Elder 	/* Each TRE needs a command info structure */
354731c46edSAlex Elder 	ret = gsi_trans_pool_init(&trans_info->info_pool,
355731c46edSAlex Elder 				   sizeof(struct ipa_cmd_info),
356731c46edSAlex Elder 				   tre_max, channel->tlv_count);
357731c46edSAlex Elder 	if (ret)
358731c46edSAlex Elder 		gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
359731c46edSAlex Elder 
360731c46edSAlex Elder 	return ret;
361731c46edSAlex Elder }
362731c46edSAlex Elder 
363731c46edSAlex Elder void ipa_cmd_pool_exit(struct gsi_channel *channel)
364731c46edSAlex Elder {
365731c46edSAlex Elder 	struct gsi_trans_info *trans_info = &channel->trans_info;
366731c46edSAlex Elder 	struct device *dev = channel->gsi->dev;
367731c46edSAlex Elder 
368731c46edSAlex Elder 	gsi_trans_pool_exit(&trans_info->info_pool);
369731c46edSAlex Elder 	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
370731c46edSAlex Elder }
371731c46edSAlex Elder 
372731c46edSAlex Elder static union ipa_cmd_payload *
373731c46edSAlex Elder ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
374731c46edSAlex Elder {
375731c46edSAlex Elder 	struct gsi_trans_info *trans_info;
376731c46edSAlex Elder 	struct ipa_endpoint *endpoint;
377731c46edSAlex Elder 
378731c46edSAlex Elder 	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
379731c46edSAlex Elder 	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;
380731c46edSAlex Elder 
381731c46edSAlex Elder 	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
382731c46edSAlex Elder }
383731c46edSAlex Elder 
384731c46edSAlex Elder /* If hash_size is 0, hash_offset and hash_addr are ignored. */
385731c46edSAlex Elder void ipa_cmd_table_init_add(struct gsi_trans *trans,
386731c46edSAlex Elder 			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
387731c46edSAlex Elder 			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
388731c46edSAlex Elder 			    dma_addr_t hash_addr)
389731c46edSAlex Elder {
390731c46edSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
391731c46edSAlex Elder 	enum dma_data_direction direction = DMA_TO_DEVICE;
392731c46edSAlex Elder 	struct ipa_cmd_hw_ip_fltrt_init *payload;
393731c46edSAlex Elder 	union ipa_cmd_payload *cmd_payload;
394731c46edSAlex Elder 	dma_addr_t payload_addr;
395731c46edSAlex Elder 	u64 val;
396731c46edSAlex Elder 
397731c46edSAlex Elder 	/* Record the non-hash table offset and size */
398731c46edSAlex Elder 	offset += ipa->mem_offset;
399731c46edSAlex Elder 	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
400731c46edSAlex Elder 	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
401731c46edSAlex Elder 
402731c46edSAlex Elder 	/* The hash table offset and address are zero if its size is 0 */
403731c46edSAlex Elder 	if (hash_size) {
404731c46edSAlex Elder 		/* Record the hash table offset and size */
405731c46edSAlex Elder 		hash_offset += ipa->mem_offset;
406731c46edSAlex Elder 		val |= u64_encode_bits(hash_offset,
407731c46edSAlex Elder 				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
408731c46edSAlex Elder 		val |= u64_encode_bits(hash_size,
409731c46edSAlex Elder 				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
410731c46edSAlex Elder 	}
411731c46edSAlex Elder 
412731c46edSAlex Elder 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
413731c46edSAlex Elder 	payload = &cmd_payload->table_init;
414731c46edSAlex Elder 
415731c46edSAlex Elder 	/* Fill in all offsets and sizes and the non-hash table address */
416731c46edSAlex Elder 	if (hash_size)
417731c46edSAlex Elder 		payload->hash_rules_addr = cpu_to_le64(hash_addr);
418731c46edSAlex Elder 	payload->flags = cpu_to_le64(val);
419731c46edSAlex Elder 	payload->nhash_rules_addr = cpu_to_le64(addr);
420731c46edSAlex Elder 
421731c46edSAlex Elder 	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
422731c46edSAlex Elder 			  direction, opcode);
423731c46edSAlex Elder }
424731c46edSAlex Elder 
425731c46edSAlex Elder /* Initialize header space in IPA-local memory */
426731c46edSAlex Elder void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
427731c46edSAlex Elder 				dma_addr_t addr)
428731c46edSAlex Elder {
429731c46edSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
430731c46edSAlex Elder 	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
431731c46edSAlex Elder 	enum dma_data_direction direction = DMA_TO_DEVICE;
432731c46edSAlex Elder 	struct ipa_cmd_hw_hdr_init_local *payload;
433731c46edSAlex Elder 	union ipa_cmd_payload *cmd_payload;
434731c46edSAlex Elder 	dma_addr_t payload_addr;
435731c46edSAlex Elder 	u32 flags;
436731c46edSAlex Elder 
437731c46edSAlex Elder 	offset += ipa->mem_offset;
438731c46edSAlex Elder 
439731c46edSAlex Elder 	/* With this command we tell the IPA where in its local memory the
440731c46edSAlex Elder 	 * header tables reside.  The content of the buffer provided is
441731c46edSAlex Elder 	 * also written via DMA into that space.  The IPA hardware owns
442731c46edSAlex Elder 	 * the table, but the AP must initialize it.
443731c46edSAlex Elder 	 */
444731c46edSAlex Elder 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
445731c46edSAlex Elder 	payload = &cmd_payload->hdr_init_local;
446731c46edSAlex Elder 
447731c46edSAlex Elder 	payload->hdr_table_addr = cpu_to_le64(addr);
448731c46edSAlex Elder 	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
449731c46edSAlex Elder 	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
450731c46edSAlex Elder 	payload->flags = cpu_to_le32(flags);
451731c46edSAlex Elder 
452731c46edSAlex Elder 	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
453731c46edSAlex Elder 			  direction, opcode);
454731c46edSAlex Elder }
455731c46edSAlex Elder 
456731c46edSAlex Elder void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
457731c46edSAlex Elder 				u32 mask, bool clear_full)
458731c46edSAlex Elder {
459731c46edSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
460731c46edSAlex Elder 	struct ipa_cmd_register_write *payload;
461731c46edSAlex Elder 	union ipa_cmd_payload *cmd_payload;
462731c46edSAlex Elder 	u32 opcode = IPA_CMD_REGISTER_WRITE;
463731c46edSAlex Elder 	dma_addr_t payload_addr;
464731c46edSAlex Elder 	u32 clear_option;
465731c46edSAlex Elder 	u32 options;
466731c46edSAlex Elder 	u16 flags;
467731c46edSAlex Elder 
468731c46edSAlex Elder 	/* pipeline_clear_src_grp is not used */
469731c46edSAlex Elder 	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;
470731c46edSAlex Elder 
471d7f3087bSAlex Elder 	/* IPA v4.0+ represents the pipeline clear options in the opcode.  It
472d7f3087bSAlex Elder 	 * also supports a larger offset by encoding additional high-order
473d7f3087bSAlex Elder 	 * bits in the payload flags field.
474d7f3087bSAlex Elder 	 */
475d7f3087bSAlex Elder 	if (ipa->version >= IPA_VERSION_4_0) {
476731c46edSAlex Elder 		u16 offset_high;
477731c46edSAlex Elder 		u32 val;
478731c46edSAlex Elder 
479731c46edSAlex Elder 		/* Opcode encodes pipeline clear options */
480731c46edSAlex Elder 		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
481731c46edSAlex Elder 		val = u16_encode_bits(clear_option,
482731c46edSAlex Elder 				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
483731c46edSAlex Elder 		opcode |= val;
484731c46edSAlex Elder 
485731c46edSAlex Elder 		/* Extract the high 4 bits from the offset */
486731c46edSAlex Elder 		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
487731c46edSAlex Elder 		offset &= (1 << 16) - 1;
488731c46edSAlex Elder 
489731c46edSAlex Elder 		/* Encode those high-order offset bits into the flags field */
490731c46edSAlex Elder 		flags = u16_encode_bits(offset_high,
491731c46edSAlex Elder 				REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
492731c46edSAlex Elder 		options = 0;	/* reserved */
493731c46edSAlex Elder 
494731c46edSAlex Elder 	} else {
495731c46edSAlex Elder 		flags = 0;	/* SKIP_CLEAR flag is always 0 */
496731c46edSAlex Elder 		options = u16_encode_bits(clear_option,
497731c46edSAlex Elder 					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
498731c46edSAlex Elder 	}
499731c46edSAlex Elder 
500731c46edSAlex Elder 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
501731c46edSAlex Elder 	payload = &cmd_payload->register_write;
502731c46edSAlex Elder 
503731c46edSAlex Elder 	payload->flags = cpu_to_le16(flags);
504731c46edSAlex Elder 	payload->offset = cpu_to_le16((u16)offset);
505731c46edSAlex Elder 	payload->value = cpu_to_le32(value);
506731c46edSAlex Elder 	payload->value_mask = cpu_to_le32(mask);
507731c46edSAlex Elder 	payload->clear_options = cpu_to_le32(options);
508731c46edSAlex Elder 
509731c46edSAlex Elder 	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
510731c46edSAlex Elder 			  DMA_NONE, opcode);
511731c46edSAlex Elder }
512731c46edSAlex Elder 
513731c46edSAlex Elder /* Skip IP packet processing on the next data transfer on a TX channel */
514731c46edSAlex Elder static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
515731c46edSAlex Elder {
516731c46edSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
517731c46edSAlex Elder 	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
518731c46edSAlex Elder 	enum dma_data_direction direction = DMA_TO_DEVICE;
519731c46edSAlex Elder 	struct ipa_cmd_ip_packet_init *payload;
520731c46edSAlex Elder 	union ipa_cmd_payload *cmd_payload;
521731c46edSAlex Elder 	dma_addr_t payload_addr;
522731c46edSAlex Elder 
523731c46edSAlex Elder 	/* assert(endpoint_id <
524731c46edSAlex Elder 		  field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */
525731c46edSAlex Elder 
526731c46edSAlex Elder 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
527731c46edSAlex Elder 	payload = &cmd_payload->ip_packet_init;
528731c46edSAlex Elder 
529731c46edSAlex Elder 	payload->dest_endpoint = u8_encode_bits(endpoint_id,
530731c46edSAlex Elder 					IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
531731c46edSAlex Elder 
532731c46edSAlex Elder 	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
533731c46edSAlex Elder 			  direction, opcode);
534731c46edSAlex Elder }
535731c46edSAlex Elder 
536731c46edSAlex Elder /* Use a DMA command to read or write a block of IPA-resident memory */
537731c46edSAlex Elder void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
538731c46edSAlex Elder 				dma_addr_t addr, bool toward_ipa)
539731c46edSAlex Elder {
540731c46edSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
541731c46edSAlex Elder 	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
542731c46edSAlex Elder 	struct ipa_cmd_hw_dma_mem_mem *payload;
543731c46edSAlex Elder 	union ipa_cmd_payload *cmd_payload;
544731c46edSAlex Elder 	enum dma_data_direction direction;
545731c46edSAlex Elder 	dma_addr_t payload_addr;
546731c46edSAlex Elder 	u16 flags;
547731c46edSAlex Elder 
548731c46edSAlex Elder 	/* size and offset must fit in 16-bit fields */
549731c46edSAlex Elder 	/* assert(size > 0 && size <= U16_MAX); */
550731c46edSAlex Elder 	/* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
551731c46edSAlex Elder 
552731c46edSAlex Elder 	offset += ipa->mem_offset;
553731c46edSAlex Elder 
554731c46edSAlex Elder 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
555731c46edSAlex Elder 	payload = &cmd_payload->dma_shared_mem;
556731c46edSAlex Elder 
557731c46edSAlex Elder 	/* payload->clear_after_read was reserved prior to IPA v4.0.  It's
558731c46edSAlex Elder 	 * never needed for current code, so it's 0 regardless of version.
559731c46edSAlex Elder 	 */
560731c46edSAlex Elder 	payload->size = cpu_to_le16(size);
561731c46edSAlex Elder 	payload->local_addr = cpu_to_le16(offset);
562731c46edSAlex Elder 	/* payload->flags:
563731c46edSAlex Elder 	 *   direction:		0 = write to IPA, 1 = read from IPA
564731c46edSAlex Elder 	 * Starting at v4.0 these are reserved; either way, all zero:
565731c46edSAlex Elder 	 *   pipeline clear:	0 = wait for pipeline clear (don't skip)
566731c46edSAlex Elder 	 *   clear_options:	0 = pipeline_clear_hps
567731c46edSAlex Elder 	 * Instead, for v4.0+ these are encoded in the opcode.  But again
568731c46edSAlex Elder 	 * since both values are 0 we won't bother OR'ing them in.
569731c46edSAlex Elder 	 */
570731c46edSAlex Elder 	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
571731c46edSAlex Elder 	payload->flags = cpu_to_le16(flags);
572731c46edSAlex Elder 	payload->system_addr = cpu_to_le64(addr);
573731c46edSAlex Elder 
574731c46edSAlex Elder 	direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
575731c46edSAlex Elder 
576731c46edSAlex Elder 	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
577731c46edSAlex Elder 			  direction, opcode);
578731c46edSAlex Elder }
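
/* Illustrative sketch (hypothetical, not part of the driver): reading a
 * block of IPA-local memory into an AP buffer using the DMA command
 * above.  The caller is assumed to supply a coherent DMA buffer mapped
 * at addr, with offset and size satisfying the 16-bit constraints noted
 * in ipa_cmd_dma_shared_mem_add().
 */
#if 0	/* example only */
static int ipa_cmd_example_read_mem(struct ipa *ipa, u32 offset, u16 size,
				    dma_addr_t addr)
{
	struct gsi_trans *trans;

	trans = ipa_cmd_trans_alloc(ipa, 1);
	if (!trans)
		return -EBUSY;

	/* false means transfer from IPA-local memory toward the AP buffer */
	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, false);

	/* Wait for the command to complete; the buffer mapped at addr
	 * then holds the requested data.
	 */
	gsi_trans_commit_wait(trans);

	return 0;
}
#endif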
579731c46edSAlex Elder 
580792b75b1SAlex Elder static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
581731c46edSAlex Elder {
582731c46edSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
583731c46edSAlex Elder 	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
584731c46edSAlex Elder 	enum dma_data_direction direction = DMA_TO_DEVICE;
585731c46edSAlex Elder 	struct ipa_cmd_ip_packet_tag_status *payload;
586731c46edSAlex Elder 	union ipa_cmd_payload *cmd_payload;
587731c46edSAlex Elder 	dma_addr_t payload_addr;
588731c46edSAlex Elder 
589731c46edSAlex Elder 	/* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */
590731c46edSAlex Elder 
591731c46edSAlex Elder 	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
592731c46edSAlex Elder 	payload = &cmd_payload->ip_packet_tag_status;
593731c46edSAlex Elder 
594792b75b1SAlex Elder 	payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
595731c46edSAlex Elder 
596731c46edSAlex Elder 	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
597731c46edSAlex Elder 			  direction, opcode);
598731c46edSAlex Elder }
599731c46edSAlex Elder 
600731c46edSAlex Elder /* Issue a small command TX data transfer */
601070740d3SAlex Elder static void ipa_cmd_transfer_add(struct gsi_trans *trans)
602731c46edSAlex Elder {
603731c46edSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
604731c46edSAlex Elder 	enum dma_data_direction direction = DMA_TO_DEVICE;
605731c46edSAlex Elder 	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
606731c46edSAlex Elder 	union ipa_cmd_payload *payload;
607731c46edSAlex Elder 	dma_addr_t payload_addr;
608731c46edSAlex Elder 
609731c46edSAlex Elder 	/* Just transfer a zero-filled payload structure */
610731c46edSAlex Elder 	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
611731c46edSAlex Elder 
612731c46edSAlex Elder 	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
613731c46edSAlex Elder 			  direction, opcode);
614731c46edSAlex Elder }
615731c46edSAlex Elder 
616aa56e3e5SAlex Elder /* Add immediate commands to a transaction to clear the hardware pipeline */
617aa56e3e5SAlex Elder void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
618731c46edSAlex Elder {
619731c46edSAlex Elder 	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
6202c4bb809SAlex Elder 	struct ipa_endpoint *endpoint;
621731c46edSAlex Elder 
62251c48ce2SAlex Elder 	/* This will complete when the transfer is received */
62351c48ce2SAlex Elder 	reinit_completion(&ipa->completion);
62451c48ce2SAlex Elder 
625aa56e3e5SAlex Elder 	/* Issue a no-op register write command (mask 0 means no write) */
6262c4bb809SAlex Elder 	ipa_cmd_register_write_add(trans, 0, 0, 0, true);
627aa56e3e5SAlex Elder 
628aa56e3e5SAlex Elder 	/* Send a data packet through the IPA pipeline.  The packet_init
629aa56e3e5SAlex Elder 	 * command says to send the next packet directly to the exception
630aa56e3e5SAlex Elder 	 * endpoint without any other IPA processing.  The tag_status
631aa56e3e5SAlex Elder 	 * command requests that status be generated on completion of
632792b75b1SAlex Elder 	 * that transfer, and that it will be tagged with a value.
633aa56e3e5SAlex Elder 	 * that transfer, and that the status be tagged with a value.
634aa56e3e5SAlex Elder 	 * (instead of a command) using the command endpoint.
635aa56e3e5SAlex Elder 	 */
636aa56e3e5SAlex Elder 	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
637731c46edSAlex Elder 	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
638792b75b1SAlex Elder 	ipa_cmd_ip_tag_status_add(trans);
639070740d3SAlex Elder 	ipa_cmd_transfer_add(trans);
640731c46edSAlex Elder }
641731c46edSAlex Elder 
642aa56e3e5SAlex Elder /* Returns the number of commands required to clear the pipeline */
643aa56e3e5SAlex Elder u32 ipa_cmd_pipeline_clear_count(void)
644731c46edSAlex Elder {
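	/* register write + packet init + tag status + small data transfer */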
645731c46edSAlex Elder 	return 4;
646731c46edSAlex Elder }
647731c46edSAlex Elder 
64851c48ce2SAlex Elder void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
64951c48ce2SAlex Elder {
65051c48ce2SAlex Elder 	wait_for_completion(&ipa->completion);
65151c48ce2SAlex Elder }
65251c48ce2SAlex Elder 
653aa56e3e5SAlex Elder void ipa_cmd_pipeline_clear(struct ipa *ipa)
6546cb63ea6SAlex Elder {
655aa56e3e5SAlex Elder 	u32 count = ipa_cmd_pipeline_clear_count();
6566cb63ea6SAlex Elder 	struct gsi_trans *trans;
6576cb63ea6SAlex Elder 
6586cb63ea6SAlex Elder 	trans = ipa_cmd_trans_alloc(ipa, count);
6596cb63ea6SAlex Elder 	if (trans) {
660aa56e3e5SAlex Elder 		ipa_cmd_pipeline_clear_add(trans);
6616cb63ea6SAlex Elder 		gsi_trans_commit_wait(trans);
66251c48ce2SAlex Elder 		ipa_cmd_pipeline_clear_wait(ipa);
6636cb63ea6SAlex Elder 	} else {
6646cb63ea6SAlex Elder 		dev_err(&ipa->pdev->dev,
6656cb63ea6SAlex Elder 			"error allocating %u entry tag transaction\n", count);
6666cb63ea6SAlex Elder 	}
6676cb63ea6SAlex Elder }
6686cb63ea6SAlex Elder 
669731c46edSAlex Elder static struct ipa_cmd_info *
670731c46edSAlex Elder ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
671731c46edSAlex Elder {
672731c46edSAlex Elder 	struct gsi_channel *channel;
673731c46edSAlex Elder 
674731c46edSAlex Elder 	channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
675731c46edSAlex Elder 
676731c46edSAlex Elder 	return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
677731c46edSAlex Elder }
678731c46edSAlex Elder 
679731c46edSAlex Elder /* Allocate a transaction for the command TX endpoint */
680731c46edSAlex Elder struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
681731c46edSAlex Elder {
682731c46edSAlex Elder 	struct ipa_endpoint *endpoint;
683731c46edSAlex Elder 	struct gsi_trans *trans;
684731c46edSAlex Elder 
685731c46edSAlex Elder 	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
686731c46edSAlex Elder 
687731c46edSAlex Elder 	trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
688731c46edSAlex Elder 					tre_count, DMA_NONE);
689731c46edSAlex Elder 	if (trans)
690731c46edSAlex Elder 		trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
691731c46edSAlex Elder 
692731c46edSAlex Elder 	return trans;
693731c46edSAlex Elder }