// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2023 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"

/**
 * DOC: IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and use a single GSI TRE. Each immediate command
 * has a well-defined format, having a payload of a known length. This
 * allows the transfer element's length field to be used to hold an
 * immediate command's opcode. The payload for a command resides in AP
 * memory and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback, and are
 * always issued using gsi_trans_commit_wait().
 */
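
/* A minimal sketch of how a caller might issue one immediate command
 * (illustrative only; "ipa", "offset", "size" and "addr" are assumed to
 * be supplied by the caller):
 *
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);	(one TRE for one command)
 *	if (trans) {
 *		ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, false);
 *		gsi_trans_commit_wait(trans);
 *	}
 */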

/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps	= 0x0,
	pipeline_clear_src_grp	= 0x1,
	pipeline_clear_full	= 0x2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK		GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK		GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK		GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK		GENMASK_ULL(55, 40)
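
/* For example, ipa_cmd_table_init_add() below packs the non-hashed table
 * offset and size into the 64-bit flags word using these masks:
 *
 *	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
 *	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
 *
 * The 12-bit size fields limit a table to 4095 bytes, and the 16-bit
 * address fields limit its IPA-local offset to 65535.
 */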

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK	GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK	GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved prior to IPA v4.0 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;	/* Full 8 bits used for IPA v5.0+ */
	u8 reserved[7];
};

/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read;	/* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};

static void ipa_cmd_validate_build(void)
{
	/* The size of a filter table needs to fit into fields in the
	 * ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
	 * might not be used, non-hashed and hashed tables have the same
	 * maximum size. IPv4 and IPv6 filter tables have the same number
	 * of entries.
	 */
	/* Hashed and non-hashed fields are assumed to be the same size */
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

	/* Prior to IPA v5.0, we supported no more than 32 endpoints,
	 * and this was reflected in some 5-bit fields that held
	 * endpoint numbers. Starting with IPA v5.0, the widths of
	 * these fields were extended to 8 bits, meaning up to 256
	 * endpoints. If the driver claims to support more than
	 * that it's an error.
	 */
	BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
}

/* Validate a memory region holding a table */
bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
			      bool route)
{
	u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
	const char *table = route ? "route" : "filter";
	struct device *dev = &ipa->pdev->dev;
	u32 size;

	size = route ? ipa->route_count : ipa->filter_count + 1;
	size *= sizeof(__le64);

	/* Size must fit in the immediate command field that holds it */
	if (size > size_max) {
		dev_err(dev, "%s table region size too large\n", table);
		dev_err(dev, " (0x%04x > 0x%04x)\n", size, size_max);

		return false;
	}

	/* Offset must fit in the immediate command field that holds it */
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "%s table region offset too large\n", table);
		dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_mem *mem;
	u32 offset_max;
	u32 size_max;
	u32 offset;
	u32 size;

	/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
	 * the header table memory area in an immediate command. Make sure
	 * the offset and size fit in the fields that need to hold them, and
	 * that the entire range is within the overall IPA memory range.
	 */
	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);

	/* The header memory area contains both the modem and AP header
	 * regions. The modem portion defines the address of the region.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;

	/* Make sure the offset fits in the IPA command */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "header table region offset too large\n");
		dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, offset, offset_max);

		return false;
	}

	/* Add the size of the AP portion (if defined) to the combined size */
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	/* Make sure the combined size fits in the IPA command */
	if (size > size_max) {
		dev_err(dev, "header table region size too large\n");
		dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);

		return false;
	}

	return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 bit_count;

	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA. A 16 bit offset is always supported,
	 * but starting with IPA v4.0 some additional high-order bits are
	 * allowed.
	 */
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version >= IPA_VERSION_4_0)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);
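	/* For example: the offset field itself is 16 bits wide, and the
	 * OFFSET_HIGH flags field adds four more, so offset_max is 0xffff
	 * before IPA v4.0 and 0xfffff for IPA v4.0+.
	 */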

	/* Make sure the offset can be represented by the field(s)
	 * that holds it. Also make sure the offset is not outside
	 * the overall IPA memory range.
	 */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}

/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const struct reg *reg;
	const char *name;
	u32 offset;

	/* If hashed tables are supported, ensure the hash flush register
	 * offset will fit in a register write IPA immediate command.
	 */
	if (ipa_table_hash_support(ipa)) {
		if (ipa->version < IPA_VERSION_5_0)
			reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
		else
			reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);

		offset = reg_offset(reg);
		name = "filter/route hash flush";
		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
			return false;
	}

	/* Each endpoint can have a status endpoint associated with it,
	 * and this is recorded in an endpoint register. If the modem
	 * crashes, we reset the status endpoint for all modem endpoints
	 * using a register write IPA immediate command. Make sure the
	 * worst case (highest endpoint number) offset of that endpoint
	 * fits in the register write command field(s) that must hold it.
	 */
	reg = ipa_reg(ipa, ENDP_STATUS);
	offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}

int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	/* Command payloads are allocated one at a time, but a single
	 * transaction can require up to the maximum supported by the
	 * channel; treat them as if they were allocated all at once.
	 */
	return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				       sizeof(union ipa_cmd_payload),
				       tre_max, channel->trans_tre_max);
}

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are zero if its size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside. The content of the buffer provided is
	 * also written via DMA into that space. The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	/* IPA v4.0+ represents the pipeline clear options in the opcode. It
	 * also supports a larger offset by encoding additional high-order
	 * bits in the payload flags field.
	 */
	if (ipa->version >= IPA_VERSION_4_0) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options */
		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;
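		/* For example, offset 0x12345 yields offset_high 0x1 and a
		 * low 16-bit offset of 0x2345.
		 */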

		/* Encode the high 4 bits into the flags field */
		flags = u16_encode_bits(offset_high,
					REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */

	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	if (ipa->version < IPA_VERSION_5_0) {
		payload->dest_endpoint =
			u8_encode_bits(endpoint_id,
				       IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
	} else {
		payload->dest_endpoint = endpoint_id;
	}

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	WARN_ON(!size);
	WARN_ON(size > U16_MAX);
	WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0. It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction: 0 = write to IPA, 1 = read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear: 0 = wait for pipeline clear (don't skip)
	 *   clear_options: 0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode. But again
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	/* This will complete when the transfer is received */
	reinit_completion(&ipa->completion);

	/* Issue a no-op register write command (mask 0 means no write) */
	ipa_cmd_register_write_add(trans, 0, 0, 0, true);

	/* Send a data packet through the IPA pipeline. The packet_init
	 * command says to send the next packet directly to the exception
	 * endpoint without any other IPA processing. The tag_status
	 * command requests that status be generated on completion of
	 * that transfer, and that it will be tagged with a value.
	 * Finally, the transfer command sends a small packet of data
	 * (instead of a command) using the command endpoint.
	 */
	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans);
	ipa_cmd_transfer_add(trans);
}
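
/* A sketch of how these pieces fit together for a caller (illustrative
 * only; this is an assumed calling pattern, not a quote of the actual
 * call sites):
 *
 *	trans = ipa_cmd_trans_alloc(ipa, ipa_cmd_pipeline_clear_count());
 *	if (trans) {
 *		ipa_cmd_pipeline_clear_add(trans);
 *		gsi_trans_commit_wait(trans);
 *		ipa_cmd_pipeline_clear_wait(ipa);
 *	}
 */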

/* Returns the number of commands required to clear the pipeline */
u32 ipa_cmd_pipeline_clear_count(void)
{
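	/* A register write, a packet init, a tag status, and a small data
	 * transfer (see ipa_cmd_pipeline_clear_add() above)
	 */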
	return 4;
}

void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
	wait_for_completion(&ipa->completion);
}

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;

	if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
		return NULL;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
				       tre_count, DMA_NONE);
}

/* Init function for immediate commands; there is no ipa_cmd_exit() */
int ipa_cmd_init(struct ipa *ipa)
{
	ipa_cmd_validate_build();

	if (!ipa_cmd_header_init_local_valid(ipa))
		return -EINVAL;

	if (!ipa_cmd_register_write_valid(ipa))
		return -EINVAL;

	return 0;
}