1 // SPDX-License-Identifier: GPL-2.0 2 3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 4 * Copyright (C) 2019-2021 Linaro Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <linux/device.h> 9 #include <linux/slab.h> 10 #include <linux/bitfield.h> 11 #include <linux/if_rmnet.h> 12 #include <linux/dma-direction.h> 13 14 #include "gsi.h" 15 #include "gsi_trans.h" 16 #include "ipa.h" 17 #include "ipa_data.h" 18 #include "ipa_endpoint.h" 19 #include "ipa_cmd.h" 20 #include "ipa_mem.h" 21 #include "ipa_modem.h" 22 #include "ipa_table.h" 23 #include "ipa_gsi.h" 24 #include "ipa_power.h" 25 26 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 27 28 #define IPA_REPLENISH_BATCH 16 29 30 /* RX buffer is 1 page (or a power-of-2 contiguous pages) */ 31 #define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */ 32 33 /* The amount of RX buffer space consumed by standard skb overhead */ 34 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0)) 35 36 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */ 37 #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */ 38 39 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 40 #define IPA_AGGR_TIME_LIMIT 500 /* microseconds */ 41 42 /** enum ipa_status_opcode - status element opcode hardware values */ 43 enum ipa_status_opcode { 44 IPA_STATUS_OPCODE_PACKET = 0x01, 45 IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04, 46 IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08, 47 IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40, 48 }; 49 50 /** enum ipa_status_exception - status element exception type */ 51 enum ipa_status_exception { 52 /* 0 means no exception */ 53 IPA_STATUS_EXCEPTION_DEAGGR = 0x01, 54 }; 55 56 /* Status element provided by hardware */ 57 struct ipa_status { 58 u8 opcode; /* enum ipa_status_opcode */ 59 u8 exception; /* enum ipa_status_exception */ 60 __le16 mask; 61 __le16 pkt_len; 62 u8 endp_src_idx; 63 u8 endp_dst_idx; 64 __le32 metadata; 65 __le32 flags1; 66 __le64 flags2; 67 __le32 flags3; 68 __le32 flags4; 69 }; 70 71 /* Field masks for struct ipa_status structure fields */ 72 #define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4) 73 #define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0) 74 #define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0) 75 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22) 76 #define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16) 77 78 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, 79 const struct ipa_gsi_endpoint_data *all_data, 80 const struct ipa_gsi_endpoint_data *data) 81 { 82 const struct ipa_gsi_endpoint_data *other_data; 83 struct device *dev = &ipa->pdev->dev; 84 enum ipa_endpoint_name other_name; 85 86 if (ipa_gsi_endpoint_data_empty(data)) 87 return true; 88 89 if (!data->toward_ipa) { 90 if (data->endpoint.filter_support) { 91 dev_err(dev, "filtering not supported for " 92 "RX endpoint %u\n", 93 data->endpoint_id); 94 return false; 95 } 96 97 return true; /* Nothing more to check for RX */ 98 } 99 100 if (data->endpoint.config.status_enable) { 101 other_name = data->endpoint.config.tx.status_endpoint; 102 if (other_name >= count) { 103 dev_err(dev, "status endpoint name %u out of range " 104 "for endpoint %u\n", 105 other_name, data->endpoint_id); 106 return false; 107 } 108 109 /* Status endpoint must be defined... 
*/ 110 other_data = &all_data[other_name]; 111 if (ipa_gsi_endpoint_data_empty(other_data)) { 112 dev_err(dev, "DMA endpoint name %u undefined " 113 "for endpoint %u\n", 114 other_name, data->endpoint_id); 115 return false; 116 } 117 118 /* ...and has to be an RX endpoint... */ 119 if (other_data->toward_ipa) { 120 dev_err(dev, 121 "status endpoint for endpoint %u not RX\n", 122 data->endpoint_id); 123 return false; 124 } 125 126 /* ...and if it's to be an AP endpoint... */ 127 if (other_data->ee_id == GSI_EE_AP) { 128 /* ...make sure it has status enabled. */ 129 if (!other_data->endpoint.config.status_enable) { 130 dev_err(dev, 131 "status not enabled for endpoint %u\n", 132 other_data->endpoint_id); 133 return false; 134 } 135 } 136 } 137 138 if (data->endpoint.config.dma_mode) { 139 other_name = data->endpoint.config.dma_endpoint; 140 if (other_name >= count) { 141 dev_err(dev, "DMA endpoint name %u out of range " 142 "for endpoint %u\n", 143 other_name, data->endpoint_id); 144 return false; 145 } 146 147 other_data = &all_data[other_name]; 148 if (ipa_gsi_endpoint_data_empty(other_data)) { 149 dev_err(dev, "DMA endpoint name %u undefined " 150 "for endpoint %u\n", 151 other_name, data->endpoint_id); 152 return false; 153 } 154 } 155 156 return true; 157 } 158 159 static u32 aggr_byte_limit_max(enum ipa_version version) 160 { 161 if (version < IPA_VERSION_4_5) 162 return field_max(aggr_byte_limit_fmask(true)); 163 164 return field_max(aggr_byte_limit_fmask(false)); 165 } 166 167 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, 168 const struct ipa_gsi_endpoint_data *data) 169 { 170 const struct ipa_gsi_endpoint_data *dp = data; 171 struct device *dev = &ipa->pdev->dev; 172 enum ipa_endpoint_name name; 173 u32 limit; 174 175 if (count > IPA_ENDPOINT_COUNT) { 176 dev_err(dev, "too many endpoints specified (%u > %u)\n", 177 count, IPA_ENDPOINT_COUNT); 178 return false; 179 } 180 181 /* The aggregation byte limit defines the point at which an 182 * aggregation window will close. It is programmed into the 183 * IPA hardware as a number of KB. We don't use "hard byte 184 * limit" aggregation, which means that we need to supply 185 * enough space in a receive buffer to hold a complete MTU 186 * plus normal skb overhead *after* that aggregation byte 187 * limit has been crossed. 188 * 189 * This check ensures we don't define a receive buffer size 190 * that would exceed what we can represent in the field that 191 * is used to program its size. 
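 *
 * A hedged worked example (the limit value here is an assumption, not
 * taken from this driver): if aggr_byte_limit_max() returned 31, the
 * hardware could be told to close aggregation at 31 KB, so limit
 * becomes 31 * SZ_1K plus IPA_MTU plus IPA_RX_BUFFER_OVERHEAD, i.e.
 * roughly 33-34 KB. The 8192-byte IPA_RX_BUFFER_SIZE defined above is
 * well under that, so the check below passes.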
192 */ 193 limit = aggr_byte_limit_max(ipa->version) * SZ_1K; 194 limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD; 195 if (limit < IPA_RX_BUFFER_SIZE) { 196 dev_err(dev, "buffer size too big for aggregation (%u > %u)\n", 197 IPA_RX_BUFFER_SIZE, limit); 198 return false; 199 } 200 201 /* Make sure needed endpoints have defined data */ 202 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) { 203 dev_err(dev, "command TX endpoint not defined\n"); 204 return false; 205 } 206 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) { 207 dev_err(dev, "LAN RX endpoint not defined\n"); 208 return false; 209 } 210 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) { 211 dev_err(dev, "AP->modem TX endpoint not defined\n"); 212 return false; 213 } 214 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) { 215 dev_err(dev, "AP<-modem RX endpoint not defined\n"); 216 return false; 217 } 218 219 for (name = 0; name < count; name++, dp++) 220 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) 221 return false; 222 223 return true; 224 } 225 226 /* Allocate a transaction to use on a non-command endpoint */ 227 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, 228 u32 tre_count) 229 { 230 struct gsi *gsi = &endpoint->ipa->gsi; 231 u32 channel_id = endpoint->channel_id; 232 enum dma_data_direction direction; 233 234 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 235 236 return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction); 237 } 238 239 /* suspend_delay represents suspend for RX, delay for TX endpoints. 240 * Note that suspend is not supported starting with IPA v4.0, and 241 * delay mode should not be used starting with IPA v4.2. 242 */ 243 static bool 244 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) 245 { 246 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); 247 struct ipa *ipa = endpoint->ipa; 248 bool state; 249 u32 mask; 250 u32 val; 251 252 if (endpoint->toward_ipa) 253 WARN_ON(ipa->version >= IPA_VERSION_4_2); 254 else 255 WARN_ON(ipa->version >= IPA_VERSION_4_0); 256 257 mask = endpoint->toward_ipa ? 
ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; 258 259 val = ioread32(ipa->reg_virt + offset); 260 state = !!(val & mask); 261 262 /* Don't bother if it's already in the requested state */ 263 if (suspend_delay != state) { 264 val ^= mask; 265 iowrite32(val, ipa->reg_virt + offset); 266 } 267 268 return state; 269 } 270 271 /* We don't care what the previous state was for delay mode */ 272 static void 273 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) 274 { 275 /* Delay mode should not be used for IPA v4.2+ */ 276 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); 277 WARN_ON(!endpoint->toward_ipa); 278 279 (void)ipa_endpoint_init_ctrl(endpoint, enable); 280 } 281 282 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) 283 { 284 u32 mask = BIT(endpoint->endpoint_id); 285 struct ipa *ipa = endpoint->ipa; 286 u32 offset; 287 u32 val; 288 289 WARN_ON(!(mask & ipa->available)); 290 291 offset = ipa_reg_state_aggr_active_offset(ipa->version); 292 val = ioread32(ipa->reg_virt + offset); 293 294 return !!(val & mask); 295 } 296 297 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) 298 { 299 u32 mask = BIT(endpoint->endpoint_id); 300 struct ipa *ipa = endpoint->ipa; 301 302 WARN_ON(!(mask & ipa->available)); 303 304 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); 305 } 306 307 /** 308 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt 309 * @endpoint: Endpoint on which to emulate a suspend 310 * 311 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended 312 * with an open aggregation frame. This is to work around a hardware 313 * issue in IPA version 3.5.1 where the suspend interrupt will not be 314 * generated when it should be. 315 */ 316 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) 317 { 318 struct ipa *ipa = endpoint->ipa; 319 320 if (!endpoint->data->aggregation) 321 return; 322 323 /* Nothing to do if the endpoint doesn't have aggregation open */ 324 if (!ipa_endpoint_aggr_active(endpoint)) 325 return; 326 327 /* Force close aggregation */ 328 ipa_endpoint_force_close(endpoint); 329 330 ipa_interrupt_simulate_suspend(ipa->interrupt); 331 } 332 333 /* Returns previous suspend state (true means suspend was enabled) */ 334 static bool 335 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) 336 { 337 bool suspended; 338 339 if (endpoint->ipa->version >= IPA_VERSION_4_0) 340 return enable; /* For IPA v4.0+, no change made */ 341 342 WARN_ON(endpoint->toward_ipa); 343 344 suspended = ipa_endpoint_init_ctrl(endpoint, enable); 345 346 /* A client suspended with an open aggregation frame will not 347 * generate a SUSPEND IPA interrupt. If enabling suspend, have 348 * ipa_endpoint_suspend_aggr() handle this. 349 */ 350 if (enable && !suspended) 351 ipa_endpoint_suspend_aggr(endpoint); 352 353 return suspended; 354 } 355 356 /* Put all modem RX endpoints into suspend mode, and stop transmission 357 * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is 358 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow 359 * control instead. 
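 *
 * Summarizing the dispatch in the function below (derived from this
 * file, not from separate documentation):
 *
 *	modem RX endpoint               endpoint SUSPEND mode (no
 *					register change on IPA v4.0+,
 *					which drops support for it)
 *	modem TX endpoint, IPA < v4.2   endpoint DELAY mode
 *	modem TX endpoint, IPA >= v4.2  GSI channel flow control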
360 */ 361 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) 362 { 363 u32 endpoint_id; 364 365 for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) { 366 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; 367 368 if (endpoint->ee_id != GSI_EE_MODEM) 369 continue; 370 371 if (!endpoint->toward_ipa) 372 (void)ipa_endpoint_program_suspend(endpoint, enable); 373 else if (ipa->version < IPA_VERSION_4_2) 374 ipa_endpoint_program_delay(endpoint, enable); 375 else 376 gsi_modem_channel_flow_control(&ipa->gsi, 377 endpoint->channel_id, 378 enable); 379 } 380 } 381 382 /* Reset all modem endpoints to use the default exception endpoint */ 383 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) 384 { 385 u32 initialized = ipa->initialized; 386 struct gsi_trans *trans; 387 u32 count; 388 389 /* We need one command per modem TX endpoint. We can get an upper 390 * bound on that by assuming all initialized endpoints are modem->IPA. 391 * That won't happen, and we could be more precise, but this is fine 392 * for now. End the transaction with commands to clear the pipeline. 393 */ 394 count = hweight32(initialized) + ipa_cmd_pipeline_clear_count(); 395 trans = ipa_cmd_trans_alloc(ipa, count); 396 if (!trans) { 397 dev_err(&ipa->pdev->dev, 398 "no transaction to reset modem exception endpoints\n"); 399 return -EBUSY; 400 } 401 402 while (initialized) { 403 u32 endpoint_id = __ffs(initialized); 404 struct ipa_endpoint *endpoint; 405 u32 offset; 406 407 initialized ^= BIT(endpoint_id); 408 409 /* We only reset modem TX endpoints */ 410 endpoint = &ipa->endpoint[endpoint_id]; 411 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) 412 continue; 413 414 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); 415 416 /* Value written is 0, and all bits are updated. That 417 * means status is disabled on the endpoint, and as a 418 * result all other fields in the register are ignored. 419 */ 420 ipa_cmd_register_write_add(trans, offset, 0, ~0, false); 421 } 422 423 ipa_cmd_pipeline_clear_add(trans); 424 425 /* XXX This should have a 1 second timeout */ 426 gsi_trans_commit_wait(trans); 427 428 ipa_cmd_pipeline_clear_wait(ipa); 429 430 return 0; 431 } 432 433 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) 434 { 435 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); 436 enum ipa_cs_offload_en enabled; 437 u32 val = 0; 438 439 /* FRAG_OFFLOAD_EN is 0 */ 440 if (endpoint->data->checksum) { 441 enum ipa_version version = endpoint->ipa->version; 442 443 if (endpoint->toward_ipa) { 444 u32 checksum_offset; 445 446 /* Checksum header offset is in 4-byte units */ 447 checksum_offset = sizeof(struct rmnet_map_header); 448 checksum_offset /= sizeof(u32); 449 val |= u32_encode_bits(checksum_offset, 450 CS_METADATA_HDR_OFFSET_FMASK); 451 452 enabled = version < IPA_VERSION_4_5 453 ? IPA_CS_OFFLOAD_UL 454 : IPA_CS_OFFLOAD_INLINE; 455 } else { 456 enabled = version < IPA_VERSION_4_5 457 ? 
IPA_CS_OFFLOAD_DL 458 : IPA_CS_OFFLOAD_INLINE; 459 } 460 } else { 461 enabled = IPA_CS_OFFLOAD_NONE; 462 } 463 val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK); 464 /* CS_GEN_QMB_MASTER_SEL is 0 */ 465 466 iowrite32(val, endpoint->ipa->reg_virt + offset); 467 } 468 469 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint) 470 { 471 u32 offset; 472 u32 val; 473 474 if (!endpoint->toward_ipa) 475 return; 476 477 offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id); 478 val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK); 479 480 iowrite32(val, endpoint->ipa->reg_virt + offset); 481 } 482 483 static u32 484 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint) 485 { 486 u32 header_size = sizeof(struct rmnet_map_header); 487 488 /* Without checksum offload, we just have the MAP header */ 489 if (!endpoint->data->checksum) 490 return header_size; 491 492 if (version < IPA_VERSION_4_5) { 493 /* Checksum header inserted for AP TX endpoints only */ 494 if (endpoint->toward_ipa) 495 header_size += sizeof(struct rmnet_map_ul_csum_header); 496 } else { 497 /* Checksum header is used in both directions */ 498 header_size += sizeof(struct rmnet_map_v5_csum_header); 499 } 500 501 return header_size; 502 } 503 504 /** 505 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register 506 * @endpoint: Endpoint pointer 507 * 508 * We program QMAP endpoints so each packet received is preceded by a QMAP 509 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte 510 * packet size field, and we have the IPA hardware populate both for each 511 * received packet. The header is configured (in the HDR_EXT register) 512 * to use big endian format. 513 * 514 * The packet size is written into the QMAP header's pkt_len field. That 515 * location is defined here using the HDR_OFST_PKT_SIZE field. 516 * 517 * The mux_id comes from a 4-byte metadata value supplied with each packet 518 * by the modem. It is *not* a QMAP header, but it does contain the mux_id 519 * value that we want, in its low-order byte. A bitmask defined in the 520 * endpoint's METADATA_MASK register defines which byte within the modem 521 * metadata contains the mux_id. And the OFST_METADATA field programmed 522 * here indicates where the extracted byte should be placed within the QMAP 523 * header. 
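 *
 * As an illustrative sketch, the 4-byte MAP header being filled in
 * looks like this (byte offsets assumed from struct rmnet_map_header
 * in <linux/if_rmnet.h>):
 *
 *	byte 0      pad length and flag bits
 *	byte 1      mux_id   - filled using the metadata mask and offset
 *	bytes 2-3   pkt_len  - filled via HDR_OFST_PKT_SIZE, big endian
 *			       per the HDR_EXT endianness setting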
524 */ 525 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) 526 { 527 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); 528 struct ipa *ipa = endpoint->ipa; 529 u32 val = 0; 530 531 if (endpoint->data->qmap) { 532 enum ipa_version version = ipa->version; 533 size_t header_size; 534 535 header_size = ipa_qmap_header_size(version, endpoint); 536 val = ipa_header_size_encoded(version, header_size); 537 538 /* Define how to fill fields in a received QMAP header */ 539 if (!endpoint->toward_ipa) { 540 u32 offset; /* Field offset within header */ 541 542 /* Where IPA will write the metadata value */ 543 offset = offsetof(struct rmnet_map_header, mux_id); 544 val |= ipa_metadata_offset_encoded(version, offset); 545 546 /* Where IPA will write the length */ 547 offset = offsetof(struct rmnet_map_header, pkt_len); 548 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ 549 if (version >= IPA_VERSION_4_5) 550 offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK); 551 552 val |= HDR_OFST_PKT_SIZE_VALID_FMASK; 553 val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK); 554 } 555 /* For QMAP TX, metadata offset is 0 (modem assumes this) */ 556 val |= HDR_OFST_METADATA_VALID_FMASK; 557 558 /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */ 559 /* HDR_A5_MUX is 0 */ 560 /* HDR_LEN_INC_DEAGG_HDR is 0 */ 561 /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */ 562 } 563 564 iowrite32(val, ipa->reg_virt + offset); 565 } 566 567 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) 568 { 569 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); 570 u32 pad_align = endpoint->data->rx.pad_align; 571 struct ipa *ipa = endpoint->ipa; 572 u32 val = 0; 573 574 val |= HDR_ENDIANNESS_FMASK; /* big endian */ 575 576 /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet 577 * driver assumes this field is meaningful in packets it receives, 578 * and assumes the header's payload length includes that padding. 579 * The RMNet driver does *not* pad packets it sends, however, so 580 * the pad field (although 0) should be ignored. 581 */ 582 if (endpoint->data->qmap && !endpoint->toward_ipa) { 583 val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK; 584 /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ 585 val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK; 586 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ 587 } 588 589 /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */ 590 if (!endpoint->toward_ipa) 591 val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK); 592 593 /* IPA v4.5 adds some most-significant bits to a few fields, 594 * two of which are defined in the HDR (not HDR_EXT) register. 
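 *
 * A hedged worked example of the MSB split (the 6-bit field width is
 * an assumption for illustration): with offset = offsetof(struct
 * rmnet_map_header, pkt_len) = 2, the HDR register keeps 2 & 0x3f = 2
 * and the MSB value computed below is 2 >> 6 = 0; the MSB field only
 * becomes non-zero for offsets too large for the base field.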
595 */ 596 if (ipa->version >= IPA_VERSION_4_5) { 597 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ 598 if (endpoint->data->qmap && !endpoint->toward_ipa) { 599 u32 offset; 600 601 offset = offsetof(struct rmnet_map_header, pkt_len); 602 offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK); 603 val |= u32_encode_bits(offset, 604 HDR_OFST_PKT_SIZE_MSB_FMASK); 605 /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */ 606 } 607 } 608 iowrite32(val, ipa->reg_virt + offset); 609 } 610 611 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) 612 { 613 u32 endpoint_id = endpoint->endpoint_id; 614 u32 val = 0; 615 u32 offset; 616 617 if (endpoint->toward_ipa) 618 return; /* Register not valid for TX endpoints */ 619 620 offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); 621 622 /* Note that HDR_ENDIANNESS indicates big endian header fields */ 623 if (endpoint->data->qmap) 624 val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); 625 626 iowrite32(val, endpoint->ipa->reg_virt + offset); 627 } 628 629 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) 630 { 631 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); 632 u32 val; 633 634 if (!endpoint->toward_ipa) 635 return; /* Register not valid for RX endpoints */ 636 637 if (endpoint->data->dma_mode) { 638 enum ipa_endpoint_name name = endpoint->data->dma_endpoint; 639 u32 dma_endpoint_id; 640 641 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; 642 643 val = u32_encode_bits(IPA_DMA, MODE_FMASK); 644 val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK); 645 } else { 646 val = u32_encode_bits(IPA_BASIC, MODE_FMASK); 647 } 648 /* All other bits unspecified (and 0) */ 649 650 iowrite32(val, endpoint->ipa->reg_virt + offset); 651 } 652 653 /* Compute the aggregation size value to use for a given buffer size */ 654 static u32 ipa_aggr_size_kb(u32 rx_buffer_size) 655 { 656 /* We don't use "hard byte limit" aggregation, so we define the 657 * aggregation limit such that our buffer has enough space *after* 658 * that limit to receive a full MTU of data, plus overhead. 659 */ 660 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; 661 662 return rx_buffer_size / SZ_1K; 663 } 664 665 /* Encoded values for AGGR endpoint register fields */ 666 static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit) 667 { 668 if (version < IPA_VERSION_4_5) 669 return u32_encode_bits(limit, aggr_byte_limit_fmask(true)); 670 671 return u32_encode_bits(limit, aggr_byte_limit_fmask(false)); 672 } 673 674 /* Encode the aggregation timer limit (microseconds) based on IPA version */ 675 static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit) 676 { 677 u32 gran_sel; 678 u32 fmask; 679 u32 val; 680 681 if (version < IPA_VERSION_4_5) { 682 /* We set aggregation granularity in ipa_hardware_config() */ 683 limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); 684 685 return u32_encode_bits(limit, aggr_time_limit_fmask(true)); 686 } 687 688 /* IPA v4.5 expresses the time limit using Qtime. The AP has 689 * pulse generators 0 and 1 available, which were configured 690 * in ipa_qtime_config() to have granularity 100 usec and 691 * 1 msec, respectively. Use pulse generator 0 if possible, 692 * otherwise fall back to pulse generator 1. 
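 *
 * Worked example with the 500 microsecond IPA_AGGR_TIME_LIMIT used by
 * this driver: pulse generator 0 gives val = DIV_ROUND_CLOSEST(500,
 * 100) = 5, which fits in the field, so gran_sel stays 0. Only a much
 * larger limit (how large depends on the field width) would overflow
 * field_max(fmask) and force the fallback to AGGR_GRAN_SEL_FMASK with
 * millisecond units.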
693 */ 694 fmask = aggr_time_limit_fmask(false); 695 val = DIV_ROUND_CLOSEST(limit, 100); 696 if (val > field_max(fmask)) { 697 /* Have to use pulse generator 1 (millisecond granularity) */ 698 gran_sel = AGGR_GRAN_SEL_FMASK; 699 val = DIV_ROUND_CLOSEST(limit, 1000); 700 } else { 701 /* We can use pulse generator 0 (100 usec granularity) */ 702 gran_sel = 0; 703 } 704 705 return gran_sel | u32_encode_bits(val, fmask); 706 } 707 708 static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled) 709 { 710 u32 val = enabled ? 1 : 0; 711 712 if (version < IPA_VERSION_4_5) 713 return u32_encode_bits(val, aggr_sw_eof_active_fmask(true)); 714 715 return u32_encode_bits(val, aggr_sw_eof_active_fmask(false)); 716 } 717 718 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) 719 { 720 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); 721 enum ipa_version version = endpoint->ipa->version; 722 u32 val = 0; 723 724 if (endpoint->data->aggregation) { 725 if (!endpoint->toward_ipa) { 726 bool close_eof; 727 u32 limit; 728 729 val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); 730 val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); 731 732 limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE); 733 val |= aggr_byte_limit_encoded(version, limit); 734 735 limit = IPA_AGGR_TIME_LIMIT; 736 val |= aggr_time_limit_encoded(version, limit); 737 738 /* AGGR_PKT_LIMIT is 0 (unlimited) */ 739 740 close_eof = endpoint->data->rx.aggr_close_eof; 741 val |= aggr_sw_eof_active_encoded(version, close_eof); 742 743 /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */ 744 } else { 745 val |= u32_encode_bits(IPA_ENABLE_DEAGGR, 746 AGGR_EN_FMASK); 747 val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK); 748 /* other fields ignored */ 749 } 750 /* AGGR_FORCE_CLOSE is 0 */ 751 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ 752 } else { 753 val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK); 754 /* other fields ignored */ 755 } 756 757 iowrite32(val, endpoint->ipa->reg_virt + offset); 758 } 759 760 /* Return the Qtime-based head-of-line blocking timer value that 761 * represents the given number of microseconds. The result 762 * includes both the timer value and the selected timer granularity. 763 */ 764 static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds) 765 { 766 u32 gran_sel; 767 u32 val; 768 769 /* IPA v4.5 expresses time limits using Qtime. The AP has 770 * pulse generators 0 and 1 available, which were configured 771 * in ipa_qtime_config() to have granularity 100 usec and 772 * 1 msec, respectively. Use pulse generator 0 if possible, 773 * otherwise fall back to pulse generator 1. 774 */ 775 val = DIV_ROUND_CLOSEST(microseconds, 100); 776 if (val > field_max(TIME_LIMIT_FMASK)) { 777 /* Have to use pulse generator 1 (millisecond granularity) */ 778 gran_sel = GRAN_SEL_FMASK; 779 val = DIV_ROUND_CLOSEST(microseconds, 1000); 780 } else { 781 /* We can use pulse generator 0 (100 usec granularity) */ 782 gran_sel = 0; 783 } 784 785 return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK); 786 } 787 788 /* The head-of-line blocking timer is defined as a tick count. For 789 * IPA version 4.5 the tick count is based on the Qtimer, which is 790 * derived from the 19.2 MHz SoC XO clock. For older IPA versions 791 * each tick represents 128 cycles of the IPA core clock. 792 * 793 * Return the encoded value that should be written to that register 794 * that represents the timeout period provided. 
For IPA v4.2 this 795 * encodes a base and scale value, while for earlier versions the 796 * value is a simple tick count. 797 */ 798 static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) 799 { 800 u32 width; 801 u32 scale; 802 u64 ticks; 803 u64 rate; 804 u32 high; 805 u32 val; 806 807 if (!microseconds) 808 return 0; /* Nothing to compute if timer period is 0 */ 809 810 if (ipa->version >= IPA_VERSION_4_5) 811 return hol_block_timer_qtime_val(ipa, microseconds); 812 813 /* Use 64 bit arithmetic to avoid overflow... */ 814 rate = ipa_core_clock_rate(ipa); 815 ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); 816 /* ...but we still need to fit into a 32-bit register */ 817 WARN_ON(ticks > U32_MAX); 818 819 /* IPA v3.5.1 through v4.1 just record the tick count */ 820 if (ipa->version < IPA_VERSION_4_2) 821 return (u32)ticks; 822 823 /* For IPA v4.2, the tick count is represented by base and 824 * scale fields within the 32-bit timer register, where: 825 * ticks = base << scale; 826 * The best precision is achieved when the base value is as 827 * large as possible. Find the highest set bit in the tick 828 * count, and extract the number of bits in the base field 829 * such that high bit is included. 830 */ 831 high = fls(ticks); /* 1..32 */ 832 width = HWEIGHT32(BASE_VALUE_FMASK); 833 scale = high > width ? high - width : 0; 834 if (scale) { 835 /* If we're scaling, round up to get a closer result */ 836 ticks += 1 << (scale - 1); 837 /* High bit was set, so rounding might have affected it */ 838 if (fls(ticks) != high) 839 scale++; 840 } 841 842 val = u32_encode_bits(scale, SCALE_FMASK); 843 val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK); 844 845 return val; 846 } 847 848 /* If microseconds is 0, timeout is immediate */ 849 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, 850 u32 microseconds) 851 { 852 u32 endpoint_id = endpoint->endpoint_id; 853 struct ipa *ipa = endpoint->ipa; 854 u32 offset; 855 u32 val; 856 857 /* This should only be changed when HOL_BLOCK_EN is disabled */ 858 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); 859 val = hol_block_timer_val(ipa, microseconds); 860 iowrite32(val, ipa->reg_virt + offset); 861 } 862 863 static void 864 ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable) 865 { 866 u32 endpoint_id = endpoint->endpoint_id; 867 u32 offset; 868 u32 val; 869 870 val = enable ? 
HOL_BLOCK_EN_FMASK : 0; 871 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id); 872 iowrite32(val, endpoint->ipa->reg_virt + offset); 873 /* When enabling, the register must be written twice for IPA v4.5+ */ 874 if (enable && endpoint->ipa->version >= IPA_VERSION_4_5) 875 iowrite32(val, endpoint->ipa->reg_virt + offset); 876 } 877 878 /* Assumes HOL_BLOCK is in disabled state */ 879 static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, 880 u32 microseconds) 881 { 882 ipa_endpoint_init_hol_block_timer(endpoint, microseconds); 883 ipa_endpoint_init_hol_block_en(endpoint, true); 884 } 885 886 static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint) 887 { 888 ipa_endpoint_init_hol_block_en(endpoint, false); 889 } 890 891 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) 892 { 893 u32 i; 894 895 for (i = 0; i < IPA_ENDPOINT_MAX; i++) { 896 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; 897 898 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) 899 continue; 900 901 ipa_endpoint_init_hol_block_disable(endpoint); 902 ipa_endpoint_init_hol_block_enable(endpoint, 0); 903 } 904 } 905 906 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) 907 { 908 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); 909 u32 val = 0; 910 911 if (!endpoint->toward_ipa) 912 return; /* Register not valid for RX endpoints */ 913 914 /* DEAGGR_HDR_LEN is 0 */ 915 /* PACKET_OFFSET_VALID is 0 */ 916 /* PACKET_OFFSET_LOCATION is ignored (not valid) */ 917 /* MAX_PACKET_LEN is 0 (not enforced) */ 918 919 iowrite32(val, endpoint->ipa->reg_virt + offset); 920 } 921 922 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) 923 { 924 u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id); 925 struct ipa *ipa = endpoint->ipa; 926 u32 val; 927 928 val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group); 929 iowrite32(val, ipa->reg_virt + offset); 930 } 931 932 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) 933 { 934 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); 935 u32 val = 0; 936 937 if (!endpoint->toward_ipa) 938 return; /* Register not valid for RX endpoints */ 939 940 /* Low-order byte configures primary packet processing */ 941 val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK); 942 943 /* Second byte configures replicated packet processing */ 944 val |= u32_encode_bits(endpoint->data->tx.seq_rep_type, 945 SEQ_REP_TYPE_FMASK); 946 947 iowrite32(val, endpoint->ipa->reg_virt + offset); 948 } 949 950 /** 951 * ipa_endpoint_skb_tx() - Transmit a socket buffer 952 * @endpoint: Endpoint pointer 953 * @skb: Socket buffer to send 954 * 955 * Returns: 0 if successful, or a negative error code 956 */ 957 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb) 958 { 959 struct gsi_trans *trans; 960 u32 nr_frags; 961 int ret; 962 963 /* Make sure source endpoint's TLV FIFO has enough entries to 964 * hold the linear portion of the skb and all its fragments. 965 * If not, see if we can linearize it before giving up. 
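 *
 * For example, an skb with a linear area plus three page fragments
 * needs 1 + 3 = 4 TREs. If the endpoint's trans_tre_max were only 3
 * (a hypothetical value), skb_linearize() collapses the fragments
 * into the linear area so a single TRE suffices; only if linearizing
 * fails do we give up and return -E2BIG.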
966 */ 967 nr_frags = skb_shinfo(skb)->nr_frags; 968 if (1 + nr_frags > endpoint->trans_tre_max) { 969 if (skb_linearize(skb)) 970 return -E2BIG; 971 nr_frags = 0; 972 } 973 974 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags); 975 if (!trans) 976 return -EBUSY; 977 978 ret = gsi_trans_skb_add(trans, skb); 979 if (ret) 980 goto err_trans_free; 981 trans->data = skb; /* transaction owns skb now */ 982 983 gsi_trans_commit(trans, !netdev_xmit_more()); 984 985 return 0; 986 987 err_trans_free: 988 gsi_trans_free(trans); 989 990 return -ENOMEM; 991 } 992 993 static void ipa_endpoint_status(struct ipa_endpoint *endpoint) 994 { 995 u32 endpoint_id = endpoint->endpoint_id; 996 struct ipa *ipa = endpoint->ipa; 997 u32 val = 0; 998 u32 offset; 999 1000 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); 1001 1002 if (endpoint->data->status_enable) { 1003 val |= STATUS_EN_FMASK; 1004 if (endpoint->toward_ipa) { 1005 enum ipa_endpoint_name name; 1006 u32 status_endpoint_id; 1007 1008 name = endpoint->data->tx.status_endpoint; 1009 status_endpoint_id = ipa->name_map[name]->endpoint_id; 1010 1011 val |= u32_encode_bits(status_endpoint_id, 1012 STATUS_ENDP_FMASK); 1013 } 1014 /* STATUS_LOCATION is 0, meaning status element precedes 1015 * packet (not present for IPA v4.5) 1016 */ 1017 /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */ 1018 } 1019 1020 iowrite32(val, ipa->reg_virt + offset); 1021 } 1022 1023 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) 1024 { 1025 struct gsi_trans *trans; 1026 bool doorbell = false; 1027 struct page *page; 1028 u32 offset; 1029 u32 len; 1030 int ret; 1031 1032 page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE)); 1033 if (!page) 1034 return -ENOMEM; 1035 1036 trans = ipa_endpoint_trans_alloc(endpoint, 1); 1037 if (!trans) 1038 goto err_free_pages; 1039 1040 /* Offset the buffer to make space for skb headroom */ 1041 offset = NET_SKB_PAD; 1042 len = IPA_RX_BUFFER_SIZE - offset; 1043 1044 ret = gsi_trans_page_add(trans, page, len, offset); 1045 if (ret) 1046 goto err_trans_free; 1047 trans->data = page; /* transaction owns page now */ 1048 1049 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { 1050 doorbell = true; 1051 endpoint->replenish_ready = 0; 1052 } 1053 1054 gsi_trans_commit(trans, doorbell); 1055 1056 return 0; 1057 1058 err_trans_free: 1059 gsi_trans_free(trans); 1060 err_free_pages: 1061 __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); 1062 1063 return -ENOMEM; 1064 } 1065 1066 /** 1067 * ipa_endpoint_replenish() - Replenish endpoint receive buffers 1068 * @endpoint: Endpoint to be replenished 1069 * @add_one: Whether this is replacing a just-consumed buffer 1070 * 1071 * The IPA hardware can hold a fixed number of receive buffers for an RX 1072 * endpoint, based on the number of entries in the underlying channel ring 1073 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many 1074 * more receive buffers can be supplied to the hardware. Replenishing for 1075 * an endpoint can be disabled, in which case requests to replenish a 1076 * buffer are "saved", and transferred to the backlog once it is re-enabled 1077 * again. 
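 *
 * A sketch of how the counters interact, derived from this function
 * and from ipa_endpoint_replenish_enable()/_disable():
 *
 *	replenish disabled   add_one increments replenish_saved
 *	replenish enabled    saved counts are folded into
 *			     replenish_backlog, and each successful
 *			     ipa_endpoint_replenish_one() call consumes
 *			     one unit of backlog
 *	allocation failure   the backlog is restored and, if the
 *			     hardware holds no buffers at all, delayed
 *			     work retries about a millisecond later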
1078 */ 1079 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one) 1080 { 1081 struct gsi *gsi; 1082 u32 backlog; 1083 int delta; 1084 1085 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) { 1086 if (add_one) 1087 atomic_inc(&endpoint->replenish_saved); 1088 return; 1089 } 1090 1091 /* If already active, just update the backlog */ 1092 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) { 1093 if (add_one) 1094 atomic_inc(&endpoint->replenish_backlog); 1095 return; 1096 } 1097 1098 while (atomic_dec_not_zero(&endpoint->replenish_backlog)) 1099 if (ipa_endpoint_replenish_one(endpoint)) 1100 goto try_again_later; 1101 1102 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); 1103 1104 if (add_one) 1105 atomic_inc(&endpoint->replenish_backlog); 1106 1107 return; 1108 1109 try_again_later: 1110 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); 1111 1112 /* The last one didn't succeed, so fix the backlog */ 1113 delta = add_one ? 2 : 1; 1114 backlog = atomic_add_return(delta, &endpoint->replenish_backlog); 1115 1116 /* Whenever a receive buffer transaction completes we'll try to 1117 * replenish again. It's unlikely, but if we fail to supply even 1118 * one buffer, nothing will trigger another replenish attempt. 1119 * Receive buffer transactions use one TRE, so schedule work to 1120 * try replenishing again if our backlog is *all* available TREs. 1121 */ 1122 gsi = &endpoint->ipa->gsi; 1123 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) 1124 schedule_delayed_work(&endpoint->replenish_work, 1125 msecs_to_jiffies(1)); 1126 } 1127 1128 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) 1129 { 1130 struct gsi *gsi = &endpoint->ipa->gsi; 1131 u32 max_backlog; 1132 u32 saved; 1133 1134 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); 1135 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) 1136 atomic_add(saved, &endpoint->replenish_backlog); 1137 1138 /* Start replenishing if hardware currently has no buffers */ 1139 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); 1140 if (atomic_read(&endpoint->replenish_backlog) == max_backlog) 1141 ipa_endpoint_replenish(endpoint, false); 1142 } 1143 1144 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) 1145 { 1146 u32 backlog; 1147 1148 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); 1149 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) 1150 atomic_add(backlog, &endpoint->replenish_saved); 1151 } 1152 1153 static void ipa_endpoint_replenish_work(struct work_struct *work) 1154 { 1155 struct delayed_work *dwork = to_delayed_work(work); 1156 struct ipa_endpoint *endpoint; 1157 1158 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work); 1159 1160 ipa_endpoint_replenish(endpoint, false); 1161 } 1162 1163 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, 1164 void *data, u32 len, u32 extra) 1165 { 1166 struct sk_buff *skb; 1167 1168 if (!endpoint->netdev) 1169 return; 1170 1171 skb = __dev_alloc_skb(len, GFP_ATOMIC); 1172 if (!skb) 1173 return; 1174 1175 /* Copy the data into the socket buffer and receive it */ 1176 skb_put(skb, len); 1177 memcpy(skb->data, data, len); 1178 skb->truesize += extra; 1179 1180 ipa_modem_skb_rx(endpoint->netdev, skb); 1181 } 1182 1183 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, 1184 struct page *page, u32 len) 1185 { 1186 struct sk_buff *skb; 1187 1188 /* Nothing to do if there's no 
netdev */ 1189 if (!endpoint->netdev) 1190 return false; 1191 1192 WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD)); 1193 1194 skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE); 1195 if (skb) { 1196 /* Reserve the headroom and account for the data */ 1197 skb_reserve(skb, NET_SKB_PAD); 1198 skb_put(skb, len); 1199 } 1200 1201 /* Receive the buffer (or record drop if unable to build it) */ 1202 ipa_modem_skb_rx(endpoint->netdev, skb); 1203 1204 return skb != NULL; 1205 } 1206 1207 /* The format of a packet status element is the same for several status 1208 * types (opcodes). Other types aren't currently supported. 1209 */ 1210 static bool ipa_status_format_packet(enum ipa_status_opcode opcode) 1211 { 1212 switch (opcode) { 1213 case IPA_STATUS_OPCODE_PACKET: 1214 case IPA_STATUS_OPCODE_DROPPED_PACKET: 1215 case IPA_STATUS_OPCODE_SUSPENDED_PACKET: 1216 case IPA_STATUS_OPCODE_PACKET_2ND_PASS: 1217 return true; 1218 default: 1219 return false; 1220 } 1221 } 1222 1223 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, 1224 const struct ipa_status *status) 1225 { 1226 u32 endpoint_id; 1227 1228 if (!ipa_status_format_packet(status->opcode)) 1229 return true; 1230 if (!status->pkt_len) 1231 return true; 1232 endpoint_id = u8_get_bits(status->endp_dst_idx, 1233 IPA_STATUS_DST_IDX_FMASK); 1234 if (endpoint_id != endpoint->endpoint_id) 1235 return true; 1236 1237 return false; /* Don't skip this packet, process it */ 1238 } 1239 1240 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint, 1241 const struct ipa_status *status) 1242 { 1243 struct ipa_endpoint *command_endpoint; 1244 struct ipa *ipa = endpoint->ipa; 1245 u32 endpoint_id; 1246 1247 if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK)) 1248 return false; /* No valid tag */ 1249 1250 /* The status contains a valid tag. We know the packet was sent to 1251 * this endpoint (already verified by ipa_endpoint_status_skip()). 1252 * If the packet came from the AP->command TX endpoint we know 1253 * this packet was sent as part of the pipeline clear process. 
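 *
 * In other words (a summary of the surrounding code, assuming that
 * ipa_cmd_pipeline_clear_wait() sleeps on ipa->completion): the
 * pipeline clear sequence sends a tagged frame from the AP command TX
 * endpoint; when its status shows up here we complete(&ipa->completion)
 * so the waiter can proceed, and the tagged frame itself is dropped.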
1254 */ 1255 endpoint_id = u8_get_bits(status->endp_src_idx, 1256 IPA_STATUS_SRC_IDX_FMASK); 1257 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; 1258 if (endpoint_id == command_endpoint->endpoint_id) { 1259 complete(&ipa->completion); 1260 } else { 1261 dev_err(&ipa->pdev->dev, 1262 "unexpected tagged packet from endpoint %u\n", 1263 endpoint_id); 1264 } 1265 1266 return true; 1267 } 1268 1269 /* Return whether the status indicates the packet should be dropped */ 1270 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, 1271 const struct ipa_status *status) 1272 { 1273 u32 val; 1274 1275 /* If the status indicates a tagged transfer, we'll drop the packet */ 1276 if (ipa_endpoint_status_tag(endpoint, status)) 1277 return true; 1278 1279 /* Deaggregation exceptions we drop; all other types we consume */ 1280 if (status->exception) 1281 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR; 1282 1283 /* Drop the packet if it fails to match a routing rule; otherwise no */ 1284 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); 1285 1286 return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); 1287 } 1288 1289 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, 1290 struct page *page, u32 total_len) 1291 { 1292 void *data = page_address(page) + NET_SKB_PAD; 1293 u32 unused = IPA_RX_BUFFER_SIZE - total_len; 1294 u32 resid = total_len; 1295 1296 while (resid) { 1297 const struct ipa_status *status = data; 1298 u32 align; 1299 u32 len; 1300 1301 if (resid < sizeof(*status)) { 1302 dev_err(&endpoint->ipa->pdev->dev, 1303 "short message (%u bytes < %zu byte status)\n", 1304 resid, sizeof(*status)); 1305 break; 1306 } 1307 1308 /* Skip over status packets that lack packet data */ 1309 if (ipa_endpoint_status_skip(endpoint, status)) { 1310 data += sizeof(*status); 1311 resid -= sizeof(*status); 1312 continue; 1313 } 1314 1315 /* Compute the amount of buffer space consumed by the packet, 1316 * including the status element. If the hardware is configured 1317 * to pad packet data to an aligned boundary, account for that. 1318 * And if checksum offload is enabled a trailer containing 1319 * computed checksum information will be appended. 1320 */ 1321 align = endpoint->data->rx.pad_align ? : 1; 1322 len = le16_to_cpu(status->pkt_len); 1323 len = sizeof(*status) + ALIGN(len, align); 1324 if (endpoint->data->checksum) 1325 len += sizeof(struct rmnet_map_dl_csum_trailer); 1326 1327 if (!ipa_endpoint_status_drop(endpoint, status)) { 1328 void *data2; 1329 u32 extra; 1330 u32 len2; 1331 1332 /* Client receives only packet data (no status) */ 1333 data2 = data + sizeof(*status); 1334 len2 = le16_to_cpu(status->pkt_len); 1335 1336 /* Have the true size reflect the extra unused space in 1337 * the original receive buffer. Distribute the "cost" 1338 * proportionately across all aggregated packets in the 1339 * buffer. 
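 *
 * Worked example (sizes chosen purely for illustration): with an
 * 8192-byte buffer of which total_len = 6000 bytes were filled,
 * unused = 2192. A packet consuming len = 3000 bytes of the buffer
 * is charged extra = DIV_ROUND_CLOSEST(2192 * 3000, 6000) = 1096
 * additional bytes of truesize.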
1340 */ 1341 extra = DIV_ROUND_CLOSEST(unused * len, total_len); 1342 ipa_endpoint_skb_copy(endpoint, data2, len2, extra); 1343 } 1344 1345 /* Consume status and the full packet it describes */ 1346 data += len; 1347 resid -= len; 1348 } 1349 } 1350 1351 /* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */ 1352 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint, 1353 struct gsi_trans *trans) 1354 { 1355 } 1356 1357 /* Complete transaction initiated in ipa_endpoint_replenish_one() */ 1358 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, 1359 struct gsi_trans *trans) 1360 { 1361 struct page *page; 1362 1363 ipa_endpoint_replenish(endpoint, true); 1364 1365 if (trans->cancelled) 1366 return; 1367 1368 /* Parse or build a socket buffer using the actual received length */ 1369 page = trans->data; 1370 if (endpoint->data->status_enable) 1371 ipa_endpoint_status_parse(endpoint, page, trans->len); 1372 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) 1373 trans->data = NULL; /* Pages have been consumed */ 1374 } 1375 1376 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, 1377 struct gsi_trans *trans) 1378 { 1379 if (endpoint->toward_ipa) 1380 ipa_endpoint_tx_complete(endpoint, trans); 1381 else 1382 ipa_endpoint_rx_complete(endpoint, trans); 1383 } 1384 1385 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, 1386 struct gsi_trans *trans) 1387 { 1388 if (endpoint->toward_ipa) { 1389 struct ipa *ipa = endpoint->ipa; 1390 1391 /* Nothing to do for command transactions */ 1392 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { 1393 struct sk_buff *skb = trans->data; 1394 1395 if (skb) 1396 dev_kfree_skb_any(skb); 1397 } 1398 } else { 1399 struct page *page = trans->data; 1400 1401 if (page) 1402 __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); 1403 } 1404 } 1405 1406 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) 1407 { 1408 u32 val; 1409 1410 /* ROUTE_DIS is 0 */ 1411 val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK); 1412 val |= ROUTE_DEF_HDR_TABLE_FMASK; 1413 val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK); 1414 val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK); 1415 val |= ROUTE_DEF_RETAIN_HDR_FMASK; 1416 1417 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); 1418 } 1419 1420 void ipa_endpoint_default_route_clear(struct ipa *ipa) 1421 { 1422 ipa_endpoint_default_route_set(ipa, 0); 1423 } 1424 1425 /** 1426 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active 1427 * @endpoint: Endpoint to be reset 1428 * 1429 * If aggregation is active on an RX endpoint when a reset is performed 1430 * on its underlying GSI channel, a special sequence of actions must be 1431 * taken to ensure the IPA pipeline is properly cleared. 
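 *
 * In outline, the recovery implemented below: force the aggregation
 * frame closed; reset the channel with the doorbell engine disabled;
 * take the endpoint out of suspend; start the channel and issue a
 * one-byte read; poll until aggregation is no longer active; stop the
 * channel; then reset it one final time (re-enabling the doorbell)
 * before restoring the endpoint's previous suspend state.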
1432 * 1433 * Return: 0 if successful, or a negative error code 1434 */ 1435 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) 1436 { 1437 struct device *dev = &endpoint->ipa->pdev->dev; 1438 struct ipa *ipa = endpoint->ipa; 1439 struct gsi *gsi = &ipa->gsi; 1440 bool suspended = false; 1441 dma_addr_t addr; 1442 u32 retries; 1443 u32 len = 1; 1444 void *virt; 1445 int ret; 1446 1447 virt = kzalloc(len, GFP_KERNEL); 1448 if (!virt) 1449 return -ENOMEM; 1450 1451 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE); 1452 if (dma_mapping_error(dev, addr)) { 1453 ret = -ENOMEM; 1454 goto out_kfree; 1455 } 1456 1457 /* Force close aggregation before issuing the reset */ 1458 ipa_endpoint_force_close(endpoint); 1459 1460 /* Reset and reconfigure the channel with the doorbell engine 1461 * disabled. Then poll until we know aggregation is no longer 1462 * active. We'll re-enable the doorbell (if appropriate) when 1463 * we reset again below. 1464 */ 1465 gsi_channel_reset(gsi, endpoint->channel_id, false); 1466 1467 /* Make sure the channel isn't suspended */ 1468 suspended = ipa_endpoint_program_suspend(endpoint, false); 1469 1470 /* Start channel and do a 1 byte read */ 1471 ret = gsi_channel_start(gsi, endpoint->channel_id); 1472 if (ret) 1473 goto out_suspend_again; 1474 1475 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); 1476 if (ret) 1477 goto err_endpoint_stop; 1478 1479 /* Wait for aggregation to be closed on the channel */ 1480 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX; 1481 do { 1482 if (!ipa_endpoint_aggr_active(endpoint)) 1483 break; 1484 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 1485 } while (retries--); 1486 1487 /* Check one last time */ 1488 if (ipa_endpoint_aggr_active(endpoint)) 1489 dev_err(dev, "endpoint %u still active during reset\n", 1490 endpoint->endpoint_id); 1491 1492 gsi_trans_read_byte_done(gsi, endpoint->channel_id); 1493 1494 ret = gsi_channel_stop(gsi, endpoint->channel_id); 1495 if (ret) 1496 goto out_suspend_again; 1497 1498 /* Finally, reset and reconfigure the channel again (re-enabling 1499 * the doorbell engine if appropriate). Sleep for 1 millisecond to 1500 * complete the channel reset sequence. Finish by suspending the 1501 * channel again (if necessary). 1502 */ 1503 gsi_channel_reset(gsi, endpoint->channel_id, true); 1504 1505 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 1506 1507 goto out_suspend_again; 1508 1509 err_endpoint_stop: 1510 (void)gsi_channel_stop(gsi, endpoint->channel_id); 1511 out_suspend_again: 1512 if (suspended) 1513 (void)ipa_endpoint_program_suspend(endpoint, true); 1514 dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE); 1515 out_kfree: 1516 kfree(virt); 1517 1518 return ret; 1519 } 1520 1521 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) 1522 { 1523 u32 channel_id = endpoint->channel_id; 1524 struct ipa *ipa = endpoint->ipa; 1525 bool special; 1526 int ret = 0; 1527 1528 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation 1529 * is active, we need to handle things specially to recover. 1530 * All other cases just need to reset the underlying GSI channel. 
1531 */ 1532 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && 1533 endpoint->data->aggregation; 1534 if (special && ipa_endpoint_aggr_active(endpoint)) 1535 ret = ipa_endpoint_reset_rx_aggr(endpoint); 1536 else 1537 gsi_channel_reset(&ipa->gsi, channel_id, true); 1538 1539 if (ret) 1540 dev_err(&ipa->pdev->dev, 1541 "error %d resetting channel %u for endpoint %u\n", 1542 ret, endpoint->channel_id, endpoint->endpoint_id); 1543 } 1544 1545 static void ipa_endpoint_program(struct ipa_endpoint *endpoint) 1546 { 1547 if (endpoint->toward_ipa) { 1548 /* Newer versions of IPA use GSI channel flow control 1549 * instead of endpoint DELAY mode to prevent sending data. 1550 * Flow control is disabled for newly-allocated channels, 1551 * and we can assume flow control is not (ever) enabled 1552 * for AP TX channels. 1553 */ 1554 if (endpoint->ipa->version < IPA_VERSION_4_2) 1555 ipa_endpoint_program_delay(endpoint, false); 1556 } else { 1557 /* Ensure suspend mode is off on all AP RX endpoints */ 1558 (void)ipa_endpoint_program_suspend(endpoint, false); 1559 } 1560 ipa_endpoint_init_cfg(endpoint); 1561 ipa_endpoint_init_nat(endpoint); 1562 ipa_endpoint_init_hdr(endpoint); 1563 ipa_endpoint_init_hdr_ext(endpoint); 1564 ipa_endpoint_init_hdr_metadata_mask(endpoint); 1565 ipa_endpoint_init_mode(endpoint); 1566 ipa_endpoint_init_aggr(endpoint); 1567 if (!endpoint->toward_ipa) 1568 ipa_endpoint_init_hol_block_disable(endpoint); 1569 ipa_endpoint_init_deaggr(endpoint); 1570 ipa_endpoint_init_rsrc_grp(endpoint); 1571 ipa_endpoint_init_seq(endpoint); 1572 ipa_endpoint_status(endpoint); 1573 } 1574 1575 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) 1576 { 1577 struct ipa *ipa = endpoint->ipa; 1578 struct gsi *gsi = &ipa->gsi; 1579 int ret; 1580 1581 ret = gsi_channel_start(gsi, endpoint->channel_id); 1582 if (ret) { 1583 dev_err(&ipa->pdev->dev, 1584 "error %d starting %cX channel %u for endpoint %u\n", 1585 ret, endpoint->toward_ipa ? 
'T' : 'R', 1586 endpoint->channel_id, endpoint->endpoint_id); 1587 return ret; 1588 } 1589 1590 if (!endpoint->toward_ipa) { 1591 ipa_interrupt_suspend_enable(ipa->interrupt, 1592 endpoint->endpoint_id); 1593 ipa_endpoint_replenish_enable(endpoint); 1594 } 1595 1596 ipa->enabled |= BIT(endpoint->endpoint_id); 1597 1598 return 0; 1599 } 1600 1601 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) 1602 { 1603 u32 mask = BIT(endpoint->endpoint_id); 1604 struct ipa *ipa = endpoint->ipa; 1605 struct gsi *gsi = &ipa->gsi; 1606 int ret; 1607 1608 if (!(ipa->enabled & mask)) 1609 return; 1610 1611 ipa->enabled ^= mask; 1612 1613 if (!endpoint->toward_ipa) { 1614 ipa_endpoint_replenish_disable(endpoint); 1615 ipa_interrupt_suspend_disable(ipa->interrupt, 1616 endpoint->endpoint_id); 1617 } 1618 1619 /* Note that if stop fails, the channel's state is not well-defined */ 1620 ret = gsi_channel_stop(gsi, endpoint->channel_id); 1621 if (ret) 1622 dev_err(&ipa->pdev->dev, 1623 "error %d attempting to stop endpoint %u\n", ret, 1624 endpoint->endpoint_id); 1625 } 1626 1627 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) 1628 { 1629 struct device *dev = &endpoint->ipa->pdev->dev; 1630 struct gsi *gsi = &endpoint->ipa->gsi; 1631 int ret; 1632 1633 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) 1634 return; 1635 1636 if (!endpoint->toward_ipa) { 1637 ipa_endpoint_replenish_disable(endpoint); 1638 (void)ipa_endpoint_program_suspend(endpoint, true); 1639 } 1640 1641 ret = gsi_channel_suspend(gsi, endpoint->channel_id); 1642 if (ret) 1643 dev_err(dev, "error %d suspending channel %u\n", ret, 1644 endpoint->channel_id); 1645 } 1646 1647 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) 1648 { 1649 struct device *dev = &endpoint->ipa->pdev->dev; 1650 struct gsi *gsi = &endpoint->ipa->gsi; 1651 int ret; 1652 1653 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) 1654 return; 1655 1656 if (!endpoint->toward_ipa) 1657 (void)ipa_endpoint_program_suspend(endpoint, false); 1658 1659 ret = gsi_channel_resume(gsi, endpoint->channel_id); 1660 if (ret) 1661 dev_err(dev, "error %d resuming channel %u\n", ret, 1662 endpoint->channel_id); 1663 else if (!endpoint->toward_ipa) 1664 ipa_endpoint_replenish_enable(endpoint); 1665 } 1666 1667 void ipa_endpoint_suspend(struct ipa *ipa) 1668 { 1669 if (!ipa->setup_complete) 1670 return; 1671 1672 if (ipa->modem_netdev) 1673 ipa_modem_suspend(ipa->modem_netdev); 1674 1675 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 1676 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 1677 } 1678 1679 void ipa_endpoint_resume(struct ipa *ipa) 1680 { 1681 if (!ipa->setup_complete) 1682 return; 1683 1684 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 1685 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 1686 1687 if (ipa->modem_netdev) 1688 ipa_modem_resume(ipa->modem_netdev); 1689 } 1690 1691 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) 1692 { 1693 struct gsi *gsi = &endpoint->ipa->gsi; 1694 u32 channel_id = endpoint->channel_id; 1695 1696 /* Only AP endpoints get set up */ 1697 if (endpoint->ee_id != GSI_EE_AP) 1698 return; 1699 1700 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); 1701 if (!endpoint->toward_ipa) { 1702 /* RX transactions require a single TRE, so the maximum 1703 * backlog is the same as the maximum outstanding TREs. 
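 *
 * For example, if gsi_channel_tre_max() reported 256 TREs for the
 * channel (a hypothetical figure), replenish_saved starts at 256 and
 * replenish_backlog at 0; ipa_endpoint_replenish_enable() later
 * transfers the saved count to the backlog and begins posting
 * receive buffers to the hardware.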
1704 */ 1705 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); 1706 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); 1707 atomic_set(&endpoint->replenish_saved, 1708 gsi_channel_tre_max(gsi, endpoint->channel_id)); 1709 atomic_set(&endpoint->replenish_backlog, 0); 1710 INIT_DELAYED_WORK(&endpoint->replenish_work, 1711 ipa_endpoint_replenish_work); 1712 } 1713 1714 ipa_endpoint_program(endpoint); 1715 1716 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); 1717 } 1718 1719 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) 1720 { 1721 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); 1722 1723 if (!endpoint->toward_ipa) 1724 cancel_delayed_work_sync(&endpoint->replenish_work); 1725 1726 ipa_endpoint_reset(endpoint); 1727 } 1728 1729 void ipa_endpoint_setup(struct ipa *ipa) 1730 { 1731 u32 initialized = ipa->initialized; 1732 1733 ipa->set_up = 0; 1734 while (initialized) { 1735 u32 endpoint_id = __ffs(initialized); 1736 1737 initialized ^= BIT(endpoint_id); 1738 1739 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); 1740 } 1741 } 1742 1743 void ipa_endpoint_teardown(struct ipa *ipa) 1744 { 1745 u32 set_up = ipa->set_up; 1746 1747 while (set_up) { 1748 u32 endpoint_id = __fls(set_up); 1749 1750 set_up ^= BIT(endpoint_id); 1751 1752 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); 1753 } 1754 ipa->set_up = 0; 1755 } 1756 1757 int ipa_endpoint_config(struct ipa *ipa) 1758 { 1759 struct device *dev = &ipa->pdev->dev; 1760 u32 initialized; 1761 u32 rx_base; 1762 u32 rx_mask; 1763 u32 tx_mask; 1764 int ret = 0; 1765 u32 max; 1766 u32 val; 1767 1768 /* Prior to IPAv3.5, the FLAVOR_0 register was not supported. 1769 * Furthermore, the endpoints were not grouped such that TX 1770 * endpoint numbers started with 0 and RX endpoints had numbers 1771 * higher than all TX endpoints, so we can't do the simple 1772 * direction check used for newer hardware below. 1773 * 1774 * For hardware that doesn't support the FLAVOR_0 register, 1775 * just set the available mask to support any endpoint, and 1776 * assume the configuration is valid. 1777 */ 1778 if (ipa->version < IPA_VERSION_3_5) { 1779 ipa->available = ~0; 1780 return 0; 1781 } 1782 1783 /* Find out about the endpoints supplied by the hardware, and ensure 1784 * the highest one doesn't exceed the number we support. 
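 *
 * Hypothetical example of the mask computation below: if FLAVOR_0
 * reported the lowest producer (RX) endpoint as 12 with 8 producer
 * pipes and 12 consumer pipes, then rx_mask = GENMASK(19, 12),
 * tx_mask = GENMASK(11, 0), and ipa->available becomes their union,
 * covering endpoint IDs 0 through 19.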
1785 */ 1786 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); 1787 1788 /* Our RX is an IPA producer */ 1789 rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK); 1790 max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK); 1791 if (max > IPA_ENDPOINT_MAX) { 1792 dev_err(dev, "too many endpoints (%u > %u)\n", 1793 max, IPA_ENDPOINT_MAX); 1794 return -EINVAL; 1795 } 1796 rx_mask = GENMASK(max - 1, rx_base); 1797 1798 /* Our TX is an IPA consumer */ 1799 max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK); 1800 tx_mask = GENMASK(max - 1, 0); 1801 1802 ipa->available = rx_mask | tx_mask; 1803 1804 /* Check for initialized endpoints not supported by the hardware */ 1805 if (ipa->initialized & ~ipa->available) { 1806 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", 1807 ipa->initialized & ~ipa->available); 1808 ret = -EINVAL; /* Report other errors too */ 1809 } 1810 1811 initialized = ipa->initialized; 1812 while (initialized) { 1813 u32 endpoint_id = __ffs(initialized); 1814 struct ipa_endpoint *endpoint; 1815 1816 initialized ^= BIT(endpoint_id); 1817 1818 /* Make sure it's pointing in the right direction */ 1819 endpoint = &ipa->endpoint[endpoint_id]; 1820 if ((endpoint_id < rx_base) != endpoint->toward_ipa) { 1821 dev_err(dev, "endpoint id %u wrong direction\n", 1822 endpoint_id); 1823 ret = -EINVAL; 1824 } 1825 } 1826 1827 return ret; 1828 } 1829 1830 void ipa_endpoint_deconfig(struct ipa *ipa) 1831 { 1832 ipa->available = 0; /* Nothing more to do */ 1833 } 1834 1835 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, 1836 const struct ipa_gsi_endpoint_data *data) 1837 { 1838 struct ipa_endpoint *endpoint; 1839 1840 endpoint = &ipa->endpoint[data->endpoint_id]; 1841 1842 if (data->ee_id == GSI_EE_AP) 1843 ipa->channel_map[data->channel_id] = endpoint; 1844 ipa->name_map[name] = endpoint; 1845 1846 endpoint->ipa = ipa; 1847 endpoint->ee_id = data->ee_id; 1848 endpoint->channel_id = data->channel_id; 1849 endpoint->endpoint_id = data->endpoint_id; 1850 endpoint->toward_ipa = data->toward_ipa; 1851 endpoint->data = &data->endpoint.config; 1852 1853 ipa->initialized |= BIT(endpoint->endpoint_id); 1854 } 1855 1856 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) 1857 { 1858 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); 1859 1860 memset(endpoint, 0, sizeof(*endpoint)); 1861 } 1862 1863 void ipa_endpoint_exit(struct ipa *ipa) 1864 { 1865 u32 initialized = ipa->initialized; 1866 1867 while (initialized) { 1868 u32 endpoint_id = __fls(initialized); 1869 1870 initialized ^= BIT(endpoint_id); 1871 1872 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); 1873 } 1874 memset(ipa->name_map, 0, sizeof(ipa->name_map)); 1875 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); 1876 } 1877 1878 /* Returns a bitmask of endpoints that support filtering, or 0 on error */ 1879 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, 1880 const struct ipa_gsi_endpoint_data *data) 1881 { 1882 enum ipa_endpoint_name name; 1883 u32 filter_map; 1884 1885 if (!ipa_endpoint_data_valid(ipa, count, data)) 1886 return 0; /* Error */ 1887 1888 ipa->initialized = 0; 1889 1890 filter_map = 0; 1891 for (name = 0; name < count; name++, data++) { 1892 if (ipa_gsi_endpoint_data_empty(data)) 1893 continue; /* Skip over empty slots */ 1894 1895 ipa_endpoint_init_one(ipa, name, data); 1896 1897 if (data->endpoint.filter_support) 1898 filter_map |= BIT(data->endpoint_id); 1899 } 1900 1901 if (!ipa_filter_map_valid(ipa, filter_map)) 1902 goto err_endpoint_exit; 1903 
1904 return filter_map; /* Non-zero bitmask */ 1905 1906 err_endpoint_exit: 1907 ipa_endpoint_exit(ipa); 1908 1909 return 0; /* Error */ 1910 } 1911