// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)

static u32 aggr_byte_limit_max(enum ipa_version version)
{
	if (version < IPA_VERSION_4_5)
		return field_max(aggr_byte_limit_fmask(true));

	return field_max(aggr_byte_limit_fmask(false));
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
	/* A hard aggregation limit will not be crossed; aggregation closes
	 * if saving incoming data would cross the hard byte limit boundary.
	 *
	 * With a soft limit, aggregation closes *after* the size boundary
	 * has been crossed.  In that case the limit must leave enough space
	 * after that limit to receive a full MTU of data plus overhead.
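	 *
	 * Illustrative arithmetic (values assumed, not from the original
	 * source): with a hard limit, a 32 KB receive buffer encodes
	 * simply as 32.  With a soft limit, an MTU of 1500 bytes plus a
	 * few hundred bytes of skb overhead is reserved first, leaving
	 * roughly 30 KB, so the encoded value would be about 30.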
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The byte limit is encoded as a number of kilobytes */

	return rx_buffer_size / SZ_1K;
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		const struct ipa_endpoint_rx *rx_config;
		u32 buffer_size;
		u32 aggr_size;
		u32 limit;

		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		/* Nothing more to check for non-AP RX */
		if (data->ee_id != GSI_EE_AP)
			return true;

		rx_config = &data->endpoint.config.rx;

		/* The buffer size must hold an MTU plus overhead */
		buffer_size = rx_config->buffer_size;
		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
		if (buffer_size < limit) {
			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
				data->endpoint_id, buffer_size, limit);
			return false;
		}

		if (!data->endpoint.config.aggregation) {
			bool result = true;

			/* No aggregation; check for bogus aggregation data */
			if (rx_config->aggr_time_limit) {
				dev_err(dev,
					"time limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_hard_limit) {
				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_close_eof) {
				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			return result;	/* Nothing more to check */
		}

		/* For an endpoint supporting receive aggregation, the byte
		 * limit defines the point at which aggregation closes.  This
		 * check ensures the receive buffer size doesn't result in a
		 * limit that exceeds what's representable in the aggregation
		 * byte limit field.
		 */
		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
					     rx_config->aggr_hard_limit);
		limit = aggr_byte_limit_max(ipa->version);
		if (aggr_size > limit) {
			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
				data->endpoint_id, aggr_size, limit);

			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	/* Starting with IPA v4.5 sequencer replication is obsolete */
	if (ipa->version >= IPA_VERSION_4_5) {
		if (data->endpoint.config.tx.seq_rep_type) {
			dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
				data->endpoint_id);
			return false;
		}
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
307 */ 308 static bool 309 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) 310 { 311 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); 312 struct ipa *ipa = endpoint->ipa; 313 bool state; 314 u32 mask; 315 u32 val; 316 317 if (endpoint->toward_ipa) 318 WARN_ON(ipa->version >= IPA_VERSION_4_2); 319 else 320 WARN_ON(ipa->version >= IPA_VERSION_4_0); 321 322 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; 323 324 val = ioread32(ipa->reg_virt + offset); 325 state = !!(val & mask); 326 327 /* Don't bother if it's already in the requested state */ 328 if (suspend_delay != state) { 329 val ^= mask; 330 iowrite32(val, ipa->reg_virt + offset); 331 } 332 333 return state; 334 } 335 336 /* We don't care what the previous state was for delay mode */ 337 static void 338 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) 339 { 340 /* Delay mode should not be used for IPA v4.2+ */ 341 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); 342 WARN_ON(!endpoint->toward_ipa); 343 344 (void)ipa_endpoint_init_ctrl(endpoint, enable); 345 } 346 347 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) 348 { 349 u32 mask = BIT(endpoint->endpoint_id); 350 struct ipa *ipa = endpoint->ipa; 351 u32 offset; 352 u32 val; 353 354 WARN_ON(!(mask & ipa->available)); 355 356 offset = ipa_reg_state_aggr_active_offset(ipa->version); 357 val = ioread32(ipa->reg_virt + offset); 358 359 return !!(val & mask); 360 } 361 362 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) 363 { 364 u32 mask = BIT(endpoint->endpoint_id); 365 struct ipa *ipa = endpoint->ipa; 366 367 WARN_ON(!(mask & ipa->available)); 368 369 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); 370 } 371 372 /** 373 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt 374 * @endpoint: Endpoint on which to emulate a suspend 375 * 376 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended 377 * with an open aggregation frame. This is to work around a hardware 378 * issue in IPA version 3.5.1 where the suspend interrupt will not be 379 * generated when it should be. 380 */ 381 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) 382 { 383 struct ipa *ipa = endpoint->ipa; 384 385 if (!endpoint->config.aggregation) 386 return; 387 388 /* Nothing to do if the endpoint doesn't have aggregation open */ 389 if (!ipa_endpoint_aggr_active(endpoint)) 390 return; 391 392 /* Force close aggregation */ 393 ipa_endpoint_force_close(endpoint); 394 395 ipa_interrupt_simulate_suspend(ipa->interrupt); 396 } 397 398 /* Returns previous suspend state (true means suspend was enabled) */ 399 static bool 400 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) 401 { 402 bool suspended; 403 404 if (endpoint->ipa->version >= IPA_VERSION_4_0) 405 return enable; /* For IPA v4.0+, no change made */ 406 407 WARN_ON(endpoint->toward_ipa); 408 409 suspended = ipa_endpoint_init_ctrl(endpoint, enable); 410 411 /* A client suspended with an open aggregation frame will not 412 * generate a SUSPEND IPA interrupt. If enabling suspend, have 413 * ipa_endpoint_suspend_aggr() handle this. 414 */ 415 if (enable && !suspended) 416 ipa_endpoint_suspend_aggr(endpoint); 417 418 return suspended; 419 } 420 421 /* Put all modem RX endpoints into suspend mode, and stop transmission 422 * on all modem TX endpoints. 
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		if (!endpoint->toward_ipa)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
		else if (ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			gsi_modem_channel_flow_control(&ipa->gsi,
						       endpoint->channel_id,
						       enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint, plus the commands
	 * that clear the pipeline.
	 */
	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	enum ipa_cs_offload_en enabled;
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->config.checksum) {
		enum ipa_version version = endpoint->ipa->version;

		if (endpoint->toward_ipa) {
			u32 off;

			/* Checksum header offset is in 4-byte units */
			off = sizeof(struct rmnet_map_header);
			off /= sizeof(u32);
			val |= u32_encode_bits(off,
					       CS_METADATA_HDR_OFFSET_FMASK);

			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_UL
					: IPA_CS_OFFLOAD_INLINE;
		} else {
			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_DL
					: IPA_CS_OFFLOAD_INLINE;
		}
	} else {
		enabled = IPA_CS_OFFLOAD_NONE;
	}
	val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
	u32 offset;
	u32 val;

	if (!endpoint->toward_ipa)
		return;

	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
	u32 header_size = sizeof(struct rmnet_map_header);

	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->config.checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
		header_size += sizeof(struct rmnet_map_v5_csum_header);
	}

	return header_size;
}

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
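 *
 * As an illustration (assuming the standard struct rmnet_map_header
 * layout of a flags byte, a mux_id byte, and a big endian pkt_len
 * field): mux_id sits at offset 1 and pkt_len at offset 2 within the
 * 4-byte QMAP header, which is what the offsetof() calls below end up
 * programming.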
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, off);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				off &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->config.rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= HDR_ENDIANNESS_FMASK;	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
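	 *
	 * Illustrative aside (assuming the low-order HDR_OFST_PKT_SIZE
	 * field is several bits wide): the pkt_len offset of 2 fits
	 * entirely in that low-order field, so the MSB value computed
	 * and written below works out to 0.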
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
			u32 off;

			off = offsetof(struct rmnet_map_header, pkt_len);
			off >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(off,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->config.qmap)
		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->config.dma_mode) {
		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
	u32 gran_sel;
	u32 fmask;
	u32 val;

	if (version < IPA_VERSION_4_5) {
		/* We set aggregation granularity in ipa_hardware_config() */
		fmask = aggr_time_limit_fmask(true);
		val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
		WARN(val > field_max(fmask),
		     "aggr_time_limit too large (%u > %u usec)\n",
		     val, field_max(fmask) * IPA_AGGR_GRANULARITY);

		return u32_encode_bits(val, fmask);
	}

	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
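	 *
	 * Illustrative example (values assumed, not from the original
	 * source): a 500 usec limit encodes as 5 ticks of the 100 usec
	 * pulse generator, while a 10 msec limit (assuming the field
	 * cannot hold a value of 100) instead selects pulse generator 1
	 * and encodes as 10 ticks of 1 msec each.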
	 */
	fmask = aggr_time_limit_fmask(false);
	val = DIV_ROUND_CLOSEST(limit, 100);
	if (val > field_max(fmask)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = AGGR_GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(limit, 1000);
		WARN(val > field_max(fmask),
		     "aggr_time_limit too large (%u > %u usec)\n",
		     limit, field_max(fmask) * 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, fmask);
}

static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
	u32 val = enabled ? 1 : 0;

	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	enum ipa_version version = endpoint->ipa->version;
	u32 val = 0;

	if (endpoint->config.aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx *rx_config;
			u32 buffer_size;
			bool close_eof;
			u32 limit;

			rx_config = &endpoint->config.rx;
			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			buffer_size = rx_config->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
						 rx_config->aggr_hard_limit);
			val |= aggr_byte_limit_encoded(version, limit);

			limit = rx_config->aggr_time_limit;
			val |= aggr_time_limit_encoded(version, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			close_eof = rx_config->aggr_close_eof;
			val |= aggr_sw_eof_active_encoded(version, close_eof);
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds.  The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
	u32 gran_sel;
	u32 val;

	/* IPA v4.5 expresses time limits using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val > field_max(TIME_LIMIT_FMASK)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}

/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value that should be written to that register
 * that represents the timeout period provided.  For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version >= IPA_VERSION_4_5)
		return hol_block_timer_qtime_val(ipa, microseconds);

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_core_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that the high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* This should only be changed when HOL_BLOCK_EN is disabled */
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}

static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_disable(endpoint);
		ipa_endpoint_init_hol_block_enable(endpoint, 0);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Low-order byte configures primary packet processing */
	val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);

	/* Second byte (if supported) configures replicated packet processing */
	if (endpoint->ipa->version < IPA_VERSION_4_5)
		val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
				       SEQ_REP_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
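	 *
	 * For example (illustrative): an skb with three page fragments
	 * needs four TREs (one for the linear data plus one per
	 * fragment), so it can be sent as-is only if skb_frag_max is at
	 * least three; otherwise it is linearized and sent with a
	 * single TRE.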
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > endpoint->skb_frag_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->config.status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->config.tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
{
	struct page *page;
	u32 buffer_size;
	u32 offset;
	u32 len;
	int ret;

	buffer_size = endpoint->config.rx.buffer_size;
	page = dev_alloc_pages(get_order(buffer_size));
	if (!page)
		return -ENOMEM;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = buffer_size - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		put_page(page);
	else
		trans->data = page;	/* transaction owns page now */

	return ret;
}

/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
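 *
 * As a point of reference (from the definitions above), the GSI
 * doorbell is only rung once per IPA_REPLENISH_BATCH (16) queued
 * buffers, so the hardware is told about new receive buffers in
 * batches rather than one at a time.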
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

		/* Ring the doorbell if we've got a full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	return;

try_again_later:
	gsi_trans_free(trans);
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);

	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	if (!endpoint->netdev)
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));

	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u8_get_bits(status->endp_dst_idx,
				  IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
{
	struct ipa_endpoint *command_endpoint;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = u8_get_bits(status->endp_src_idx,
				  IPA_STATUS_SRC_IDX_FMASK);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 val;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status element.  If the hardware is configured
		 * to pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->config.rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, status)) {
			void *data2;
			u32 extra;
			u32 len2;

			/* Client receives only packet data (no status) */
			data2 = data + sizeof(*status);
			len2 = le16_to_cpu(status->pkt_len);

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	struct page *page;

	if (endpoint->toward_ipa)
		return;

	if (trans->cancelled)
		goto done;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->config.status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
done:
	ipa_endpoint_replenish(endpoint);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			put_page(page);
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
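 *
 * Roughly (summarizing the code below): aggregation is force-closed,
 * the channel is reset with the doorbell engine disabled, the channel
 * is started and a one-byte read is issued, aggregation is polled
 * until it closes, then the channel is stopped and reset once more
 * with the doorbell re-enabled.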
1449 * 1450 * Return: 0 if successful, or a negative error code 1451 */ 1452 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) 1453 { 1454 struct device *dev = &endpoint->ipa->pdev->dev; 1455 struct ipa *ipa = endpoint->ipa; 1456 struct gsi *gsi = &ipa->gsi; 1457 bool suspended = false; 1458 dma_addr_t addr; 1459 u32 retries; 1460 u32 len = 1; 1461 void *virt; 1462 int ret; 1463 1464 virt = kzalloc(len, GFP_KERNEL); 1465 if (!virt) 1466 return -ENOMEM; 1467 1468 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE); 1469 if (dma_mapping_error(dev, addr)) { 1470 ret = -ENOMEM; 1471 goto out_kfree; 1472 } 1473 1474 /* Force close aggregation before issuing the reset */ 1475 ipa_endpoint_force_close(endpoint); 1476 1477 /* Reset and reconfigure the channel with the doorbell engine 1478 * disabled. Then poll until we know aggregation is no longer 1479 * active. We'll re-enable the doorbell (if appropriate) when 1480 * we reset again below. 1481 */ 1482 gsi_channel_reset(gsi, endpoint->channel_id, false); 1483 1484 /* Make sure the channel isn't suspended */ 1485 suspended = ipa_endpoint_program_suspend(endpoint, false); 1486 1487 /* Start channel and do a 1 byte read */ 1488 ret = gsi_channel_start(gsi, endpoint->channel_id); 1489 if (ret) 1490 goto out_suspend_again; 1491 1492 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); 1493 if (ret) 1494 goto err_endpoint_stop; 1495 1496 /* Wait for aggregation to be closed on the channel */ 1497 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX; 1498 do { 1499 if (!ipa_endpoint_aggr_active(endpoint)) 1500 break; 1501 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 1502 } while (retries--); 1503 1504 /* Check one last time */ 1505 if (ipa_endpoint_aggr_active(endpoint)) 1506 dev_err(dev, "endpoint %u still active during reset\n", 1507 endpoint->endpoint_id); 1508 1509 gsi_trans_read_byte_done(gsi, endpoint->channel_id); 1510 1511 ret = gsi_channel_stop(gsi, endpoint->channel_id); 1512 if (ret) 1513 goto out_suspend_again; 1514 1515 /* Finally, reset and reconfigure the channel again (re-enabling 1516 * the doorbell engine if appropriate). Sleep for 1 millisecond to 1517 * complete the channel reset sequence. Finish by suspending the 1518 * channel again (if necessary). 1519 */ 1520 gsi_channel_reset(gsi, endpoint->channel_id, true); 1521 1522 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 1523 1524 goto out_suspend_again; 1525 1526 err_endpoint_stop: 1527 (void)gsi_channel_stop(gsi, endpoint->channel_id); 1528 out_suspend_again: 1529 if (suspended) 1530 (void)ipa_endpoint_program_suspend(endpoint, true); 1531 dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE); 1532 out_kfree: 1533 kfree(virt); 1534 1535 return ret; 1536 } 1537 1538 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) 1539 { 1540 u32 channel_id = endpoint->channel_id; 1541 struct ipa *ipa = endpoint->ipa; 1542 bool special; 1543 int ret = 0; 1544 1545 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation 1546 * is active, we need to handle things specially to recover. 1547 * All other cases just need to reset the underlying GSI channel. 
1548 */ 1549 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && 1550 endpoint->config.aggregation; 1551 if (special && ipa_endpoint_aggr_active(endpoint)) 1552 ret = ipa_endpoint_reset_rx_aggr(endpoint); 1553 else 1554 gsi_channel_reset(&ipa->gsi, channel_id, true); 1555 1556 if (ret) 1557 dev_err(&ipa->pdev->dev, 1558 "error %d resetting channel %u for endpoint %u\n", 1559 ret, endpoint->channel_id, endpoint->endpoint_id); 1560 } 1561 1562 static void ipa_endpoint_program(struct ipa_endpoint *endpoint) 1563 { 1564 if (endpoint->toward_ipa) { 1565 /* Newer versions of IPA use GSI channel flow control 1566 * instead of endpoint DELAY mode to prevent sending data. 1567 * Flow control is disabled for newly-allocated channels, 1568 * and we can assume flow control is not (ever) enabled 1569 * for AP TX channels. 1570 */ 1571 if (endpoint->ipa->version < IPA_VERSION_4_2) 1572 ipa_endpoint_program_delay(endpoint, false); 1573 } else { 1574 /* Ensure suspend mode is off on all AP RX endpoints */ 1575 (void)ipa_endpoint_program_suspend(endpoint, false); 1576 } 1577 ipa_endpoint_init_cfg(endpoint); 1578 ipa_endpoint_init_nat(endpoint); 1579 ipa_endpoint_init_hdr(endpoint); 1580 ipa_endpoint_init_hdr_ext(endpoint); 1581 ipa_endpoint_init_hdr_metadata_mask(endpoint); 1582 ipa_endpoint_init_mode(endpoint); 1583 ipa_endpoint_init_aggr(endpoint); 1584 if (!endpoint->toward_ipa) { 1585 if (endpoint->config.rx.holb_drop) 1586 ipa_endpoint_init_hol_block_enable(endpoint, 0); 1587 else 1588 ipa_endpoint_init_hol_block_disable(endpoint); 1589 } 1590 ipa_endpoint_init_deaggr(endpoint); 1591 ipa_endpoint_init_rsrc_grp(endpoint); 1592 ipa_endpoint_init_seq(endpoint); 1593 ipa_endpoint_status(endpoint); 1594 } 1595 1596 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) 1597 { 1598 struct ipa *ipa = endpoint->ipa; 1599 struct gsi *gsi = &ipa->gsi; 1600 int ret; 1601 1602 ret = gsi_channel_start(gsi, endpoint->channel_id); 1603 if (ret) { 1604 dev_err(&ipa->pdev->dev, 1605 "error %d starting %cX channel %u for endpoint %u\n", 1606 ret, endpoint->toward_ipa ? 
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
1725 */ 1726 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); 1727 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); 1728 INIT_DELAYED_WORK(&endpoint->replenish_work, 1729 ipa_endpoint_replenish_work); 1730 } 1731 1732 ipa_endpoint_program(endpoint); 1733 1734 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); 1735 } 1736 1737 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) 1738 { 1739 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); 1740 1741 if (!endpoint->toward_ipa) 1742 cancel_delayed_work_sync(&endpoint->replenish_work); 1743 1744 ipa_endpoint_reset(endpoint); 1745 } 1746 1747 void ipa_endpoint_setup(struct ipa *ipa) 1748 { 1749 u32 initialized = ipa->initialized; 1750 1751 ipa->set_up = 0; 1752 while (initialized) { 1753 u32 endpoint_id = __ffs(initialized); 1754 1755 initialized ^= BIT(endpoint_id); 1756 1757 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); 1758 } 1759 } 1760 1761 void ipa_endpoint_teardown(struct ipa *ipa) 1762 { 1763 u32 set_up = ipa->set_up; 1764 1765 while (set_up) { 1766 u32 endpoint_id = __fls(set_up); 1767 1768 set_up ^= BIT(endpoint_id); 1769 1770 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); 1771 } 1772 ipa->set_up = 0; 1773 } 1774 1775 int ipa_endpoint_config(struct ipa *ipa) 1776 { 1777 struct device *dev = &ipa->pdev->dev; 1778 u32 initialized; 1779 u32 rx_base; 1780 u32 rx_mask; 1781 u32 tx_mask; 1782 int ret = 0; 1783 u32 max; 1784 u32 val; 1785 1786 /* Prior to IPAv3.5, the FLAVOR_0 register was not supported. 1787 * Furthermore, the endpoints were not grouped such that TX 1788 * endpoint numbers started with 0 and RX endpoints had numbers 1789 * higher than all TX endpoints, so we can't do the simple 1790 * direction check used for newer hardware below. 1791 * 1792 * For hardware that doesn't support the FLAVOR_0 register, 1793 * just set the available mask to support any endpoint, and 1794 * assume the configuration is valid. 1795 */ 1796 if (ipa->version < IPA_VERSION_3_5) { 1797 ipa->available = ~0; 1798 return 0; 1799 } 1800 1801 /* Find out about the endpoints supplied by the hardware, and ensure 1802 * the highest one doesn't exceed the number we support. 
1803 */ 1804 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); 1805 1806 /* Our RX is an IPA producer */ 1807 rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK); 1808 max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK); 1809 if (max > IPA_ENDPOINT_MAX) { 1810 dev_err(dev, "too many endpoints (%u > %u)\n", 1811 max, IPA_ENDPOINT_MAX); 1812 return -EINVAL; 1813 } 1814 rx_mask = GENMASK(max - 1, rx_base); 1815 1816 /* Our TX is an IPA consumer */ 1817 max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK); 1818 tx_mask = GENMASK(max - 1, 0); 1819 1820 ipa->available = rx_mask | tx_mask; 1821 1822 /* Check for initialized endpoints not supported by the hardware */ 1823 if (ipa->initialized & ~ipa->available) { 1824 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", 1825 ipa->initialized & ~ipa->available); 1826 ret = -EINVAL; /* Report other errors too */ 1827 } 1828 1829 initialized = ipa->initialized; 1830 while (initialized) { 1831 u32 endpoint_id = __ffs(initialized); 1832 struct ipa_endpoint *endpoint; 1833 1834 initialized ^= BIT(endpoint_id); 1835 1836 /* Make sure it's pointing in the right direction */ 1837 endpoint = &ipa->endpoint[endpoint_id]; 1838 if ((endpoint_id < rx_base) != endpoint->toward_ipa) { 1839 dev_err(dev, "endpoint id %u wrong direction\n", 1840 endpoint_id); 1841 ret = -EINVAL; 1842 } 1843 } 1844 1845 return ret; 1846 } 1847 1848 void ipa_endpoint_deconfig(struct ipa *ipa) 1849 { 1850 ipa->available = 0; /* Nothing more to do */ 1851 } 1852 1853 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, 1854 const struct ipa_gsi_endpoint_data *data) 1855 { 1856 struct ipa_endpoint *endpoint; 1857 1858 endpoint = &ipa->endpoint[data->endpoint_id]; 1859 1860 if (data->ee_id == GSI_EE_AP) 1861 ipa->channel_map[data->channel_id] = endpoint; 1862 ipa->name_map[name] = endpoint; 1863 1864 endpoint->ipa = ipa; 1865 endpoint->ee_id = data->ee_id; 1866 endpoint->channel_id = data->channel_id; 1867 endpoint->endpoint_id = data->endpoint_id; 1868 endpoint->toward_ipa = data->toward_ipa; 1869 endpoint->config = data->endpoint.config; 1870 1871 ipa->initialized |= BIT(endpoint->endpoint_id); 1872 } 1873 1874 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) 1875 { 1876 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); 1877 1878 memset(endpoint, 0, sizeof(*endpoint)); 1879 } 1880 1881 void ipa_endpoint_exit(struct ipa *ipa) 1882 { 1883 u32 initialized = ipa->initialized; 1884 1885 while (initialized) { 1886 u32 endpoint_id = __fls(initialized); 1887 1888 initialized ^= BIT(endpoint_id); 1889 1890 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); 1891 } 1892 memset(ipa->name_map, 0, sizeof(ipa->name_map)); 1893 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); 1894 } 1895 1896 /* Returns a bitmask of endpoints that support filtering, or 0 on error */ 1897 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, 1898 const struct ipa_gsi_endpoint_data *data) 1899 { 1900 enum ipa_endpoint_name name; 1901 u32 filter_map; 1902 1903 BUILD_BUG_ON(!IPA_REPLENISH_BATCH); 1904 1905 if (!ipa_endpoint_data_valid(ipa, count, data)) 1906 return 0; /* Error */ 1907 1908 ipa->initialized = 0; 1909 1910 filter_map = 0; 1911 for (name = 0; name < count; name++, data++) { 1912 if (ipa_gsi_endpoint_data_empty(data)) 1913 continue; /* Skip over empty slots */ 1914 1915 ipa_endpoint_init_one(ipa, name, data); 1916 1917 if (data->endpoint.filter_support) 1918 filter_map |= BIT(data->endpoint_id); 1919 if (data->ee_id == GSI_EE_MODEM && 
			ipa->modem_tx_count++;
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}