1 // SPDX-License-Identifier: GPL-2.0 2 3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 4 * Copyright (C) 2019-2021 Linaro Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <linux/device.h> 9 #include <linux/slab.h> 10 #include <linux/bitfield.h> 11 #include <linux/if_rmnet.h> 12 #include <linux/dma-direction.h> 13 14 #include "gsi.h" 15 #include "gsi_trans.h" 16 #include "ipa.h" 17 #include "ipa_data.h" 18 #include "ipa_endpoint.h" 19 #include "ipa_cmd.h" 20 #include "ipa_mem.h" 21 #include "ipa_modem.h" 22 #include "ipa_table.h" 23 #include "ipa_gsi.h" 24 #include "ipa_clock.h" 25 26 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 27 28 #define IPA_REPLENISH_BATCH 16 29 30 /* RX buffer is 1 page (or a power-of-2 contiguous pages) */ 31 #define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */ 32 33 /* The amount of RX buffer space consumed by standard skb overhead */ 34 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0)) 35 36 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */ 37 #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */ 38 39 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 40 #define IPA_AGGR_TIME_LIMIT 500 /* microseconds */ 41 42 /** enum ipa_status_opcode - status element opcode hardware values */ 43 enum ipa_status_opcode { 44 IPA_STATUS_OPCODE_PACKET = 0x01, 45 IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04, 46 IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08, 47 IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40, 48 }; 49 50 /** enum ipa_status_exception - status element exception type */ 51 enum ipa_status_exception { 52 /* 0 means no exception */ 53 IPA_STATUS_EXCEPTION_DEAGGR = 0x01, 54 }; 55 56 /* Status element provided by hardware */ 57 struct ipa_status { 58 u8 opcode; /* enum ipa_status_opcode */ 59 u8 exception; /* enum ipa_status_exception */ 60 __le16 mask; 61 __le16 pkt_len; 62 u8 endp_src_idx; 63 u8 endp_dst_idx; 64 __le32 metadata; 65 __le32 flags1; 66 __le64 flags2; 67 __le32 flags3; 68 __le32 flags4; 69 }; 70 71 /* Field masks for struct ipa_status structure fields */ 72 #define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4) 73 #define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0) 74 #define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0) 75 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22) 76 #define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16) 77 78 #ifdef IPA_VALIDATE 79 80 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, 81 const struct ipa_gsi_endpoint_data *all_data, 82 const struct ipa_gsi_endpoint_data *data) 83 { 84 const struct ipa_gsi_endpoint_data *other_data; 85 struct device *dev = &ipa->pdev->dev; 86 enum ipa_endpoint_name other_name; 87 88 if (ipa_gsi_endpoint_data_empty(data)) 89 return true; 90 91 /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */ 92 if (ipa->version >= IPA_VERSION_4_5) 93 if (data->endpoint.config.checksum) 94 return false; 95 96 if (!data->toward_ipa) { 97 if (data->endpoint.filter_support) { 98 dev_err(dev, "filtering not supported for " 99 "RX endpoint %u\n", 100 data->endpoint_id); 101 return false; 102 } 103 104 return true; /* Nothing more to check for RX */ 105 } 106 107 if (data->endpoint.config.status_enable) { 108 other_name = data->endpoint.config.tx.status_endpoint; 109 if (other_name >= count) { 110 dev_err(dev, "status endpoint name %u out of range " 111 "for endpoint %u\n", 112 other_name, data->endpoint_id); 113 return false; 114 } 115 116 /* Status endpoint must be defined... */ 117 other_data = &all_data[other_name]; 118 if (ipa_gsi_endpoint_data_empty(other_data)) { 119 dev_err(dev, "DMA endpoint name %u undefined " 120 "for endpoint %u\n", 121 other_name, data->endpoint_id); 122 return false; 123 } 124 125 /* ...and has to be an RX endpoint... */ 126 if (other_data->toward_ipa) { 127 dev_err(dev, 128 "status endpoint for endpoint %u not RX\n", 129 data->endpoint_id); 130 return false; 131 } 132 133 /* ...and if it's to be an AP endpoint... */ 134 if (other_data->ee_id == GSI_EE_AP) { 135 /* ...make sure it has status enabled. */ 136 if (!other_data->endpoint.config.status_enable) { 137 dev_err(dev, 138 "status not enabled for endpoint %u\n", 139 other_data->endpoint_id); 140 return false; 141 } 142 } 143 } 144 145 if (data->endpoint.config.dma_mode) { 146 other_name = data->endpoint.config.dma_endpoint; 147 if (other_name >= count) { 148 dev_err(dev, "DMA endpoint name %u out of range " 149 "for endpoint %u\n", 150 other_name, data->endpoint_id); 151 return false; 152 } 153 154 other_data = &all_data[other_name]; 155 if (ipa_gsi_endpoint_data_empty(other_data)) { 156 dev_err(dev, "DMA endpoint name %u undefined " 157 "for endpoint %u\n", 158 other_name, data->endpoint_id); 159 return false; 160 } 161 } 162 163 return true; 164 } 165 166 static u32 aggr_byte_limit_max(enum ipa_version version) 167 { 168 if (version < IPA_VERSION_4_5) 169 return field_max(aggr_byte_limit_fmask(true)); 170 171 return field_max(aggr_byte_limit_fmask(false)); 172 } 173 174 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, 175 const struct ipa_gsi_endpoint_data *data) 176 { 177 const struct ipa_gsi_endpoint_data *dp = data; 178 struct device *dev = &ipa->pdev->dev; 179 enum ipa_endpoint_name name; 180 u32 limit; 181 182 if (count > IPA_ENDPOINT_COUNT) { 183 dev_err(dev, "too many endpoints specified (%u > %u)\n", 184 count, IPA_ENDPOINT_COUNT); 185 return false; 186 } 187 188 /* The aggregation byte limit defines the point at which an 189 * aggregation window will close. It is programmed into the 190 * IPA hardware as a number of KB. We don't use "hard byte 191 * limit" aggregation, which means that we need to supply 192 * enough space in a receive buffer to hold a complete MTU 193 * plus normal skb overhead *after* that aggregation byte 194 * limit has been crossed. 195 * 196 * This check ensures we don't define a receive buffer size 197 * that would exceed what we can represent in the field that 198 * is used to program its size. 199 */ 200 limit = aggr_byte_limit_max(ipa->version) * SZ_1K; 201 limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD; 202 if (limit < IPA_RX_BUFFER_SIZE) { 203 dev_err(dev, "buffer size too big for aggregation (%u > %u)\n", 204 IPA_RX_BUFFER_SIZE, limit); 205 return false; 206 } 207 208 /* Make sure needed endpoints have defined data */ 209 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) { 210 dev_err(dev, "command TX endpoint not defined\n"); 211 return false; 212 } 213 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) { 214 dev_err(dev, "LAN RX endpoint not defined\n"); 215 return false; 216 } 217 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) { 218 dev_err(dev, "AP->modem TX endpoint not defined\n"); 219 return false; 220 } 221 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) { 222 dev_err(dev, "AP<-modem RX endpoint not defined\n"); 223 return false; 224 } 225 226 for (name = 0; name < count; name++, dp++) 227 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) 228 return false; 229 230 return true; 231 } 232 233 #else /* !IPA_VALIDATE */ 234 235 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, 236 const struct ipa_gsi_endpoint_data *data) 237 { 238 const struct ipa_gsi_endpoint_data *dp = data; 239 enum ipa_endpoint_name name; 240 241 if (ipa->version < IPA_VERSION_4_5) 242 return true; 243 244 /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */ 245 for (name = 0; name < count; name++, dp++) 246 if (data->endpoint.config.checksum) 247 return false; 248 249 return true; 250 } 251 252 #endif /* !IPA_VALIDATE */ 253 254 /* Allocate a transaction to use on a non-command endpoint */ 255 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, 256 u32 tre_count) 257 { 258 struct gsi *gsi = &endpoint->ipa->gsi; 259 u32 channel_id = endpoint->channel_id; 260 enum dma_data_direction direction; 261 262 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 263 264 return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction); 265 } 266 267 /* suspend_delay represents suspend for RX, delay for TX endpoints. 268 * Note that suspend is not supported starting with IPA v4.0. 269 */ 270 static bool 271 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) 272 { 273 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); 274 struct ipa *ipa = endpoint->ipa; 275 bool state; 276 u32 mask; 277 u32 val; 278 279 /* Suspend is not supported for IPA v4.0+. Delay doesn't work 280 * correctly on IPA v4.2. 281 * 282 * if (endpoint->toward_ipa) 283 * assert(ipa->version != IPA_VERSION_4.2); 284 * else 285 * assert(ipa->version < IPA_VERSION_4_0); 286 */ 287 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; 288 289 val = ioread32(ipa->reg_virt + offset); 290 /* Don't bother if it's already in the requested state */ 291 state = !!(val & mask); 292 if (suspend_delay != state) { 293 val ^= mask; 294 iowrite32(val, ipa->reg_virt + offset); 295 } 296 297 return state; 298 } 299 300 /* We currently don't care what the previous state was for delay mode */ 301 static void 302 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) 303 { 304 /* assert(endpoint->toward_ipa); */ 305 306 /* Delay mode doesn't work properly for IPA v4.2 */ 307 if (endpoint->ipa->version != IPA_VERSION_4_2) 308 (void)ipa_endpoint_init_ctrl(endpoint, enable); 309 } 310 311 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) 312 { 313 u32 mask = BIT(endpoint->endpoint_id); 314 struct ipa *ipa = endpoint->ipa; 315 u32 offset; 316 u32 val; 317 318 /* assert(mask & ipa->available); */ 319 offset = ipa_reg_state_aggr_active_offset(ipa->version); 320 val = ioread32(ipa->reg_virt + offset); 321 322 return !!(val & mask); 323 } 324 325 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) 326 { 327 u32 mask = BIT(endpoint->endpoint_id); 328 struct ipa *ipa = endpoint->ipa; 329 330 /* assert(mask & ipa->available); */ 331 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); 332 } 333 334 /** 335 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt 336 * @endpoint: Endpoint on which to emulate a suspend 337 * 338 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended 339 * with an open aggregation frame. This is to work around a hardware 340 * issue in IPA version 3.5.1 where the suspend interrupt will not be 341 * generated when it should be. 342 */ 343 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) 344 { 345 struct ipa *ipa = endpoint->ipa; 346 347 if (!endpoint->data->aggregation) 348 return; 349 350 /* Nothing to do if the endpoint doesn't have aggregation open */ 351 if (!ipa_endpoint_aggr_active(endpoint)) 352 return; 353 354 /* Force close aggregation */ 355 ipa_endpoint_force_close(endpoint); 356 357 ipa_interrupt_simulate_suspend(ipa->interrupt); 358 } 359 360 /* Returns previous suspend state (true means suspend was enabled) */ 361 static bool 362 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) 363 { 364 bool suspended; 365 366 if (endpoint->ipa->version >= IPA_VERSION_4_0) 367 return enable; /* For IPA v4.0+, no change made */ 368 369 /* assert(!endpoint->toward_ipa); */ 370 371 suspended = ipa_endpoint_init_ctrl(endpoint, enable); 372 373 /* A client suspended with an open aggregation frame will not 374 * generate a SUSPEND IPA interrupt. If enabling suspend, have 375 * ipa_endpoint_suspend_aggr() handle this. 376 */ 377 if (enable && !suspended) 378 ipa_endpoint_suspend_aggr(endpoint); 379 380 return suspended; 381 } 382 383 /* Enable or disable delay or suspend mode on all modem endpoints */ 384 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) 385 { 386 u32 endpoint_id; 387 388 /* DELAY mode doesn't work correctly on IPA v4.2 */ 389 if (ipa->version == IPA_VERSION_4_2) 390 return; 391 392 for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) { 393 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; 394 395 if (endpoint->ee_id != GSI_EE_MODEM) 396 continue; 397 398 /* Set TX delay mode or RX suspend mode */ 399 if (endpoint->toward_ipa) 400 ipa_endpoint_program_delay(endpoint, enable); 401 else 402 (void)ipa_endpoint_program_suspend(endpoint, enable); 403 } 404 } 405 406 /* Reset all modem endpoints to use the default exception endpoint */ 407 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) 408 { 409 u32 initialized = ipa->initialized; 410 struct gsi_trans *trans; 411 u32 count; 412 413 /* We need one command per modem TX endpoint. We can get an upper 414 * bound on that by assuming all initialized endpoints are modem->IPA. 415 * That won't happen, and we could be more precise, but this is fine 416 * for now. End the transaction with commands to clear the pipeline. 417 */ 418 count = hweight32(initialized) + ipa_cmd_pipeline_clear_count(); 419 trans = ipa_cmd_trans_alloc(ipa, count); 420 if (!trans) { 421 dev_err(&ipa->pdev->dev, 422 "no transaction to reset modem exception endpoints\n"); 423 return -EBUSY; 424 } 425 426 while (initialized) { 427 u32 endpoint_id = __ffs(initialized); 428 struct ipa_endpoint *endpoint; 429 u32 offset; 430 431 initialized ^= BIT(endpoint_id); 432 433 /* We only reset modem TX endpoints */ 434 endpoint = &ipa->endpoint[endpoint_id]; 435 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) 436 continue; 437 438 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); 439 440 /* Value written is 0, and all bits are updated. That 441 * means status is disabled on the endpoint, and as a 442 * result all other fields in the register are ignored. 443 */ 444 ipa_cmd_register_write_add(trans, offset, 0, ~0, false); 445 } 446 447 ipa_cmd_pipeline_clear_add(trans); 448 449 /* XXX This should have a 1 second timeout */ 450 gsi_trans_commit_wait(trans); 451 452 ipa_cmd_pipeline_clear_wait(ipa); 453 454 return 0; 455 } 456 457 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) 458 { 459 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); 460 u32 val = 0; 461 462 /* FRAG_OFFLOAD_EN is 0 */ 463 if (endpoint->data->checksum) { 464 if (endpoint->toward_ipa) { 465 u32 checksum_offset; 466 467 val |= u32_encode_bits(IPA_CS_OFFLOAD_UL, 468 CS_OFFLOAD_EN_FMASK); 469 /* Checksum header offset is in 4-byte units */ 470 checksum_offset = sizeof(struct rmnet_map_header); 471 checksum_offset /= sizeof(u32); 472 val |= u32_encode_bits(checksum_offset, 473 CS_METADATA_HDR_OFFSET_FMASK); 474 } else { 475 val |= u32_encode_bits(IPA_CS_OFFLOAD_DL, 476 CS_OFFLOAD_EN_FMASK); 477 } 478 } else { 479 val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE, 480 CS_OFFLOAD_EN_FMASK); 481 } 482 /* CS_GEN_QMB_MASTER_SEL is 0 */ 483 484 iowrite32(val, endpoint->ipa->reg_virt + offset); 485 } 486 487 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint) 488 { 489 u32 offset; 490 u32 val; 491 492 if (!endpoint->toward_ipa) 493 return; 494 495 offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id); 496 val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK); 497 498 iowrite32(val, endpoint->ipa->reg_virt + offset); 499 } 500 501 /** 502 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register 503 * @endpoint: Endpoint pointer 504 * 505 * We program QMAP endpoints so each packet received is preceded by a QMAP 506 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte 507 * packet size field, and we have the IPA hardware populate both for each 508 * received packet. The header is configured (in the HDR_EXT register) 509 * to use big endian format. 510 * 511 * The packet size is written into the QMAP header's pkt_len field. That 512 * location is defined here using the HDR_OFST_PKT_SIZE field. 513 * 514 * The mux_id comes from a 4-byte metadata value supplied with each packet 515 * by the modem. It is *not* a QMAP header, but it does contain the mux_id 516 * value that we want, in its low-order byte. A bitmask defined in the 517 * endpoint's METADATA_MASK register defines which byte within the modem 518 * metadata contains the mux_id. And the OFST_METADATA field programmed 519 * here indicates where the extracted byte should be placed within the QMAP 520 * header. 521 */ 522 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) 523 { 524 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); 525 struct ipa *ipa = endpoint->ipa; 526 u32 val = 0; 527 528 if (endpoint->data->qmap) { 529 size_t header_size = sizeof(struct rmnet_map_header); 530 enum ipa_version version = ipa->version; 531 532 /* We might supply a checksum header after the QMAP header */ 533 if (endpoint->toward_ipa && endpoint->data->checksum) 534 header_size += sizeof(struct rmnet_map_ul_csum_header); 535 val |= ipa_header_size_encoded(version, header_size); 536 537 /* Define how to fill fields in a received QMAP header */ 538 if (!endpoint->toward_ipa) { 539 u32 offset; /* Field offset within header */ 540 541 /* Where IPA will write the metadata value */ 542 offset = offsetof(struct rmnet_map_header, mux_id); 543 val |= ipa_metadata_offset_encoded(version, offset); 544 545 /* Where IPA will write the length */ 546 offset = offsetof(struct rmnet_map_header, pkt_len); 547 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ 548 if (version >= IPA_VERSION_4_5) 549 offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK); 550 551 val |= HDR_OFST_PKT_SIZE_VALID_FMASK; 552 val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK); 553 } 554 /* For QMAP TX, metadata offset is 0 (modem assumes this) */ 555 val |= HDR_OFST_METADATA_VALID_FMASK; 556 557 /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */ 558 /* HDR_A5_MUX is 0 */ 559 /* HDR_LEN_INC_DEAGG_HDR is 0 */ 560 /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */ 561 } 562 563 iowrite32(val, ipa->reg_virt + offset); 564 } 565 566 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) 567 { 568 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); 569 u32 pad_align = endpoint->data->rx.pad_align; 570 struct ipa *ipa = endpoint->ipa; 571 u32 val = 0; 572 573 val |= HDR_ENDIANNESS_FMASK; /* big endian */ 574 575 /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet 576 * driver assumes this field is meaningful in packets it receives, 577 * and assumes the header's payload length includes that padding. 578 * The RMNet driver does *not* pad packets it sends, however, so 579 * the pad field (although 0) should be ignored. 580 */ 581 if (endpoint->data->qmap && !endpoint->toward_ipa) { 582 val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK; 583 /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ 584 val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK; 585 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ 586 } 587 588 /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */ 589 if (!endpoint->toward_ipa) 590 val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK); 591 592 /* IPA v4.5 adds some most-significant bits to a few fields, 593 * two of which are defined in the HDR (not HDR_EXT) register. 594 */ 595 if (ipa->version >= IPA_VERSION_4_5) { 596 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ 597 if (endpoint->data->qmap && !endpoint->toward_ipa) { 598 u32 offset; 599 600 offset = offsetof(struct rmnet_map_header, pkt_len); 601 offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK); 602 val |= u32_encode_bits(offset, 603 HDR_OFST_PKT_SIZE_MSB_FMASK); 604 /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */ 605 } 606 } 607 iowrite32(val, ipa->reg_virt + offset); 608 } 609 610 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) 611 { 612 u32 endpoint_id = endpoint->endpoint_id; 613 u32 val = 0; 614 u32 offset; 615 616 if (endpoint->toward_ipa) 617 return; /* Register not valid for TX endpoints */ 618 619 offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); 620 621 /* Note that HDR_ENDIANNESS indicates big endian header fields */ 622 if (endpoint->data->qmap) 623 val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); 624 625 iowrite32(val, endpoint->ipa->reg_virt + offset); 626 } 627 628 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) 629 { 630 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); 631 u32 val; 632 633 if (!endpoint->toward_ipa) 634 return; /* Register not valid for RX endpoints */ 635 636 if (endpoint->data->dma_mode) { 637 enum ipa_endpoint_name name = endpoint->data->dma_endpoint; 638 u32 dma_endpoint_id; 639 640 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; 641 642 val = u32_encode_bits(IPA_DMA, MODE_FMASK); 643 val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK); 644 } else { 645 val = u32_encode_bits(IPA_BASIC, MODE_FMASK); 646 } 647 /* All other bits unspecified (and 0) */ 648 649 iowrite32(val, endpoint->ipa->reg_virt + offset); 650 } 651 652 /* Compute the aggregation size value to use for a given buffer size */ 653 static u32 ipa_aggr_size_kb(u32 rx_buffer_size) 654 { 655 /* We don't use "hard byte limit" aggregation, so we define the 656 * aggregation limit such that our buffer has enough space *after* 657 * that limit to receive a full MTU of data, plus overhead. 658 */ 659 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; 660 661 return rx_buffer_size / SZ_1K; 662 } 663 664 /* Encoded values for AGGR endpoint register fields */ 665 static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit) 666 { 667 if (version < IPA_VERSION_4_5) 668 return u32_encode_bits(limit, aggr_byte_limit_fmask(true)); 669 670 return u32_encode_bits(limit, aggr_byte_limit_fmask(false)); 671 } 672 673 /* Encode the aggregation timer limit (microseconds) based on IPA version */ 674 static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit) 675 { 676 u32 gran_sel; 677 u32 fmask; 678 u32 val; 679 680 if (version < IPA_VERSION_4_5) { 681 /* We set aggregation granularity in ipa_hardware_config() */ 682 limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); 683 684 return u32_encode_bits(limit, aggr_time_limit_fmask(true)); 685 } 686 687 /* IPA v4.5 expresses the time limit using Qtime. The AP has 688 * pulse generators 0 and 1 available, which were configured 689 * in ipa_qtime_config() to have granularity 100 usec and 690 * 1 msec, respectively. Use pulse generator 0 if possible, 691 * otherwise fall back to pulse generator 1. 692 */ 693 fmask = aggr_time_limit_fmask(false); 694 val = DIV_ROUND_CLOSEST(limit, 100); 695 if (val > field_max(fmask)) { 696 /* Have to use pulse generator 1 (millisecond granularity) */ 697 gran_sel = AGGR_GRAN_SEL_FMASK; 698 val = DIV_ROUND_CLOSEST(limit, 1000); 699 } else { 700 /* We can use pulse generator 0 (100 usec granularity) */ 701 gran_sel = 0; 702 } 703 704 return gran_sel | u32_encode_bits(val, fmask); 705 } 706 707 static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled) 708 { 709 u32 val = enabled ? 1 : 0; 710 711 if (version < IPA_VERSION_4_5) 712 return u32_encode_bits(val, aggr_sw_eof_active_fmask(true)); 713 714 return u32_encode_bits(val, aggr_sw_eof_active_fmask(false)); 715 } 716 717 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) 718 { 719 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); 720 enum ipa_version version = endpoint->ipa->version; 721 u32 val = 0; 722 723 if (endpoint->data->aggregation) { 724 if (!endpoint->toward_ipa) { 725 bool close_eof; 726 u32 limit; 727 728 val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); 729 val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); 730 731 limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE); 732 val |= aggr_byte_limit_encoded(version, limit); 733 734 limit = IPA_AGGR_TIME_LIMIT; 735 val |= aggr_time_limit_encoded(version, limit); 736 737 /* AGGR_PKT_LIMIT is 0 (unlimited) */ 738 739 close_eof = endpoint->data->rx.aggr_close_eof; 740 val |= aggr_sw_eof_active_encoded(version, close_eof); 741 742 /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */ 743 } else { 744 val |= u32_encode_bits(IPA_ENABLE_DEAGGR, 745 AGGR_EN_FMASK); 746 val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK); 747 /* other fields ignored */ 748 } 749 /* AGGR_FORCE_CLOSE is 0 */ 750 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ 751 } else { 752 val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK); 753 /* other fields ignored */ 754 } 755 756 iowrite32(val, endpoint->ipa->reg_virt + offset); 757 } 758 759 /* Return the Qtime-based head-of-line blocking timer value that 760 * represents the given number of microseconds. The result 761 * includes both the timer value and the selected timer granularity. 762 */ 763 static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds) 764 { 765 u32 gran_sel; 766 u32 val; 767 768 /* IPA v4.5 expresses time limits using Qtime. The AP has 769 * pulse generators 0 and 1 available, which were configured 770 * in ipa_qtime_config() to have granularity 100 usec and 771 * 1 msec, respectively. Use pulse generator 0 if possible, 772 * otherwise fall back to pulse generator 1. 773 */ 774 val = DIV_ROUND_CLOSEST(microseconds, 100); 775 if (val > field_max(TIME_LIMIT_FMASK)) { 776 /* Have to use pulse generator 1 (millisecond granularity) */ 777 gran_sel = GRAN_SEL_FMASK; 778 val = DIV_ROUND_CLOSEST(microseconds, 1000); 779 } else { 780 /* We can use pulse generator 0 (100 usec granularity) */ 781 gran_sel = 0; 782 } 783 784 return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK); 785 } 786 787 /* The head-of-line blocking timer is defined as a tick count. For 788 * IPA version 4.5 the tick count is based on the Qtimer, which is 789 * derived from the 19.2 MHz SoC XO clock. For older IPA versions 790 * each tick represents 128 cycles of the IPA core clock. 791 * 792 * Return the encoded value that should be written to that register 793 * that represents the timeout period provided. For IPA v4.2 this 794 * encodes a base and scale value, while for earlier versions the 795 * value is a simple tick count. 796 */ 797 static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) 798 { 799 u32 width; 800 u32 scale; 801 u64 ticks; 802 u64 rate; 803 u32 high; 804 u32 val; 805 806 if (!microseconds) 807 return 0; /* Nothing to compute if timer period is 0 */ 808 809 if (ipa->version >= IPA_VERSION_4_5) 810 return hol_block_timer_qtime_val(ipa, microseconds); 811 812 /* Use 64 bit arithmetic to avoid overflow... */ 813 rate = ipa_clock_rate(ipa); 814 ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); 815 /* ...but we still need to fit into a 32-bit register */ 816 WARN_ON(ticks > U32_MAX); 817 818 /* IPA v3.5.1 through v4.1 just record the tick count */ 819 if (ipa->version < IPA_VERSION_4_2) 820 return (u32)ticks; 821 822 /* For IPA v4.2, the tick count is represented by base and 823 * scale fields within the 32-bit timer register, where: 824 * ticks = base << scale; 825 * The best precision is achieved when the base value is as 826 * large as possible. Find the highest set bit in the tick 827 * count, and extract the number of bits in the base field 828 * such that high bit is included. 829 */ 830 high = fls(ticks); /* 1..32 */ 831 width = HWEIGHT32(BASE_VALUE_FMASK); 832 scale = high > width ? high - width : 0; 833 if (scale) { 834 /* If we're scaling, round up to get a closer result */ 835 ticks += 1 << (scale - 1); 836 /* High bit was set, so rounding might have affected it */ 837 if (fls(ticks) != high) 838 scale++; 839 } 840 841 val = u32_encode_bits(scale, SCALE_FMASK); 842 val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK); 843 844 return val; 845 } 846 847 /* If microseconds is 0, timeout is immediate */ 848 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, 849 u32 microseconds) 850 { 851 u32 endpoint_id = endpoint->endpoint_id; 852 struct ipa *ipa = endpoint->ipa; 853 u32 offset; 854 u32 val; 855 856 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); 857 val = hol_block_timer_val(ipa, microseconds); 858 iowrite32(val, ipa->reg_virt + offset); 859 } 860 861 static void 862 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) 863 { 864 u32 endpoint_id = endpoint->endpoint_id; 865 u32 offset; 866 u32 val; 867 868 val = enable ? HOL_BLOCK_EN_FMASK : 0; 869 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id); 870 iowrite32(val, endpoint->ipa->reg_virt + offset); 871 } 872 873 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) 874 { 875 u32 i; 876 877 for (i = 0; i < IPA_ENDPOINT_MAX; i++) { 878 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; 879 880 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) 881 continue; 882 883 ipa_endpoint_init_hol_block_timer(endpoint, 0); 884 ipa_endpoint_init_hol_block_enable(endpoint, true); 885 } 886 } 887 888 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) 889 { 890 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); 891 u32 val = 0; 892 893 if (!endpoint->toward_ipa) 894 return; /* Register not valid for RX endpoints */ 895 896 /* DEAGGR_HDR_LEN is 0 */ 897 /* PACKET_OFFSET_VALID is 0 */ 898 /* PACKET_OFFSET_LOCATION is ignored (not valid) */ 899 /* MAX_PACKET_LEN is 0 (not enforced) */ 900 901 iowrite32(val, endpoint->ipa->reg_virt + offset); 902 } 903 904 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) 905 { 906 u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id); 907 struct ipa *ipa = endpoint->ipa; 908 u32 val; 909 910 val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group); 911 iowrite32(val, ipa->reg_virt + offset); 912 } 913 914 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) 915 { 916 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); 917 u32 val = 0; 918 919 if (!endpoint->toward_ipa) 920 return; /* Register not valid for RX endpoints */ 921 922 /* Low-order byte configures primary packet processing */ 923 val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK); 924 925 /* Second byte configures replicated packet processing */ 926 val |= u32_encode_bits(endpoint->data->tx.seq_rep_type, 927 SEQ_REP_TYPE_FMASK); 928 929 iowrite32(val, endpoint->ipa->reg_virt + offset); 930 } 931 932 /** 933 * ipa_endpoint_skb_tx() - Transmit a socket buffer 934 * @endpoint: Endpoint pointer 935 * @skb: Socket buffer to send 936 * 937 * Returns: 0 if successful, or a negative error code 938 */ 939 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb) 940 { 941 struct gsi_trans *trans; 942 u32 nr_frags; 943 int ret; 944 945 /* Make sure source endpoint's TLV FIFO has enough entries to 946 * hold the linear portion of the skb and all its fragments. 947 * If not, see if we can linearize it before giving up. 948 */ 949 nr_frags = skb_shinfo(skb)->nr_frags; 950 if (1 + nr_frags > endpoint->trans_tre_max) { 951 if (skb_linearize(skb)) 952 return -E2BIG; 953 nr_frags = 0; 954 } 955 956 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags); 957 if (!trans) 958 return -EBUSY; 959 960 ret = gsi_trans_skb_add(trans, skb); 961 if (ret) 962 goto err_trans_free; 963 trans->data = skb; /* transaction owns skb now */ 964 965 gsi_trans_commit(trans, !netdev_xmit_more()); 966 967 return 0; 968 969 err_trans_free: 970 gsi_trans_free(trans); 971 972 return -ENOMEM; 973 } 974 975 static void ipa_endpoint_status(struct ipa_endpoint *endpoint) 976 { 977 u32 endpoint_id = endpoint->endpoint_id; 978 struct ipa *ipa = endpoint->ipa; 979 u32 val = 0; 980 u32 offset; 981 982 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); 983 984 if (endpoint->data->status_enable) { 985 val |= STATUS_EN_FMASK; 986 if (endpoint->toward_ipa) { 987 enum ipa_endpoint_name name; 988 u32 status_endpoint_id; 989 990 name = endpoint->data->tx.status_endpoint; 991 status_endpoint_id = ipa->name_map[name]->endpoint_id; 992 993 val |= u32_encode_bits(status_endpoint_id, 994 STATUS_ENDP_FMASK); 995 } 996 /* STATUS_LOCATION is 0, meaning status element precedes 997 * packet (not present for IPA v4.5) 998 */ 999 /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */ 1000 } 1001 1002 iowrite32(val, ipa->reg_virt + offset); 1003 } 1004 1005 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) 1006 { 1007 struct gsi_trans *trans; 1008 bool doorbell = false; 1009 struct page *page; 1010 u32 offset; 1011 u32 len; 1012 int ret; 1013 1014 page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE)); 1015 if (!page) 1016 return -ENOMEM; 1017 1018 trans = ipa_endpoint_trans_alloc(endpoint, 1); 1019 if (!trans) 1020 goto err_free_pages; 1021 1022 /* Offset the buffer to make space for skb headroom */ 1023 offset = NET_SKB_PAD; 1024 len = IPA_RX_BUFFER_SIZE - offset; 1025 1026 ret = gsi_trans_page_add(trans, page, len, offset); 1027 if (ret) 1028 goto err_trans_free; 1029 trans->data = page; /* transaction owns page now */ 1030 1031 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { 1032 doorbell = true; 1033 endpoint->replenish_ready = 0; 1034 } 1035 1036 gsi_trans_commit(trans, doorbell); 1037 1038 return 0; 1039 1040 err_trans_free: 1041 gsi_trans_free(trans); 1042 err_free_pages: 1043 __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); 1044 1045 return -ENOMEM; 1046 } 1047 1048 /** 1049 * ipa_endpoint_replenish() - Replenish endpoint receive buffers 1050 * @endpoint: Endpoint to be replenished 1051 * @add_one: Whether this is replacing a just-consumed buffer 1052 * 1053 * The IPA hardware can hold a fixed number of receive buffers for an RX 1054 * endpoint, based on the number of entries in the underlying channel ring 1055 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many 1056 * more receive buffers can be supplied to the hardware. Replenishing for 1057 * an endpoint can be disabled, in which case requests to replenish a 1058 * buffer are "saved", and transferred to the backlog once it is re-enabled 1059 * again. 1060 */ 1061 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one) 1062 { 1063 struct gsi *gsi; 1064 u32 backlog; 1065 1066 if (!endpoint->replenish_enabled) { 1067 if (add_one) 1068 atomic_inc(&endpoint->replenish_saved); 1069 return; 1070 } 1071 1072 while (atomic_dec_not_zero(&endpoint->replenish_backlog)) 1073 if (ipa_endpoint_replenish_one(endpoint)) 1074 goto try_again_later; 1075 if (add_one) 1076 atomic_inc(&endpoint->replenish_backlog); 1077 1078 return; 1079 1080 try_again_later: 1081 /* The last one didn't succeed, so fix the backlog */ 1082 backlog = atomic_inc_return(&endpoint->replenish_backlog); 1083 1084 if (add_one) 1085 atomic_inc(&endpoint->replenish_backlog); 1086 1087 /* Whenever a receive buffer transaction completes we'll try to 1088 * replenish again. It's unlikely, but if we fail to supply even 1089 * one buffer, nothing will trigger another replenish attempt. 1090 * Receive buffer transactions use one TRE, so schedule work to 1091 * try replenishing again if our backlog is *all* available TREs. 1092 */ 1093 gsi = &endpoint->ipa->gsi; 1094 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) 1095 schedule_delayed_work(&endpoint->replenish_work, 1096 msecs_to_jiffies(1)); 1097 } 1098 1099 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) 1100 { 1101 struct gsi *gsi = &endpoint->ipa->gsi; 1102 u32 max_backlog; 1103 u32 saved; 1104 1105 endpoint->replenish_enabled = true; 1106 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) 1107 atomic_add(saved, &endpoint->replenish_backlog); 1108 1109 /* Start replenishing if hardware currently has no buffers */ 1110 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); 1111 if (atomic_read(&endpoint->replenish_backlog) == max_backlog) 1112 ipa_endpoint_replenish(endpoint, false); 1113 } 1114 1115 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) 1116 { 1117 u32 backlog; 1118 1119 endpoint->replenish_enabled = false; 1120 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) 1121 atomic_add(backlog, &endpoint->replenish_saved); 1122 } 1123 1124 static void ipa_endpoint_replenish_work(struct work_struct *work) 1125 { 1126 struct delayed_work *dwork = to_delayed_work(work); 1127 struct ipa_endpoint *endpoint; 1128 1129 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work); 1130 1131 ipa_endpoint_replenish(endpoint, false); 1132 } 1133 1134 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, 1135 void *data, u32 len, u32 extra) 1136 { 1137 struct sk_buff *skb; 1138 1139 skb = __dev_alloc_skb(len, GFP_ATOMIC); 1140 if (skb) { 1141 skb_put(skb, len); 1142 memcpy(skb->data, data, len); 1143 skb->truesize += extra; 1144 } 1145 1146 /* Now receive it, or drop it if there's no netdev */ 1147 if (endpoint->netdev) 1148 ipa_modem_skb_rx(endpoint->netdev, skb); 1149 else if (skb) 1150 dev_kfree_skb_any(skb); 1151 } 1152 1153 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, 1154 struct page *page, u32 len) 1155 { 1156 struct sk_buff *skb; 1157 1158 /* Nothing to do if there's no netdev */ 1159 if (!endpoint->netdev) 1160 return false; 1161 1162 /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */ 1163 skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE); 1164 if (skb) { 1165 /* Reserve the headroom and account for the data */ 1166 skb_reserve(skb, NET_SKB_PAD); 1167 skb_put(skb, len); 1168 } 1169 1170 /* Receive the buffer (or record drop if unable to build it) */ 1171 ipa_modem_skb_rx(endpoint->netdev, skb); 1172 1173 return skb != NULL; 1174 } 1175 1176 /* The format of a packet status element is the same for several status 1177 * types (opcodes). Other types aren't currently supported. 1178 */ 1179 static bool ipa_status_format_packet(enum ipa_status_opcode opcode) 1180 { 1181 switch (opcode) { 1182 case IPA_STATUS_OPCODE_PACKET: 1183 case IPA_STATUS_OPCODE_DROPPED_PACKET: 1184 case IPA_STATUS_OPCODE_SUSPENDED_PACKET: 1185 case IPA_STATUS_OPCODE_PACKET_2ND_PASS: 1186 return true; 1187 default: 1188 return false; 1189 } 1190 } 1191 1192 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, 1193 const struct ipa_status *status) 1194 { 1195 u32 endpoint_id; 1196 1197 if (!ipa_status_format_packet(status->opcode)) 1198 return true; 1199 if (!status->pkt_len) 1200 return true; 1201 endpoint_id = u8_get_bits(status->endp_dst_idx, 1202 IPA_STATUS_DST_IDX_FMASK); 1203 if (endpoint_id != endpoint->endpoint_id) 1204 return true; 1205 1206 return false; /* Don't skip this packet, process it */ 1207 } 1208 1209 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint, 1210 const struct ipa_status *status) 1211 { 1212 struct ipa_endpoint *command_endpoint; 1213 struct ipa *ipa = endpoint->ipa; 1214 u32 endpoint_id; 1215 1216 if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK)) 1217 return false; /* No valid tag */ 1218 1219 /* The status contains a valid tag. We know the packet was sent to 1220 * this endpoint (already verified by ipa_endpoint_status_skip()). 1221 * If the packet came from the AP->command TX endpoint we know 1222 * this packet was sent as part of the pipeline clear process. 1223 */ 1224 endpoint_id = u8_get_bits(status->endp_src_idx, 1225 IPA_STATUS_SRC_IDX_FMASK); 1226 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; 1227 if (endpoint_id == command_endpoint->endpoint_id) { 1228 complete(&ipa->completion); 1229 } else { 1230 dev_err(&ipa->pdev->dev, 1231 "unexpected tagged packet from endpoint %u\n", 1232 endpoint_id); 1233 } 1234 1235 return true; 1236 } 1237 1238 /* Return whether the status indicates the packet should be dropped */ 1239 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, 1240 const struct ipa_status *status) 1241 { 1242 u32 val; 1243 1244 /* If the status indicates a tagged transfer, we'll drop the packet */ 1245 if (ipa_endpoint_status_tag(endpoint, status)) 1246 return true; 1247 1248 /* Deaggregation exceptions we drop; all other types we consume */ 1249 if (status->exception) 1250 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR; 1251 1252 /* Drop the packet if it fails to match a routing rule; otherwise no */ 1253 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); 1254 1255 return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); 1256 } 1257 1258 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, 1259 struct page *page, u32 total_len) 1260 { 1261 void *data = page_address(page) + NET_SKB_PAD; 1262 u32 unused = IPA_RX_BUFFER_SIZE - total_len; 1263 u32 resid = total_len; 1264 1265 while (resid) { 1266 const struct ipa_status *status = data; 1267 u32 align; 1268 u32 len; 1269 1270 if (resid < sizeof(*status)) { 1271 dev_err(&endpoint->ipa->pdev->dev, 1272 "short message (%u bytes < %zu byte status)\n", 1273 resid, sizeof(*status)); 1274 break; 1275 } 1276 1277 /* Skip over status packets that lack packet data */ 1278 if (ipa_endpoint_status_skip(endpoint, status)) { 1279 data += sizeof(*status); 1280 resid -= sizeof(*status); 1281 continue; 1282 } 1283 1284 /* Compute the amount of buffer space consumed by the packet, 1285 * including the status element. If the hardware is configured 1286 * to pad packet data to an aligned boundary, account for that. 1287 * And if checksum offload is enabled a trailer containing 1288 * computed checksum information will be appended. 1289 */ 1290 align = endpoint->data->rx.pad_align ? : 1; 1291 len = le16_to_cpu(status->pkt_len); 1292 len = sizeof(*status) + ALIGN(len, align); 1293 if (endpoint->data->checksum) 1294 len += sizeof(struct rmnet_map_dl_csum_trailer); 1295 1296 if (!ipa_endpoint_status_drop(endpoint, status)) { 1297 void *data2; 1298 u32 extra; 1299 u32 len2; 1300 1301 /* Client receives only packet data (no status) */ 1302 data2 = data + sizeof(*status); 1303 len2 = le16_to_cpu(status->pkt_len); 1304 1305 /* Have the true size reflect the extra unused space in 1306 * the original receive buffer. Distribute the "cost" 1307 * proportionately across all aggregated packets in the 1308 * buffer. 1309 */ 1310 extra = DIV_ROUND_CLOSEST(unused * len, total_len); 1311 ipa_endpoint_skb_copy(endpoint, data2, len2, extra); 1312 } 1313 1314 /* Consume status and the full packet it describes */ 1315 data += len; 1316 resid -= len; 1317 } 1318 } 1319 1320 /* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */ 1321 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint, 1322 struct gsi_trans *trans) 1323 { 1324 } 1325 1326 /* Complete transaction initiated in ipa_endpoint_replenish_one() */ 1327 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, 1328 struct gsi_trans *trans) 1329 { 1330 struct page *page; 1331 1332 ipa_endpoint_replenish(endpoint, true); 1333 1334 if (trans->cancelled) 1335 return; 1336 1337 /* Parse or build a socket buffer using the actual received length */ 1338 page = trans->data; 1339 if (endpoint->data->status_enable) 1340 ipa_endpoint_status_parse(endpoint, page, trans->len); 1341 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) 1342 trans->data = NULL; /* Pages have been consumed */ 1343 } 1344 1345 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, 1346 struct gsi_trans *trans) 1347 { 1348 if (endpoint->toward_ipa) 1349 ipa_endpoint_tx_complete(endpoint, trans); 1350 else 1351 ipa_endpoint_rx_complete(endpoint, trans); 1352 } 1353 1354 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, 1355 struct gsi_trans *trans) 1356 { 1357 if (endpoint->toward_ipa) { 1358 struct ipa *ipa = endpoint->ipa; 1359 1360 /* Nothing to do for command transactions */ 1361 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { 1362 struct sk_buff *skb = trans->data; 1363 1364 if (skb) 1365 dev_kfree_skb_any(skb); 1366 } 1367 } else { 1368 struct page *page = trans->data; 1369 1370 if (page) 1371 __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); 1372 } 1373 } 1374 1375 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) 1376 { 1377 u32 val; 1378 1379 /* ROUTE_DIS is 0 */ 1380 val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK); 1381 val |= ROUTE_DEF_HDR_TABLE_FMASK; 1382 val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK); 1383 val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK); 1384 val |= ROUTE_DEF_RETAIN_HDR_FMASK; 1385 1386 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); 1387 } 1388 1389 void ipa_endpoint_default_route_clear(struct ipa *ipa) 1390 { 1391 ipa_endpoint_default_route_set(ipa, 0); 1392 } 1393 1394 /** 1395 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active 1396 * @endpoint: Endpoint to be reset 1397 * 1398 * If aggregation is active on an RX endpoint when a reset is performed 1399 * on its underlying GSI channel, a special sequence of actions must be 1400 * taken to ensure the IPA pipeline is properly cleared. 1401 * 1402 * Return: 0 if successful, or a negative error code 1403 */ 1404 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) 1405 { 1406 struct device *dev = &endpoint->ipa->pdev->dev; 1407 struct ipa *ipa = endpoint->ipa; 1408 struct gsi *gsi = &ipa->gsi; 1409 bool suspended = false; 1410 dma_addr_t addr; 1411 u32 retries; 1412 u32 len = 1; 1413 void *virt; 1414 int ret; 1415 1416 virt = kzalloc(len, GFP_KERNEL); 1417 if (!virt) 1418 return -ENOMEM; 1419 1420 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE); 1421 if (dma_mapping_error(dev, addr)) { 1422 ret = -ENOMEM; 1423 goto out_kfree; 1424 } 1425 1426 /* Force close aggregation before issuing the reset */ 1427 ipa_endpoint_force_close(endpoint); 1428 1429 /* Reset and reconfigure the channel with the doorbell engine 1430 * disabled. Then poll until we know aggregation is no longer 1431 * active. We'll re-enable the doorbell (if appropriate) when 1432 * we reset again below. 1433 */ 1434 gsi_channel_reset(gsi, endpoint->channel_id, false); 1435 1436 /* Make sure the channel isn't suspended */ 1437 suspended = ipa_endpoint_program_suspend(endpoint, false); 1438 1439 /* Start channel and do a 1 byte read */ 1440 ret = gsi_channel_start(gsi, endpoint->channel_id); 1441 if (ret) 1442 goto out_suspend_again; 1443 1444 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); 1445 if (ret) 1446 goto err_endpoint_stop; 1447 1448 /* Wait for aggregation to be closed on the channel */ 1449 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX; 1450 do { 1451 if (!ipa_endpoint_aggr_active(endpoint)) 1452 break; 1453 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 1454 } while (retries--); 1455 1456 /* Check one last time */ 1457 if (ipa_endpoint_aggr_active(endpoint)) 1458 dev_err(dev, "endpoint %u still active during reset\n", 1459 endpoint->endpoint_id); 1460 1461 gsi_trans_read_byte_done(gsi, endpoint->channel_id); 1462 1463 ret = gsi_channel_stop(gsi, endpoint->channel_id); 1464 if (ret) 1465 goto out_suspend_again; 1466 1467 /* Finally, reset and reconfigure the channel again (re-enabling 1468 * the doorbell engine if appropriate). Sleep for 1 millisecond to 1469 * complete the channel reset sequence. Finish by suspending the 1470 * channel again (if necessary). 1471 */ 1472 gsi_channel_reset(gsi, endpoint->channel_id, true); 1473 1474 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 1475 1476 goto out_suspend_again; 1477 1478 err_endpoint_stop: 1479 (void)gsi_channel_stop(gsi, endpoint->channel_id); 1480 out_suspend_again: 1481 if (suspended) 1482 (void)ipa_endpoint_program_suspend(endpoint, true); 1483 dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE); 1484 out_kfree: 1485 kfree(virt); 1486 1487 return ret; 1488 } 1489 1490 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) 1491 { 1492 u32 channel_id = endpoint->channel_id; 1493 struct ipa *ipa = endpoint->ipa; 1494 bool special; 1495 int ret = 0; 1496 1497 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation 1498 * is active, we need to handle things specially to recover. 1499 * All other cases just need to reset the underlying GSI channel. 1500 */ 1501 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && 1502 endpoint->data->aggregation; 1503 if (special && ipa_endpoint_aggr_active(endpoint)) 1504 ret = ipa_endpoint_reset_rx_aggr(endpoint); 1505 else 1506 gsi_channel_reset(&ipa->gsi, channel_id, true); 1507 1508 if (ret) 1509 dev_err(&ipa->pdev->dev, 1510 "error %d resetting channel %u for endpoint %u\n", 1511 ret, endpoint->channel_id, endpoint->endpoint_id); 1512 } 1513 1514 static void ipa_endpoint_program(struct ipa_endpoint *endpoint) 1515 { 1516 if (endpoint->toward_ipa) 1517 ipa_endpoint_program_delay(endpoint, false); 1518 else 1519 (void)ipa_endpoint_program_suspend(endpoint, false); 1520 ipa_endpoint_init_cfg(endpoint); 1521 ipa_endpoint_init_nat(endpoint); 1522 ipa_endpoint_init_hdr(endpoint); 1523 ipa_endpoint_init_hdr_ext(endpoint); 1524 ipa_endpoint_init_hdr_metadata_mask(endpoint); 1525 ipa_endpoint_init_mode(endpoint); 1526 ipa_endpoint_init_aggr(endpoint); 1527 ipa_endpoint_init_deaggr(endpoint); 1528 ipa_endpoint_init_rsrc_grp(endpoint); 1529 ipa_endpoint_init_seq(endpoint); 1530 ipa_endpoint_status(endpoint); 1531 } 1532 1533 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) 1534 { 1535 struct ipa *ipa = endpoint->ipa; 1536 struct gsi *gsi = &ipa->gsi; 1537 int ret; 1538 1539 ret = gsi_channel_start(gsi, endpoint->channel_id); 1540 if (ret) { 1541 dev_err(&ipa->pdev->dev, 1542 "error %d starting %cX channel %u for endpoint %u\n", 1543 ret, endpoint->toward_ipa ? 'T' : 'R', 1544 endpoint->channel_id, endpoint->endpoint_id); 1545 return ret; 1546 } 1547 1548 if (!endpoint->toward_ipa) { 1549 ipa_interrupt_suspend_enable(ipa->interrupt, 1550 endpoint->endpoint_id); 1551 ipa_endpoint_replenish_enable(endpoint); 1552 } 1553 1554 ipa->enabled |= BIT(endpoint->endpoint_id); 1555 1556 return 0; 1557 } 1558 1559 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) 1560 { 1561 u32 mask = BIT(endpoint->endpoint_id); 1562 struct ipa *ipa = endpoint->ipa; 1563 struct gsi *gsi = &ipa->gsi; 1564 int ret; 1565 1566 if (!(ipa->enabled & mask)) 1567 return; 1568 1569 ipa->enabled ^= mask; 1570 1571 if (!endpoint->toward_ipa) { 1572 ipa_endpoint_replenish_disable(endpoint); 1573 ipa_interrupt_suspend_disable(ipa->interrupt, 1574 endpoint->endpoint_id); 1575 } 1576 1577 /* Note that if stop fails, the channel's state is not well-defined */ 1578 ret = gsi_channel_stop(gsi, endpoint->channel_id); 1579 if (ret) 1580 dev_err(&ipa->pdev->dev, 1581 "error %d attempting to stop endpoint %u\n", ret, 1582 endpoint->endpoint_id); 1583 } 1584 1585 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) 1586 { 1587 struct device *dev = &endpoint->ipa->pdev->dev; 1588 struct gsi *gsi = &endpoint->ipa->gsi; 1589 bool stop_channel; 1590 int ret; 1591 1592 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) 1593 return; 1594 1595 if (!endpoint->toward_ipa) { 1596 ipa_endpoint_replenish_disable(endpoint); 1597 (void)ipa_endpoint_program_suspend(endpoint, true); 1598 } 1599 1600 /* Starting with IPA v4.0, endpoints are suspended by stopping the 1601 * underlying GSI channel rather than using endpoint suspend mode. 1602 */ 1603 stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0; 1604 ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); 1605 if (ret) 1606 dev_err(dev, "error %d suspending channel %u\n", ret, 1607 endpoint->channel_id); 1608 } 1609 1610 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) 1611 { 1612 struct device *dev = &endpoint->ipa->pdev->dev; 1613 struct gsi *gsi = &endpoint->ipa->gsi; 1614 bool start_channel; 1615 int ret; 1616 1617 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) 1618 return; 1619 1620 if (!endpoint->toward_ipa) 1621 (void)ipa_endpoint_program_suspend(endpoint, false); 1622 1623 /* Starting with IPA v4.0, the underlying GSI channel must be 1624 * restarted for resume. 1625 */ 1626 start_channel = endpoint->ipa->version >= IPA_VERSION_4_0; 1627 ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); 1628 if (ret) 1629 dev_err(dev, "error %d resuming channel %u\n", ret, 1630 endpoint->channel_id); 1631 else if (!endpoint->toward_ipa) 1632 ipa_endpoint_replenish_enable(endpoint); 1633 } 1634 1635 void ipa_endpoint_suspend(struct ipa *ipa) 1636 { 1637 if (!ipa->setup_complete) 1638 return; 1639 1640 if (ipa->modem_netdev) 1641 ipa_modem_suspend(ipa->modem_netdev); 1642 1643 ipa_cmd_pipeline_clear(ipa); 1644 1645 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 1646 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 1647 } 1648 1649 void ipa_endpoint_resume(struct ipa *ipa) 1650 { 1651 if (!ipa->setup_complete) 1652 return; 1653 1654 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 1655 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 1656 1657 if (ipa->modem_netdev) 1658 ipa_modem_resume(ipa->modem_netdev); 1659 } 1660 1661 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) 1662 { 1663 struct gsi *gsi = &endpoint->ipa->gsi; 1664 u32 channel_id = endpoint->channel_id; 1665 1666 /* Only AP endpoints get set up */ 1667 if (endpoint->ee_id != GSI_EE_AP) 1668 return; 1669 1670 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); 1671 if (!endpoint->toward_ipa) { 1672 /* RX transactions require a single TRE, so the maximum 1673 * backlog is the same as the maximum outstanding TREs. 1674 */ 1675 endpoint->replenish_enabled = false; 1676 atomic_set(&endpoint->replenish_saved, 1677 gsi_channel_tre_max(gsi, endpoint->channel_id)); 1678 atomic_set(&endpoint->replenish_backlog, 0); 1679 INIT_DELAYED_WORK(&endpoint->replenish_work, 1680 ipa_endpoint_replenish_work); 1681 } 1682 1683 ipa_endpoint_program(endpoint); 1684 1685 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); 1686 } 1687 1688 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) 1689 { 1690 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); 1691 1692 if (!endpoint->toward_ipa) 1693 cancel_delayed_work_sync(&endpoint->replenish_work); 1694 1695 ipa_endpoint_reset(endpoint); 1696 } 1697 1698 void ipa_endpoint_setup(struct ipa *ipa) 1699 { 1700 u32 initialized = ipa->initialized; 1701 1702 ipa->set_up = 0; 1703 while (initialized) { 1704 u32 endpoint_id = __ffs(initialized); 1705 1706 initialized ^= BIT(endpoint_id); 1707 1708 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); 1709 } 1710 } 1711 1712 void ipa_endpoint_teardown(struct ipa *ipa) 1713 { 1714 u32 set_up = ipa->set_up; 1715 1716 while (set_up) { 1717 u32 endpoint_id = __fls(set_up); 1718 1719 set_up ^= BIT(endpoint_id); 1720 1721 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); 1722 } 1723 ipa->set_up = 0; 1724 } 1725 1726 int ipa_endpoint_config(struct ipa *ipa) 1727 { 1728 struct device *dev = &ipa->pdev->dev; 1729 u32 initialized; 1730 u32 rx_base; 1731 u32 rx_mask; 1732 u32 tx_mask; 1733 int ret = 0; 1734 u32 max; 1735 u32 val; 1736 1737 /* Find out about the endpoints supplied by the hardware, and ensure 1738 * the highest one doesn't exceed the number we support. 1739 */ 1740 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); 1741 1742 /* Our RX is an IPA producer */ 1743 rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK); 1744 max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK); 1745 if (max > IPA_ENDPOINT_MAX) { 1746 dev_err(dev, "too many endpoints (%u > %u)\n", 1747 max, IPA_ENDPOINT_MAX); 1748 return -EINVAL; 1749 } 1750 rx_mask = GENMASK(max - 1, rx_base); 1751 1752 /* Our TX is an IPA consumer */ 1753 max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK); 1754 tx_mask = GENMASK(max - 1, 0); 1755 1756 ipa->available = rx_mask | tx_mask; 1757 1758 /* Check for initialized endpoints not supported by the hardware */ 1759 if (ipa->initialized & ~ipa->available) { 1760 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", 1761 ipa->initialized & ~ipa->available); 1762 ret = -EINVAL; /* Report other errors too */ 1763 } 1764 1765 initialized = ipa->initialized; 1766 while (initialized) { 1767 u32 endpoint_id = __ffs(initialized); 1768 struct ipa_endpoint *endpoint; 1769 1770 initialized ^= BIT(endpoint_id); 1771 1772 /* Make sure it's pointing in the right direction */ 1773 endpoint = &ipa->endpoint[endpoint_id]; 1774 if ((endpoint_id < rx_base) != endpoint->toward_ipa) { 1775 dev_err(dev, "endpoint id %u wrong direction\n", 1776 endpoint_id); 1777 ret = -EINVAL; 1778 } 1779 } 1780 1781 return ret; 1782 } 1783 1784 void ipa_endpoint_deconfig(struct ipa *ipa) 1785 { 1786 ipa->available = 0; /* Nothing more to do */ 1787 } 1788 1789 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, 1790 const struct ipa_gsi_endpoint_data *data) 1791 { 1792 struct ipa_endpoint *endpoint; 1793 1794 endpoint = &ipa->endpoint[data->endpoint_id]; 1795 1796 if (data->ee_id == GSI_EE_AP) 1797 ipa->channel_map[data->channel_id] = endpoint; 1798 ipa->name_map[name] = endpoint; 1799 1800 endpoint->ipa = ipa; 1801 endpoint->ee_id = data->ee_id; 1802 endpoint->channel_id = data->channel_id; 1803 endpoint->endpoint_id = data->endpoint_id; 1804 endpoint->toward_ipa = data->toward_ipa; 1805 endpoint->data = &data->endpoint.config; 1806 1807 ipa->initialized |= BIT(endpoint->endpoint_id); 1808 } 1809 1810 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) 1811 { 1812 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); 1813 1814 memset(endpoint, 0, sizeof(*endpoint)); 1815 } 1816 1817 void ipa_endpoint_exit(struct ipa *ipa) 1818 { 1819 u32 initialized = ipa->initialized; 1820 1821 while (initialized) { 1822 u32 endpoint_id = __fls(initialized); 1823 1824 initialized ^= BIT(endpoint_id); 1825 1826 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); 1827 } 1828 memset(ipa->name_map, 0, sizeof(ipa->name_map)); 1829 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); 1830 } 1831 1832 /* Returns a bitmask of endpoints that support filtering, or 0 on error */ 1833 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, 1834 const struct ipa_gsi_endpoint_data *data) 1835 { 1836 enum ipa_endpoint_name name; 1837 u32 filter_map; 1838 1839 if (!ipa_endpoint_data_valid(ipa, count, data)) 1840 return 0; /* Error */ 1841 1842 ipa->initialized = 0; 1843 1844 filter_map = 0; 1845 for (name = 0; name < count; name++, data++) { 1846 if (ipa_gsi_endpoint_data_empty(data)) 1847 continue; /* Skip over empty slots */ 1848 1849 ipa_endpoint_init_one(ipa, name, data); 1850 1851 if (data->endpoint.filter_support) 1852 filter_map |= BIT(data->endpoint_id); 1853 } 1854 1855 if (!ipa_filter_map_valid(ipa, filter_map)) 1856 goto err_endpoint_exit; 1857 1858 return filter_map; /* Non-zero bitmask */ 1859 1860 err_endpoint_exit: 1861 ipa_endpoint_exit(ipa); 1862 1863 return 0; /* Error */ 1864 } 1865