// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_clock.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_LOG			= 0x10,
	IPA_STATUS_OPCODE_DCMP			= 0x20,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
	/* The meaning of the next value depends on the IP version */
	IPA_STATUS_EXCEPTION_NAT		= 0x40,		/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};
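
/* The layout above works out to 32 bytes per status element;
 * ipa_endpoint_validate_build() below verifies that the structure
 * size is a multiple of 4.
 */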

/* Field masks for struct ipa_status structure fields */

#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
#define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
#define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)

#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)

#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 *	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 *	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
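
/* For reference (the details live in <linux/if_rmnet.h>, not here):
 * struct rmnet_map_header is four bytes -- a pad-length/flags byte,
 * a one-byte mux_id, then a two-byte big-endian pkt_len.  The header
 * field offsets programmed below are derived from that layout using
 * offsetof(), so mux_id lands at offset 1 and pkt_len at offset 2.
 */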

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
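
/* Illustrative example of the computation above (the exact numbers
 * depend on IPA_MTU and the skb overhead, which are defined elsewhere;
 * here we assume a 1500 byte MTU and a few hundred bytes of overhead
 * on a 4 KB page system):  8192 - (1500 + ~384) = ~6308, so the
 * aggregation byte limit is programmed as 6 KB, leaving room for a
 * full MTU of data after the limit is crossed.
 */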

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);

			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
			val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* The head-of-line blocking timer is defined as a tick count, where each
 * tick represents 128 cycles of the IPA core clock.  Return the value
 * that should be written to the timer register to represent the given
 * timeout period.
 */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 just records the tick count */
	if (ipa->version == IPA_VERSION_3_5_1)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that that high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}
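
/* Worked example for the base/scale encoding above.  The clock rate
 * and the width of BASE_VALUE_FMASK used here are assumptions for
 * illustration only.  With a 19.2 MHz core clock, a 1 second timeout
 * is 1000000 * 19200000 / (128 * 1000000) = 150000 ticks.  If the
 * base field were 5 bits wide:  fls(150000) is 18, so scale is
 * 18 - 5 = 13; rounding adds 1 << 12 giving 154096 (high bit is
 * unchanged), and the encoded base is 154096 >> 13 = 18, which
 * represents 18 << 13 = 147456 ticks.
 */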

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Sequencer type is made up of four nibbles */
	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* The second two apply to replicated packets */
	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
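
/* Illustrative example of the nibble encoding above (the value is
 * made up; real seq_type values come from the platform configuration
 * data):  a seq_type of 0x0004 programs HPS sequencer type 4 and
 * leaves the DPS and replicated-packet sequencer types zero.
 */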

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to send to hardware
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
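
/* Replenish bookkeeping:  replenish_backlog counts receive buffers
 * the hardware still has room for, and replenish_saved holds that
 * count while replenishing is disabled.  Enabling transfers the saved
 * count back to the backlog and kicks off a replenish cycle if the
 * hardware currently holds no buffers; disabling does the reverse.
 */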

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}
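
/* Received data reaches the network stack one of two ways.  When
 * status processing is enabled, packets are parsed out of the receive
 * buffer and their payloads copied into freshly-allocated socket
 * buffers (ipa_endpoint_skb_copy()).  Otherwise the entire receive
 * page is wrapped in a socket buffer (ipa_endpoint_skb_build()).
 */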

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
 * aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; others we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}
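
/* With status enabled, a receive buffer is parsed below as a sequence
 * of status elements, each optionally followed by the packet data it
 * describes (padded per rx.pad_align, plus a checksum trailer when
 * checksum offload is enabled).  A sketch of the layout:
 *
 *   +--------+---------------------+--------+---------------+- - -
 *   | status | packet (pad, csum)  | status | packet ...    |
 *   +--------+---------------------+--------+---------------+- - -
 */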

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
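 *
 * Roughly (mirroring the function body below):  aggregation is
 * force-closed, the channel is reset with the doorbell engine
 * disabled, the channel is started and a one byte read is issued,
 * we poll until aggregation is no longer active, and the channel
 * is then stopped and reset once more.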
 *
 * Return: 0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	bool legacy;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	gsi_channel_reset(gsi, endpoint->channel_id, legacy);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	bool legacy;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 *
	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	special = !endpoint->toward_ipa && endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, legacy);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_program_delay(endpoint, false);
	else
		(void)ipa_endpoint_program_suspend(endpoint, false);
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_cmd_tag_process(ipa);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}
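
/* Illustrative example of the mask computation below (the values are
 * made up):  if the FLAVOR_0 register reported the lowest RX (producer)
 * endpoint as 10 with 6 RX pipes, rx_mask would be GENMASK(15, 10);
 * with 8 TX (consumer) pipes, tx_mask would be GENMASK(7, 0).
 */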

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}