// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_clock.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT			500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#ifdef IPA_VALIDATE

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static u32 aggr_byte_limit_max(enum ipa_version version)
{
	if (version < IPA_VERSION_4_5)
		return field_max(aggr_byte_limit_fmask(true));

	return field_max(aggr_byte_limit_fmask(false));
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;
	u32 limit;

	/* Not sure where this constraint comes from... */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check ensures we don't define a receive buffer size
	 * that would exceed what we can represent in the field that
	 * is used to program its size.
	 */
	limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
	if (limit < IPA_RX_BUFFER_SIZE) {
		dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
			IPA_RX_BUFFER_SIZE, limit);
		return false;
	}
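
	/* Worked example of the check above, with illustrative numbers
	 * (field widths vary by IPA version): if the aggregation byte
	 * limit field were 6 bits wide, aggr_byte_limit_max() would
	 * return 63, so limit = 63 * 1024 + IPA_MTU plus a few hundred
	 * bytes of skb overhead -- comfortably above the 8192-byte
	 * IPA_RX_BUFFER_SIZE, so the check passes.
	 */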

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 *	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 *	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
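
/* For example (assuming the 4-byte struct rmnet_map_header defined in
 * <linux/if_rmnet.h>), the checksum header offset programmed above is
 * 4 / sizeof(u32) == 1: the UL checksum header starts one 4-byte unit
 * past the start of the packet, immediately after the QMAP header.
 */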

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);
		enum ipa_version version = ipa->version;

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 offset;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			offset = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, offset);

			/* Where IPA will write the length */
			offset = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version == IPA_VERSION_4_5)
				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
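
/* Illustration of the RX programming above, assuming the 4-byte QMAP
 * header layout from <linux/if_rmnet.h>:
 *
 *	   byte 0      byte 1	 bytes 2-3
 *	+-----------+----------+-----------+
 *	| pad/flags |  mux_id  |  pkt_len  |
 *	+-----------+----------+-----------+
 *
 * OFST_METADATA is programmed with offsetof(struct rmnet_map_header,
 * mux_id) == 1, and HDR_OFST_PKT_SIZE with offsetof(..., pkt_len) == 2,
 * so the hardware deposits the extracted mux_id byte and the (big
 * endian) packet length directly into the header it prepends.
 */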

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version == IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->data->qmap && !endpoint->toward_ipa) {
			u32 offset;

			offset = offsetof(struct rmnet_map_header, pkt_len);
			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(offset,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
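
/* Example of the byte swap above: IPA_ENDPOINT_QMAP_METADATA_MASK is
 * 0x000000ff in host byte order.  Because the header fields are big
 * endian, the mask is converted with cpu_to_be32(); on a little endian
 * AP the value actually written to the register is 0xff000000, which
 * selects the low-order byte of the 4-byte modem metadata -- the byte
 * holding the mux_id.
 */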

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
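
/* Worked example, with illustrative values: for the 8192-byte
 * IPA_RX_BUFFER_SIZE, an IPA_MTU of 1500 and a few hundred bytes of
 * skb overhead, roughly 6300 bytes remain, so the aggregation byte
 * limit is programmed as 6 KB.  The hardware can then cross that 6 KB
 * limit by up to a full MTU (plus overhead) while closing the frame
 * and the data still fits in the buffer.
 */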

/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
	u32 gran_sel;
	u32 fmask;
	u32 val;

	if (version < IPA_VERSION_4_5) {
		/* We set aggregation granularity in ipa_hardware_config() */
		limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);

		return u32_encode_bits(limit, aggr_time_limit_fmask(true));
	}

	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	fmask = aggr_time_limit_fmask(false);
	val = DIV_ROUND_CLOSEST(limit, 100);
	if (val > field_max(fmask)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = AGGR_GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(limit, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, fmask);
}
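
/* Worked example for the IPA v4.5 path: the 500 microsecond
 * IPA_AGGR_TIME_LIMIT divides evenly by the 100 microsecond
 * granularity of pulse generator 0, encoding a field value of 5 with
 * AGGR_GRAN_SEL clear.  With a hypothetical 5-bit field (maximum 31),
 * a 5000 microsecond limit would not fit (5000 / 100 = 50 > 31), so
 * pulse generator 1 would be selected instead and the field would
 * hold DIV_ROUND_CLOSEST(5000, 1000) = 5.
 */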

static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
	u32 val = enabled ? 1 : 0;

	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	enum ipa_version version = endpoint->ipa->version;
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			bool close_eof;
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			val |= aggr_byte_limit_encoded(version, limit);

			limit = IPA_AGGR_TIME_LIMIT;
			val |= aggr_time_limit_encoded(version, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			close_eof = endpoint->data->rx.aggr_close_eof;
			val |= aggr_sw_eof_active_encoded(version, close_eof);

			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds.  The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
	u32 gran_sel;
	u32 val;

	/* IPA v4.5 expresses time limits using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val > field_max(TIME_LIMIT_FMASK)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}

/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value that should be written to that register
 * that represents the timeout period provided.  For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version == IPA_VERSION_4_5)
		return hol_block_timer_qtime_val(ipa, microseconds);

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that the high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}
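
/* Worked example of the IPA v4.2 base/scale encoding, assuming a
 * hypothetical 5-bit BASE_VALUE field: a tick count of 74565 has its
 * highest set bit at position 17, so scale = 17 - 5 = 12.  Rounding
 * adds 1 << 11 = 2048 (leaving the high bit unchanged here), and the
 * register gets base = (74565 + 2048) >> 12 = 18 with scale = 12; the
 * hardware reconstitutes approximately 18 << 12 = 73728 ticks.
 */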

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Sequencer type is made up of four nibbles */
	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* The second two apply to replicated packets */
	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
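
/* Illustration of the nibble packing above (values are only examples
 * of the encoding, not known-good hardware configurations): a seq_type
 * of 0x0004 programs HPS sequencer type 4 with DPS type 0 and no
 * replication types, while 0x0806 programs HPS type 6, DPS type 0 and
 * HPS replication type 8.
 */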

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
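
/* Example of the TRE accounting above: an skb with a linear area and
 * three page fragments needs 1 + 3 = 4 TREs.  If the endpoint's TLV
 * FIFO supported only, say, 3 TREs per transaction, the skb would
 * first be linearized so a single TRE suffices (at the cost of a
 * copy).
 */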

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to send to hardware
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
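
/* Example of the accounting above: with a backlog of 3 and count == 1
 * (one buffer just consumed by the hardware), up to three buffers are
 * posted and then the consumed buffer's credit is added back, so the
 * backlog always reflects how many receive buffers the hardware could
 * still accept.  If an allocation fails mid-loop, the failed credit
 * is restored (atomic_inc_return()) before count is added.
 */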

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
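
/* Worked example of the length computation above, assuming a pad
 * alignment of 4 bytes: a status element describing a 1021-byte packet
 * consumes sizeof(struct ipa_status) + ALIGN(1021, 4), that is,
 * sizeof(*status) + 1024 bytes of buffer space, plus
 * sizeof(struct rmnet_map_dl_csum_trailer) if checksum offload is
 * enabled.  Only the 1021 packet bytes are copied to the skb handed
 * to the network stack.
 */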

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version == IPA_VERSION_3_5_1 &&
		  !endpoint->toward_ipa &&
		  endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_program_delay(endpoint, false);
	else
		(void)ipa_endpoint_program_suspend(endpoint, false);
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_cmd_tag_process(ipa);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;
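
	/* Worked example with hypothetical FLAVOR_0 contents: if the
	 * hardware reports its lowest producer (AP RX) endpoint as 20
	 * with 10 producer pipes and 20 consumer pipes, then
	 * rx_base = 20, rx_mask = GENMASK(29, 20) and
	 * tx_mask = GENMASK(19, 0), so ipa->available covers endpoint
	 * IDs 0 through 29.
	 */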

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}