// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/version.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"

#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH 16

#define IPA_RX_BUFFER_SIZE (PAGE_SIZE << IPA_RX_BUFFER_ORDER)
#define IPA_RX_BUFFER_ORDER 1 /* 8KB endpoint RX buffers (2 pages) */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

#define IPA_ENDPOINT_STOP_RX_RETRIES 10
#define IPA_ENDPOINT_STOP_RX_SIZE 1 /* bytes */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */

#define ENDPOINT_STOP_DMA_TIMEOUT 15 /* milliseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
        IPA_STATUS_OPCODE_PACKET = 0x01,
        IPA_STATUS_OPCODE_NEW_FRAG_RULE = 0x02,
        IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
        IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
        IPA_STATUS_OPCODE_LOG = 0x10,
        IPA_STATUS_OPCODE_DCMP = 0x20,
        IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
        /* 0 means no exception */
        IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
        IPA_STATUS_EXCEPTION_IPTYPE = 0x04,
        IPA_STATUS_EXCEPTION_PACKET_LENGTH = 0x08,
        IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
        IPA_STATUS_EXCEPTION_SW_FILT = 0x20,
        /* The meaning of the next value depends on the IP version (IPv4 or IPv6) */
        IPA_STATUS_EXCEPTION_NAT = 0x40, /* IPv4 */
        IPA_STATUS_EXCEPTION_IPV6CT = IPA_STATUS_EXCEPTION_NAT,
};

/* Status element provided by hardware */
struct ipa_status {
        u8 opcode; /* enum ipa_status_opcode */
        u8 exception; /* enum ipa_status_exception */
        __le16 mask;
        __le16 pkt_len;
        u8 endp_src_idx;
        u8 endp_dst_idx;
        __le32 metadata;
        __le32 flags1;
        __le64 flags2;
        __le32 flags3;
        __le32 flags4;
};

/* Field masks for struct ipa_status structure fields */

#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)

#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)

#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK GENMASK(0, 0)
#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK GENMASK(1, 1)
#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK GENMASK(2, 2)
#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK GENMASK(3, 3)
#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK GENMASK(13, 4)
#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK GENMASK(14, 14)
#define IPA_STATUS_FLAGS1_RT_HASH_FMASK GENMASK(15, 15)
#define IPA_STATUS_FLAGS1_UCP_FMASK GENMASK(16, 16)
#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)

#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK GENMASK_ULL(0, 0)
#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK GENMASK_ULL(13, 1)
#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK GENMASK_ULL(15, 14)
#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK GENMASK_ULL(63, 16)

#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK GENMASK(7, 0)
#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK GENMASK(31, 8)

#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK GENMASK(0, 0)
#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK GENMASK(10, 1)
#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK GENMASK(11, 11)
#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK GENMASK(15, 12)
#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK GENMASK(31, 16)
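
/* Note (added for clarity; not in the original source): status fields are
 * decoded with the bitfield helpers using the masks above.  For example,
 * ipa_endpoint_status_skip() below extracts the destination endpoint with
 * u32_get_bits(status->endp_dst_idx, IPA_STATUS_DST_IDX_FMASK), and
 * ipa_status_drop_packet() treats an all-ones value in
 * IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK as a routing rule miss.
 */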

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
        /* The aggregation byte limit defines the point at which an
         * aggregation window will close.  It is programmed into the
         * IPA hardware as a number of KB.  We don't use "hard byte
         * limit" aggregation, which means that we need to supply
         * enough space in a receive buffer to hold a complete MTU
         * plus normal skb overhead *after* that aggregation byte
         * limit has been crossed.
         *
         * This check just ensures we don't define a receive buffer
         * size that would exceed what we can represent in the field
         * that is used to program its size.
         */
        BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
                     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
                     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

        /* I honestly don't know where this requirement comes from.  But
         * it holds, and if we someday need to loosen the constraint we
         * can try to track it down.
         */
        BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
                            const struct ipa_gsi_endpoint_data *all_data,
                            const struct ipa_gsi_endpoint_data *data)
{
        const struct ipa_gsi_endpoint_data *other_data;
        struct device *dev = &ipa->pdev->dev;
        enum ipa_endpoint_name other_name;

        if (ipa_gsi_endpoint_data_empty(data))
                return true;

        if (!data->toward_ipa) {
                if (data->endpoint.filter_support) {
                        dev_err(dev, "filtering not supported for "
                                        "RX endpoint %u\n",
                                data->endpoint_id);
                        return false;
                }

                return true; /* Nothing more to check for RX */
        }

        if (data->endpoint.config.status_enable) {
                other_name = data->endpoint.config.tx.status_endpoint;
                if (other_name >= count) {
                        dev_err(dev, "status endpoint name %u out of range "
                                        "for endpoint %u\n",
                                other_name, data->endpoint_id);
                        return false;
                }

                /* Status endpoint must be defined... */
                other_data = &all_data[other_name];
                if (ipa_gsi_endpoint_data_empty(other_data)) {
                        dev_err(dev, "status endpoint name %u undefined "
                                        "for endpoint %u\n",
                                other_name, data->endpoint_id);
                        return false;
                }

                /* ...and has to be an RX endpoint... */
                if (other_data->toward_ipa) {
                        dev_err(dev,
                                "status endpoint for endpoint %u not RX\n",
                                data->endpoint_id);
                        return false;
                }

                /* ...and if it's to be an AP endpoint... */
                if (other_data->ee_id == GSI_EE_AP) {
                        /* ...make sure it has status enabled. */
                        if (!other_data->endpoint.config.status_enable) {
                                dev_err(dev,
                                        "status not enabled for endpoint %u\n",
                                        other_data->endpoint_id);
                                return false;
                        }
                }
        }

        if (data->endpoint.config.dma_mode) {
                other_name = data->endpoint.config.dma_endpoint;
                if (other_name >= count) {
                        dev_err(dev, "DMA endpoint name %u out of range "
                                        "for endpoint %u\n",
                                other_name, data->endpoint_id);
                        return false;
                }

                other_data = &all_data[other_name];
                if (ipa_gsi_endpoint_data_empty(other_data)) {
                        dev_err(dev, "DMA endpoint name %u undefined "
                                        "for endpoint %u\n",
                                other_name, data->endpoint_id);
                        return false;
                }
        }

        return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
                                    const struct ipa_gsi_endpoint_data *data)
{
        const struct ipa_gsi_endpoint_data *dp = data;
        struct device *dev = &ipa->pdev->dev;
        enum ipa_endpoint_name name;

        ipa_endpoint_validate_build();

        if (count > IPA_ENDPOINT_COUNT) {
                dev_err(dev, "too many endpoints specified (%u > %u)\n",
                        count, IPA_ENDPOINT_COUNT);
                return false;
        }

        /* Make sure needed endpoints have defined data */
        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
                dev_err(dev, "command TX endpoint not defined\n");
                return false;
        }
        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
                dev_err(dev, "LAN RX endpoint not defined\n");
                return false;
        }
        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
                dev_err(dev, "AP->modem TX endpoint not defined\n");
                return false;
        }
        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
                dev_err(dev, "AP<-modem RX endpoint not defined\n");
                return false;
        }

        for (name = 0; name < count; name++, dp++)
                if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
                        return false;

        return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
                                    const struct ipa_gsi_endpoint_data *data)
{
        return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
                                                  u32 tre_count)
{
        struct gsi *gsi = &endpoint->ipa->gsi;
        u32 channel_id = endpoint->channel_id;
        enum dma_data_direction direction;

        direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

        return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static int
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
        u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        u32 mask;
        u32 val;

        /* assert(ipa->version == IPA_VERSION_3_5_1); */
        mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

        val = ioread32(ipa->reg_virt + offset);
        if (suspend_delay == !!(val & mask))
                return -EALREADY; /* Already set to desired state */

        val ^= mask;
        iowrite32(val, ipa->reg_virt + offset);

        return 0;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
        bool support_suspend;
        u32 endpoint_id;

        /* DELAY mode doesn't work right on IPA v4.2 */
        if (ipa->version == IPA_VERSION_4_2)
                return;

        /* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
        support_suspend = ipa->version == IPA_VERSION_3_5_1;

        for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

                if (endpoint->ee_id != GSI_EE_MODEM)
                        continue;

                /* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
                if (endpoint->toward_ipa || support_suspend)
                        (void)ipa_endpoint_init_ctrl(endpoint, enable);
        }
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
        u32 initialized = ipa->initialized;
        struct gsi_trans *trans;
        u32 count;

        /* We need one command per modem TX endpoint.  We can get an upper
         * bound on that by assuming all initialized endpoints are modem->IPA.
         * That won't happen, and we could be more precise, but this is fine
         * for now.  We need to end the transaction with a "tag process."
         */
        count = hweight32(initialized) + ipa_cmd_tag_process_count();
        trans = ipa_cmd_trans_alloc(ipa, count);
        if (!trans) {
                dev_err(&ipa->pdev->dev,
                        "no transaction to reset modem exception endpoints\n");
                return -EBUSY;
        }

        while (initialized) {
                u32 endpoint_id = __ffs(initialized);
                struct ipa_endpoint *endpoint;
                u32 offset;

                initialized ^= BIT(endpoint_id);

                /* We only reset modem TX endpoints */
                endpoint = &ipa->endpoint[endpoint_id];
                if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
                        continue;

                offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

                /* Value written is 0, and all bits are updated.  That
                 * means status is disabled on the endpoint, and as a
                 * result all other fields in the register are ignored.
                 */
                ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
        }

        ipa_cmd_tag_process_add(trans);

        /* XXX This should have a 1 second timeout */
        gsi_trans_commit_wait(trans);

        return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
        u32 val = 0;

        /* FRAG_OFFLOAD_EN is 0 */
        if (endpoint->data->checksum) {
                if (endpoint->toward_ipa) {
                        u32 checksum_offset;

                        val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
                                               CS_OFFLOAD_EN_FMASK);
                        /* Checksum header offset is in 4-byte units */
                        checksum_offset = sizeof(struct rmnet_map_header);
                        checksum_offset /= sizeof(u32);
                        val |= u32_encode_bits(checksum_offset,
                                               CS_METADATA_HDR_OFFSET_FMASK);
                } else {
                        val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
                                               CS_OFFLOAD_EN_FMASK);
                }
        } else {
                val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
                                       CS_OFFLOAD_EN_FMASK);
        }
        /* CS_GEN_QMB_MASTER_SEL is 0 */

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
        u32 val = 0;

        if (endpoint->data->qmap) {
                size_t header_size = sizeof(struct rmnet_map_header);

                if (endpoint->toward_ipa && endpoint->data->checksum)
                        header_size += sizeof(struct rmnet_map_ul_csum_header);

                val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
                /* metadata is the 4 byte rmnet_map header itself */
                val |= HDR_OFST_METADATA_VALID_FMASK;
                val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
                /* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
                if (!endpoint->toward_ipa) {
                        u32 size_offset = offsetof(struct rmnet_map_header,
                                                   pkt_len);

                        val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
                        val |= u32_encode_bits(size_offset,
                                               HDR_OFST_PKT_SIZE_FMASK);
                }
                /* HDR_A5_MUX is 0 */
                /* HDR_LEN_INC_DEAGG_HDR is 0 */
                /* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
        }

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
        u32 pad_align = endpoint->data->rx.pad_align;
        u32 val = 0;

        val |= HDR_ENDIANNESS_FMASK; /* big endian */
        val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
        /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
        /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
        /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
        if (!endpoint->toward_ipa)
                val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}
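
/* Note (added for clarity; not in the original source): with QMAP enabled
 * the header programmed by ipa_endpoint_init_hdr() is the 4-byte
 * struct rmnet_map_header.  For a TX endpoint with checksum offload the
 * rmnet_map_ul_csum_header (assumed here to be 4 bytes) is added as well,
 * so HDR_LEN becomes 8 and the checksum metadata offset programmed by
 * ipa_endpoint_init_cfg() is sizeof(struct rmnet_map_header) / sizeof(u32),
 * i.e. 1 (the offset is expressed in 4-byte units).
 */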

/**
 * Generate a metadata mask value that will select only the mux_id
 * field in an rmnet_map header structure.  The mux_id is at offset
 * 1 byte from the beginning of the structure, but the metadata
 * value is treated as a 4-byte unit.  So this mask must be computed
 * with endianness in mind.  Note that ipa_endpoint_init_hdr_metadata_mask()
 * will convert this value to the proper byte order.
 *
 * Marked __always_inline because this is really computing a
 * constant value.
 */
static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
{
        size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
        u32 mux_id_mask = 0;
        u8 *bytes;

        bytes = (u8 *)&mux_id_mask;
        bytes[mux_id_offset] = 0xff; /* mux_id is 1 byte */

        return cpu_to_be32(mux_id_mask);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
        u32 endpoint_id = endpoint->endpoint_id;
        u32 val = 0;
        u32 offset;

        offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

        if (!endpoint->toward_ipa && endpoint->data->qmap)
                val = ipa_rmnet_mux_id_metadata_mask();

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
        u32 val;

        if (endpoint->toward_ipa && endpoint->data->dma_mode) {
                enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
                u32 dma_endpoint_id;

                dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

                val = u32_encode_bits(IPA_DMA, MODE_FMASK);
                val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
        } else {
                val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
        }
        /* Other bitfields unspecified (and 0) */

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
        /* We don't use "hard byte limit" aggregation, so we define the
         * aggregation limit such that our buffer has enough space *after*
         * that limit to receive a full MTU of data, plus overhead.
         */
        rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

        return rx_buffer_size / SZ_1K;
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
        u32 val = 0;

        if (endpoint->data->aggregation) {
                if (!endpoint->toward_ipa) {
                        u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
                        u32 limit;

                        val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
                        val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
                        val |= u32_encode_bits(aggr_size,
                                               AGGR_BYTE_LIMIT_FMASK);
                        limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
                        val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
                                               AGGR_TIME_LIMIT_FMASK);
                        val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
                        if (endpoint->data->rx.aggr_close_eof)
                                val |= AGGR_SW_EOF_ACTIVE_FMASK;
                        /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
                } else {
                        val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
                                               AGGR_EN_FMASK);
                        val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
                        /* other fields ignored */
                }
                /* AGGR_FORCE_CLOSE is 0 */
        } else {
                val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
                /* other fields ignored */
        }

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}
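
/* Worked example (added for clarity; the numbers are illustrative and
 * depend on the build): with 4 KiB pages IPA_RX_BUFFER_SIZE is 8192 bytes,
 * and IPA_RX_BUFFER_OVERHEAD amounts to NET_SKB_PAD plus the aligned
 * skb_shared_info (a few hundred bytes).  Assuming IPA_MTU is the standard
 * 1500-byte Ethernet MTU, ipa_aggr_size_kb() yields roughly
 * (8192 - 1500 - overhead) / 1024, i.e. an aggregation byte limit of
 * about 6 KB before the aggregation window must close.
 */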

/* A return value of 0 indicates an error */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
        u32 scale;
        u32 base;
        u32 val;

        if (!microseconds)
                return 0; /* invalid delay */

        /* Timer is represented in units of clock ticks. */
        if (ipa->version < IPA_VERSION_4_2)
                return microseconds; /* XXX Needs to be computed */

        /* IPA v4.2 represents the tick count as base * scale */
        scale = 1; /* XXX Needs to be computed */
        if (scale > field_max(SCALE_FMASK))
                return 0; /* scale too big */

        base = DIV_ROUND_CLOSEST(microseconds, scale);
        if (base > field_max(BASE_VALUE_FMASK))
                return 0; /* microseconds too big */

        val = u32_encode_bits(scale, SCALE_FMASK);
        val |= u32_encode_bits(base, BASE_VALUE_FMASK);

        return val;
}

static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
                                             u32 microseconds)
{
        u32 endpoint_id = endpoint->endpoint_id;
        struct ipa *ipa = endpoint->ipa;
        u32 offset;
        u32 val;

        /* XXX We'll fix this when the register definition is clear */
        if (microseconds) {
                struct device *dev = &ipa->pdev->dev;

                dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
                        endpoint_id);
                microseconds = 0;
        }

        if (microseconds) {
                val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
                if (!val)
                        return -EINVAL;
        } else {
                val = 0; /* timeout is immediate */
        }
        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
        iowrite32(val, ipa->reg_virt + offset);

        return 0;
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
        u32 endpoint_id = endpoint->endpoint_id;
        u32 offset;
        u32 val;

        val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
        u32 i;

        for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[i];

                if (endpoint->ee_id != GSI_EE_MODEM)
                        continue;

                (void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
                ipa_endpoint_init_hol_block_enable(endpoint, true);
        }
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
        u32 val = 0;

        /* DEAGGR_HDR_LEN is 0 */
        /* PACKET_OFFSET_VALID is 0 */
        /* PACKET_OFFSET_LOCATION is ignored (not valid) */
        /* MAX_PACKET_LEN is 0 (not enforced) */

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
        u32 seq_type = endpoint->seq_type;
        u32 val = 0;

        val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
        val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
        /* HPS_REP_SEQ_TYPE is 0 */
        /* DPS_REP_SEQ_TYPE is 0 */

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}
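
/* Note (added for clarity; the value below is only an example): an
 * endpoint's seq_type packs both sequencer types, with the header
 * processing (HPS) type in the low nibble and the data processing (DPS)
 * type in the next nibble.  A seq_type of 0x2b, for instance, would
 * program HPS type 0xb and DPS type 0x2 above.
 */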

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint: Endpoint pointer
 * @skb: Socket buffer to send
 *
 * Returns: 0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
        struct gsi_trans *trans;
        u32 nr_frags;
        int ret;

        /* Make sure source endpoint's TLV FIFO has enough entries to
         * hold the linear portion of the skb and all its fragments.
         * If not, see if we can linearize it before giving up.
         */
        nr_frags = skb_shinfo(skb)->nr_frags;
        if (1 + nr_frags > endpoint->trans_tre_max) {
                if (skb_linearize(skb))
                        return -E2BIG;
                nr_frags = 0;
        }

        trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
        if (!trans)
                return -EBUSY;

        ret = gsi_trans_skb_add(trans, skb);
        if (ret)
                goto err_trans_free;
        trans->data = skb; /* transaction owns skb now */

        gsi_trans_commit(trans, !netdev_xmit_more());

        return 0;

err_trans_free:
        gsi_trans_free(trans);

        return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
        u32 endpoint_id = endpoint->endpoint_id;
        struct ipa *ipa = endpoint->ipa;
        u32 val = 0;
        u32 offset;

        offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

        if (endpoint->data->status_enable) {
                val |= STATUS_EN_FMASK;
                if (endpoint->toward_ipa) {
                        enum ipa_endpoint_name name;
                        u32 status_endpoint_id;

                        name = endpoint->data->tx.status_endpoint;
                        status_endpoint_id = ipa->name_map[name]->endpoint_id;

                        val |= u32_encode_bits(status_endpoint_id,
                                               STATUS_ENDP_FMASK);
                }
                /* STATUS_LOCATION is 0 (status element precedes packet) */
                /* The next field is present for IPA v4.0 and above */
                /* STATUS_PKT_SUPPRESS_FMASK is 0 */
        }

        iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
        struct gsi_trans *trans;
        bool doorbell = false;
        struct page *page;
        u32 offset;
        u32 len;
        int ret;

        page = dev_alloc_pages(IPA_RX_BUFFER_ORDER);
        if (!page)
                return -ENOMEM;

        trans = ipa_endpoint_trans_alloc(endpoint, 1);
        if (!trans)
                goto err_free_pages;

        /* Offset the buffer to make space for skb headroom */
        offset = NET_SKB_PAD;
        len = IPA_RX_BUFFER_SIZE - offset;

        ret = gsi_trans_page_add(trans, page, len, offset);
        if (ret)
                goto err_trans_free;
        trans->data = page; /* transaction owns page now */

        if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
                doorbell = true;
                endpoint->replenish_ready = 0;
        }

        gsi_trans_commit(trans, doorbell);

        return 0;

err_trans_free:
        gsi_trans_free(trans);
err_free_pages:
        __free_pages(page, IPA_RX_BUFFER_ORDER);

        return -ENOMEM;
}
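
/* Note (added for clarity; not in the original source): the replenish
 * machinery below keeps two atomic counts.  replenish_backlog is the
 * number of receive buffers the hardware could still be given, while
 * replenish_saved accumulates that count whenever replenishing is
 * disabled and is folded back in when it is re-enabled.  To limit MMIO
 * writes, ipa_endpoint_replenish_one() above only rings the channel
 * doorbell once per IPA_REPLENISH_BATCH queued buffers.
 */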

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint: Endpoint to be replenished
 * @count: Number of newly completed receive transactions (buffers to
 *	add back to the backlog); may be zero
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
        struct gsi *gsi;
        u32 backlog;

        if (!endpoint->replenish_enabled) {
                if (count)
                        atomic_add(count, &endpoint->replenish_saved);
                return;
        }

        while (atomic_dec_not_zero(&endpoint->replenish_backlog))
                if (ipa_endpoint_replenish_one(endpoint))
                        goto try_again_later;
        if (count)
                atomic_add(count, &endpoint->replenish_backlog);

        return;

try_again_later:
        /* The last one didn't succeed, so fix the backlog */
        backlog = atomic_inc_return(&endpoint->replenish_backlog);

        if (count)
                atomic_add(count, &endpoint->replenish_backlog);

        /* Whenever a receive buffer transaction completes we'll try to
         * replenish again.  It's unlikely, but if we fail to supply even
         * one buffer, nothing will trigger another replenish attempt.
         * Receive buffer transactions use one TRE, so schedule work to
         * try replenishing again if our backlog is *all* available TREs.
         */
        gsi = &endpoint->ipa->gsi;
        if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
                schedule_delayed_work(&endpoint->replenish_work,
                                      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
        struct gsi *gsi = &endpoint->ipa->gsi;
        u32 max_backlog;
        u32 saved;

        endpoint->replenish_enabled = true;
        while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
                atomic_add(saved, &endpoint->replenish_backlog);

        /* Start replenishing if hardware currently has no buffers */
        max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
        if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
                ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
        u32 backlog;

        endpoint->replenish_enabled = false;
        while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
                atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct ipa_endpoint *endpoint;

        endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

        ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
                                  void *data, u32 len, u32 extra)
{
        struct sk_buff *skb;

        skb = __dev_alloc_skb(len, GFP_ATOMIC);
        if (skb) {
                skb_put(skb, len);
                memcpy(skb->data, data, len);
                skb->truesize += extra;
        }

        /* Now receive it, or drop it if there's no netdev */
        if (endpoint->netdev)
                ipa_modem_skb_rx(endpoint->netdev, skb);
        else if (skb)
                dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
                                   struct page *page, u32 len)
{
        struct sk_buff *skb;

        /* Nothing to do if there's no netdev */
        if (!endpoint->netdev)
                return false;

        /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
        skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
        if (skb) {
                /* Reserve the headroom and account for the data */
                skb_reserve(skb, NET_SKB_PAD);
                skb_put(skb, len);
        }

        /* Receive the buffer (or record drop if unable to build it) */
        ipa_modem_skb_rx(endpoint->netdev, skb);

        return skb != NULL;
}
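
/* Note (added for clarity; not in the original source): there are two RX
 * delivery paths.  With status elements disabled, ipa_endpoint_skb_build()
 * wraps the receive page directly with build_skb() and the page is handed
 * to the network stack (ipa_endpoint_rx_complete() then clears the
 * transaction's page pointer so it isn't freed again).  With status
 * enabled, ipa_endpoint_status_parse() below walks the buffer and copies
 * each packet's data into a freshly allocated socket buffer instead.
 */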

/* The format of a packet status element is the same for several status
 * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
 * aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
        switch (opcode) {
        case IPA_STATUS_OPCODE_PACKET:
        case IPA_STATUS_OPCODE_DROPPED_PACKET:
        case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
        case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
                return true;
        default:
                return false;
        }
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
                                     const struct ipa_status *status)
{
        u32 endpoint_id;

        if (!ipa_status_format_packet(status->opcode))
                return true;
        if (!status->pkt_len)
                return true;
        endpoint_id = u32_get_bits(status->endp_dst_idx,
                                   IPA_STATUS_DST_IDX_FMASK);
        if (endpoint_id != endpoint->endpoint_id)
                return true;

        return false; /* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
        u32 val;

        /* Deaggregation exceptions we drop; others we consume */
        if (status->exception)
                return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

        /* Drop the packet if it fails to match a routing rule; otherwise no */
        val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

        return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
                                      struct page *page, u32 total_len)
{
        void *data = page_address(page) + NET_SKB_PAD;
        u32 unused = IPA_RX_BUFFER_SIZE - total_len;
        u32 resid = total_len;

        while (resid) {
                const struct ipa_status *status = data;
                u32 align;
                u32 len;

                if (resid < sizeof(*status)) {
                        dev_err(&endpoint->ipa->pdev->dev,
                                "short message (%u bytes < %zu byte status)\n",
                                resid, sizeof(*status));
                        break;
                }

                /* Skip over status packets that lack packet data */
                if (ipa_endpoint_status_skip(endpoint, status)) {
                        data += sizeof(*status);
                        resid -= sizeof(*status);
                        continue;
                }

                /* Compute the amount of buffer space consumed by the
                 * packet, including the status element.  If the hardware
                 * is configured to pad packet data to an aligned boundary,
                 * account for that.  And if checksum offload is enabled
                 * a trailer containing computed checksum information will
                 * be appended.
                 */
                align = endpoint->data->rx.pad_align ? : 1;
                len = le16_to_cpu(status->pkt_len);
                len = sizeof(*status) + ALIGN(len, align);
                if (endpoint->data->checksum)
                        len += sizeof(struct rmnet_map_dl_csum_trailer);

                /* Charge the new packet with a proportional fraction of
                 * the unused space in the original receive buffer.
                 * XXX Charge a proportion of the *whole* receive buffer?
                 */
                if (!ipa_status_drop_packet(status)) {
                        u32 extra = unused * len / total_len;
                        void *data2 = data + sizeof(*status);
                        u32 len2 = le16_to_cpu(status->pkt_len);

                        /* Client receives only packet data (no status) */
                        ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
                }

                /* Consume status and the full packet it describes */
                data += len;
                resid -= len;
        }
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
                                     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
                                     struct gsi_trans *trans)
{
        struct page *page;

        ipa_endpoint_replenish(endpoint, 1);

        if (trans->cancelled)
                return;

        /* Parse or build a socket buffer using the actual received length */
        page = trans->data;
        if (endpoint->data->status_enable)
                ipa_endpoint_status_parse(endpoint, page, trans->len);
        else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
                trans->data = NULL; /* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
                                 struct gsi_trans *trans)
{
        if (endpoint->toward_ipa)
                ipa_endpoint_tx_complete(endpoint, trans);
        else
                ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
                                struct gsi_trans *trans)
{
        if (endpoint->toward_ipa) {
                struct ipa *ipa = endpoint->ipa;

                /* Nothing to do for command transactions */
                if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
                        struct sk_buff *skb = trans->data;

                        if (skb)
                                dev_kfree_skb_any(skb);
                }
        } else {
                struct page *page = trans->data;

                if (page)
                        __free_pages(page, IPA_RX_BUFFER_ORDER);
        }
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
        u32 val;

        /* ROUTE_DIS is 0 */
        val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
        val |= ROUTE_DEF_HDR_TABLE_FMASK;
        val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
        val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
        val |= ROUTE_DEF_RETAIN_HDR_FMASK;

        iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
        ipa_endpoint_default_route_set(ipa, 0);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
        u32 mask = BIT(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        u32 offset;
        u32 val;

        /* assert(mask & ipa->available); */
        offset = ipa_reg_state_aggr_active_offset(ipa->version);
        val = ioread32(ipa->reg_virt + offset);

        return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
        u32 mask = BIT(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;

        /* assert(mask & ipa->available); */
        iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint: Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return: 0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
        struct device *dev = &endpoint->ipa->pdev->dev;
        struct ipa *ipa = endpoint->ipa;
        bool endpoint_suspended = false;
        struct gsi *gsi = &ipa->gsi;
        dma_addr_t addr;
        bool db_enable;
        u32 retries;
        u32 len = 1;
        void *virt;
        int ret;

        virt = kzalloc(len, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr)) {
                ret = -ENOMEM;
                goto out_kfree;
        }

        /* Force close aggregation before issuing the reset */
        ipa_endpoint_force_close(endpoint);

        /* Reset and reconfigure the channel with the doorbell engine
         * disabled.  Then poll until we know aggregation is no longer
         * active.  We'll re-enable the doorbell (if appropriate) when
         * we reset again below.
         */
        gsi_channel_reset(gsi, endpoint->channel_id, false);

        /* Make sure the channel isn't suspended */
        if (endpoint->ipa->version == IPA_VERSION_3_5_1)
                if (!ipa_endpoint_init_ctrl(endpoint, false))
                        endpoint_suspended = true;

        /* Start channel and do a 1 byte read */
        ret = gsi_channel_start(gsi, endpoint->channel_id);
        if (ret)
                goto out_suspend_again;

        ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
        if (ret)
                goto err_endpoint_stop;

        /* Wait for aggregation to be closed on the channel */
        retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
        do {
                if (!ipa_endpoint_aggr_active(endpoint))
                        break;
                msleep(1);
        } while (retries--);

        /* Check one last time */
        if (ipa_endpoint_aggr_active(endpoint))
                dev_err(dev, "endpoint %u still active during reset\n",
                        endpoint->endpoint_id);

        gsi_trans_read_byte_done(gsi, endpoint->channel_id);

        ret = ipa_endpoint_stop(endpoint);
        if (ret)
                goto out_suspend_again;

        /* Finally, reset and reconfigure the channel again (re-enabling
         * the doorbell engine if appropriate).  Sleep for 1 millisecond to
         * complete the channel reset sequence.  Finish by suspending the
         * channel again (if necessary).
         */
        db_enable = ipa->version == IPA_VERSION_3_5_1;
        gsi_channel_reset(gsi, endpoint->channel_id, db_enable);

        msleep(1);

        goto out_suspend_again;

err_endpoint_stop:
        ipa_endpoint_stop(endpoint);
out_suspend_again:
        if (endpoint_suspended)
                (void)ipa_endpoint_init_ctrl(endpoint, true);
        dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
        kfree(virt);

        return ret;
}
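
/* Summary of the workaround above (added for reference): force-close
 * aggregation, reset the channel with the doorbell engine disabled,
 * un-suspend the endpoint if necessary, then start the channel and issue
 * a one-byte read so the pipeline can drain.  Once aggregation is seen to
 * be inactive (polled up to IPA_ENDPOINT_RESET_AGGR_RETRY_MAX times), the
 * channel is stopped and reset a second time, and the endpoint's original
 * suspend state is restored.
 */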

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
        u32 channel_id = endpoint->channel_id;
        struct ipa *ipa = endpoint->ipa;
        bool db_enable;
        bool special;
        int ret = 0;

        /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
         * is active, we need to handle things specially to recover.
         * All other cases just need to reset the underlying GSI channel.
         *
         * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
         */
        db_enable = ipa->version == IPA_VERSION_3_5_1;
        special = !endpoint->toward_ipa && endpoint->data->aggregation;
        if (special && ipa_endpoint_aggr_active(endpoint))
                ret = ipa_endpoint_reset_rx_aggr(endpoint);
        else
                gsi_channel_reset(&ipa->gsi, channel_id, db_enable);

        if (ret)
                dev_err(&ipa->pdev->dev,
                        "error %d resetting channel %u for endpoint %u\n",
                        ret, endpoint->channel_id, endpoint->endpoint_id);
}

static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
{
        u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
        struct gsi_trans *trans;
        dma_addr_t addr;
        int ret;

        trans = ipa_cmd_trans_alloc(ipa, 1);
        if (!trans) {
                dev_err(&ipa->pdev->dev,
                        "no transaction for RX endpoint STOP workaround\n");
                return -EBUSY;
        }

        /* Read into the highest part of the zero memory area */
        addr = ipa->zero_addr + ipa->zero_size - size;

        ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);

        ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
        if (ret)
                gsi_trans_free(trans);

        return ret;
}

/**
 * ipa_endpoint_stop() - Stops a GSI channel in IPA
 * @endpoint: Endpoint whose GSI channel should be stopped
 *
 * This function implements the sequence to stop a GSI channel
 * in IPA.  This function returns when the channel is in STOP state.
 *
 * Return value: 0 on success, negative otherwise
 */
int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
{
        u32 retries = endpoint->toward_ipa ? 0 : IPA_ENDPOINT_STOP_RX_RETRIES;
        int ret;

        do {
                struct ipa *ipa = endpoint->ipa;
                struct gsi *gsi = &ipa->gsi;

                ret = gsi_channel_stop(gsi, endpoint->channel_id);
                if (ret != -EAGAIN)
                        break;

                if (endpoint->toward_ipa)
                        continue;

                /* For IPA v3.5.1, send a DMA read task and check again */
                if (ipa->version == IPA_VERSION_3_5_1) {
                        ret = ipa_endpoint_stop_rx_dma(ipa);
                        if (ret)
                                break;
                }

                msleep(1);
        } while (retries--);

        /* Success (or any error other than -EAGAIN) is final; if we ran
         * out of retries the last result was -EAGAIN, so report -EIO.
         */
        return ret == -EAGAIN ? -EIO : ret;
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
        struct device *dev = &endpoint->ipa->pdev->dev;
        int ret;

        if (endpoint->toward_ipa) {
                bool delay_mode = endpoint->data->tx.delay;

                ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
                /* Endpoint is expected to not be in delay mode */
                if (!ret != delay_mode) {
                        dev_warn(dev,
                                 "TX endpoint %u was %sin delay mode\n",
                                 endpoint->endpoint_id,
                                 delay_mode ? "already " : "");
                }
                ipa_endpoint_init_hdr_ext(endpoint);
                ipa_endpoint_init_aggr(endpoint);
                ipa_endpoint_init_deaggr(endpoint);
                ipa_endpoint_init_seq(endpoint);
        } else {
                if (endpoint->ipa->version == IPA_VERSION_3_5_1) {
                        if (!ipa_endpoint_init_ctrl(endpoint, false))
                                dev_warn(dev,
                                         "RX endpoint %u was suspended\n",
                                         endpoint->endpoint_id);
                }
                ipa_endpoint_init_hdr_ext(endpoint);
                ipa_endpoint_init_aggr(endpoint);
        }
        ipa_endpoint_init_cfg(endpoint);
        ipa_endpoint_init_hdr(endpoint);
        ipa_endpoint_init_hdr_metadata_mask(endpoint);
        ipa_endpoint_init_mode(endpoint);
        ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
        struct ipa *ipa = endpoint->ipa;
        struct gsi *gsi = &ipa->gsi;
        int ret;

        ret = gsi_channel_start(gsi, endpoint->channel_id);
        if (ret) {
                dev_err(&ipa->pdev->dev,
                        "error %d starting %cX channel %u for endpoint %u\n",
                        ret, endpoint->toward_ipa ? 'T' : 'R',
                        endpoint->channel_id, endpoint->endpoint_id);
                return ret;
        }

        if (!endpoint->toward_ipa) {
                ipa_interrupt_suspend_enable(ipa->interrupt,
                                             endpoint->endpoint_id);
                ipa_endpoint_replenish_enable(endpoint);
        }

        ipa->enabled |= BIT(endpoint->endpoint_id);

        return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
        u32 mask = BIT(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        int ret;

        if (!(endpoint->ipa->enabled & mask))
                return;

        endpoint->ipa->enabled ^= mask;

        if (!endpoint->toward_ipa) {
                ipa_endpoint_replenish_disable(endpoint);
                ipa_interrupt_suspend_disable(ipa->interrupt,
                                              endpoint->endpoint_id);
        }

        /* Note that if stop fails, the channel's state is not well-defined */
        ret = ipa_endpoint_stop(endpoint);
        if (ret)
                dev_err(&ipa->pdev->dev,
                        "error %d attempting to stop endpoint %u\n", ret,
                        endpoint->endpoint_id);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint: Endpoint on which to emulate a suspend interrupt
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
        struct ipa *ipa = endpoint->ipa;

        /* assert(ipa->version == IPA_VERSION_3_5_1); */

        if (!endpoint->data->aggregation)
                return;

        /* Nothing to do if the endpoint doesn't have aggregation open */
        if (!ipa_endpoint_aggr_active(endpoint))
                return;

        /* Force close aggregation */
        ipa_endpoint_force_close(endpoint);

        ipa_interrupt_simulate_suspend(ipa->interrupt);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
        struct device *dev = &endpoint->ipa->pdev->dev;
        struct gsi *gsi = &endpoint->ipa->gsi;
        bool stop_channel;
        int ret;

        if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
                return;

        if (!endpoint->toward_ipa)
                ipa_endpoint_replenish_disable(endpoint);

        /* IPA v3.5.1 doesn't use channel stop for suspend */
        stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
        if (!endpoint->toward_ipa && !stop_channel) {
                /* Due to a hardware bug, a client suspended with an open
                 * aggregation frame will not generate a SUSPEND IPA
                 * interrupt.  We work around this by force-closing the
                 * aggregation frame, then simulating the arrival of such
                 * an interrupt.
                 */
                WARN_ON(ipa_endpoint_init_ctrl(endpoint, true));
                ipa_endpoint_suspend_aggr(endpoint);
        }

        ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
        if (ret)
                dev_err(dev, "error %d suspending channel %u\n", ret,
                        endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
        struct device *dev = &endpoint->ipa->pdev->dev;
        struct gsi *gsi = &endpoint->ipa->gsi;
        bool start_channel;
        int ret;

        if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
                return;

        /* IPA v3.5.1 doesn't use channel start for resume */
        start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
        if (!endpoint->toward_ipa && !start_channel)
                WARN_ON(ipa_endpoint_init_ctrl(endpoint, false));

        ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
        if (ret)
                dev_err(dev, "error %d resuming channel %u\n", ret,
                        endpoint->channel_id);
        else if (!endpoint->toward_ipa)
                ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
        if (ipa->modem_netdev)
                ipa_modem_suspend(ipa->modem_netdev);

        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
        ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
        ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

        if (ipa->modem_netdev)
                ipa_modem_resume(ipa->modem_netdev);
}
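
/* Note (added for clarity): ipa_endpoint_suspend() quiesces the modem
 * netdev first and then suspends the LAN RX and command TX endpoints;
 * ipa_endpoint_resume() brings them back in the reverse order.
 */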

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
        struct gsi *gsi = &endpoint->ipa->gsi;
        u32 channel_id = endpoint->channel_id;

        /* Only AP endpoints get set up */
        if (endpoint->ee_id != GSI_EE_AP)
                return;

        endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
        if (!endpoint->toward_ipa) {
                /* RX transactions require a single TRE, so the maximum
                 * backlog is the same as the maximum outstanding TREs.
                 */
                endpoint->replenish_enabled = false;
                atomic_set(&endpoint->replenish_saved,
                           gsi_channel_tre_max(gsi, endpoint->channel_id));
                atomic_set(&endpoint->replenish_backlog, 0);
                INIT_DELAYED_WORK(&endpoint->replenish_work,
                                  ipa_endpoint_replenish_work);
        }

        ipa_endpoint_program(endpoint);

        endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
        endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

        if (!endpoint->toward_ipa)
                cancel_delayed_work_sync(&endpoint->replenish_work);

        ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
        u32 initialized = ipa->initialized;

        ipa->set_up = 0;
        while (initialized) {
                u32 endpoint_id = __ffs(initialized);

                initialized ^= BIT(endpoint_id);

                ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
        }
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
        u32 set_up = ipa->set_up;

        while (set_up) {
                u32 endpoint_id = __fls(set_up);

                set_up ^= BIT(endpoint_id);

                ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
        }
        ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
        struct device *dev = &ipa->pdev->dev;
        u32 initialized;
        u32 rx_base;
        u32 rx_mask;
        u32 tx_mask;
        int ret = 0;
        u32 max;
        u32 val;

        /* Find out about the endpoints supplied by the hardware, and ensure
         * the highest one doesn't exceed the number we support.
         */
        val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

        /* Our RX is an IPA producer */
        rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
        max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
        if (max > IPA_ENDPOINT_MAX) {
                dev_err(dev, "too many endpoints (%u > %u)\n",
                        max, IPA_ENDPOINT_MAX);
                return -EINVAL;
        }
        rx_mask = GENMASK(max - 1, rx_base);

        /* Our TX is an IPA consumer */
        max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
        tx_mask = GENMASK(max - 1, 0);

        ipa->available = rx_mask | tx_mask;

        /* Check for initialized endpoints not supported by the hardware */
        if (ipa->initialized & ~ipa->available) {
                dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
                        ipa->initialized & ~ipa->available);
                ret = -EINVAL; /* Report other errors too */
        }

        initialized = ipa->initialized;
        while (initialized) {
                u32 endpoint_id = __ffs(initialized);
                struct ipa_endpoint *endpoint;

                initialized ^= BIT(endpoint_id);

                /* Make sure it's pointing in the right direction */
                endpoint = &ipa->endpoint[endpoint_id];
                if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
                        dev_err(dev, "endpoint id %u wrong direction\n",
                                endpoint_id);
                        ret = -EINVAL;
                }
        }

        return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
        ipa->available = 0; /* Nothing more to do */
}
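
/* Note (added for clarity; not in the original source): in IPA terms an
 * endpoint that delivers data to the AP (our RX side) is a "producer",
 * and one that carries data toward the IPA (our TX side) is a "consumer".
 * The FLAVOR_0 register read in ipa_endpoint_config() above reports how
 * many of each the hardware implements, with consumer (TX) endpoint IDs
 * starting at 0 and producer (RX) endpoint IDs starting at rx_base.
 */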

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
                                  const struct ipa_gsi_endpoint_data *data)
{
        struct ipa_endpoint *endpoint;

        endpoint = &ipa->endpoint[data->endpoint_id];

        if (data->ee_id == GSI_EE_AP)
                ipa->channel_map[data->channel_id] = endpoint;
        ipa->name_map[name] = endpoint;

        endpoint->ipa = ipa;
        endpoint->ee_id = data->ee_id;
        endpoint->seq_type = data->endpoint.seq_type;
        endpoint->channel_id = data->channel_id;
        endpoint->endpoint_id = data->endpoint_id;
        endpoint->toward_ipa = data->toward_ipa;
        endpoint->data = &data->endpoint.config;

        ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
        endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

        memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
        u32 initialized = ipa->initialized;

        while (initialized) {
                u32 endpoint_id = __fls(initialized);

                initialized ^= BIT(endpoint_id);

                ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
        }
        memset(ipa->name_map, 0, sizeof(ipa->name_map));
        memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
                      const struct ipa_gsi_endpoint_data *data)
{
        enum ipa_endpoint_name name;
        u32 filter_map;

        if (!ipa_endpoint_data_valid(ipa, count, data))
                return 0; /* Error */

        ipa->initialized = 0;

        filter_map = 0;
        for (name = 0; name < count; name++, data++) {
                if (ipa_gsi_endpoint_data_empty(data))
                        continue; /* Skip over empty slots */

                ipa_endpoint_init_one(ipa, name, data);

                if (data->endpoint.filter_support)
                        filter_map |= BIT(data->endpoint_id);
        }

        if (!ipa_filter_map_valid(ipa, filter_map))
                goto err_endpoint_exit;

        return filter_map; /* Non-zero bitmask */

err_endpoint_exit:
        ipa_endpoint_exit(ipa);

        return 0; /* Error */
}