1 // SPDX-License-Identifier: GPL-2.0 2 3 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 4 * Copyright (C) 2018-2020 Linaro Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <linux/bits.h> 9 #include <linux/bitfield.h> 10 #include <linux/mutex.h> 11 #include <linux/completion.h> 12 #include <linux/io.h> 13 #include <linux/bug.h> 14 #include <linux/interrupt.h> 15 #include <linux/platform_device.h> 16 #include <linux/netdevice.h> 17 18 #include "gsi.h" 19 #include "gsi_reg.h" 20 #include "gsi_private.h" 21 #include "gsi_trans.h" 22 #include "ipa_gsi.h" 23 #include "ipa_data.h" 24 #include "ipa_version.h" 25 26 /** 27 * DOC: The IPA Generic Software Interface 28 * 29 * The generic software interface (GSI) is an integral component of the IPA, 30 * providing a well-defined communication layer between the AP subsystem 31 * and the IPA core. The modem uses the GSI layer as well. 32 * 33 * -------- --------- 34 * | | | | 35 * | AP +<---. .----+ Modem | 36 * | +--. | | .->+ | 37 * | | | | | | | | 38 * -------- | | | | --------- 39 * v | v | 40 * --+-+---+-+-- 41 * | GSI | 42 * |-----------| 43 * | | 44 * | IPA | 45 * | | 46 * ------------- 47 * 48 * In the above diagram, the AP and Modem represent "execution environments" 49 * (EEs), which are independent operating environments that use the IPA for 50 * data transfer. 51 * 52 * Each EE uses a set of unidirectional GSI "channels," which allow transfer 53 * of data to or from the IPA. A channel is implemented as a ring buffer, 54 * with a DRAM-resident array of "transfer elements" (TREs) available to 55 * describe transfers to or from other EEs through the IPA. A transfer 56 * element can also contain an immediate command, requesting the IPA perform 57 * actions other than data transfer. 58 * 59 * Each TRE refers to a block of data--also located DRAM. After writing one 60 * or more TREs to a channel, the writer (either the IPA or an EE) writes a 61 * doorbell register to inform the receiving side how many elements have 62 * been written. 63 * 64 * Each channel has a GSI "event ring" associated with it. An event ring 65 * is implemented very much like a channel ring, but is always directed from 66 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel 67 * events by adding an entry to the event ring associated with the channel. 68 * The GSI then writes its doorbell for the event ring, causing the target 69 * EE to be interrupted. Each entry in an event ring contains a pointer 70 * to the channel TRE whose completion the event represents. 71 * 72 * Each TRE in a channel ring has a set of flags. One flag indicates whether 73 * the completion of the transfer operation generates an entry (and possibly 74 * an interrupt) in the channel's event ring. Other flags allow transfer 75 * elements to be chained together, forming a single logical transaction. 76 * TRE flags are used to control whether and when interrupts are generated 77 * to signal completion of channel transfers. 78 * 79 * Elements in channel and event rings are completed (or consumed) strictly 80 * in order. Completion of one entry implies the completion of all preceding 81 * entries. A single completion interrupt can therefore communicate the 82 * completion of many transfers. 83 * 84 * Note that all GSI registers are little-endian, which is the assumed 85 * endianness of I/O space accesses. The accessor functions perform byte 86 * swapping if needed (i.e., for a big endian CPU). 87 */ 88 89 /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */ 90 #define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */ 91 92 #define GSI_CMD_TIMEOUT 5 /* seconds */ 93 94 #define GSI_CHANNEL_STOP_RX_RETRIES 10 95 #define GSI_CHANNEL_MODEM_HALT_RETRIES 10 96 97 #define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */ 98 #define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */ 99 100 #define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */ 101 102 /* An entry in an event ring */ 103 struct gsi_event { 104 __le64 xfer_ptr; 105 __le16 len; 106 u8 reserved1; 107 u8 code; 108 __le16 reserved2; 109 u8 type; 110 u8 chid; 111 }; 112 113 /** gsi_channel_scratch_gpi - GPI protocol scratch register 114 * @max_outstanding_tre: 115 * Defines the maximum number of TREs allowed in a single transaction 116 * on a channel (in bytes). This determines the amount of prefetch 117 * performed by the hardware. We configure this to equal the size of 118 * the TLV FIFO for the channel. 119 * @outstanding_threshold: 120 * Defines the threshold (in bytes) determining when the sequencer 121 * should update the channel doorbell. We configure this to equal 122 * the size of two TREs. 123 */ 124 struct gsi_channel_scratch_gpi { 125 u64 reserved1; 126 u16 reserved2; 127 u16 max_outstanding_tre; 128 u16 reserved3; 129 u16 outstanding_threshold; 130 }; 131 132 /** gsi_channel_scratch - channel scratch configuration area 133 * 134 * The exact interpretation of this register is protocol-specific. 135 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above. 136 */ 137 union gsi_channel_scratch { 138 struct gsi_channel_scratch_gpi gpi; 139 struct { 140 u32 word1; 141 u32 word2; 142 u32 word3; 143 u32 word4; 144 } data; 145 }; 146 147 /* Check things that can be validated at build time. */ 148 static void gsi_validate_build(void) 149 { 150 /* This is used as a divisor */ 151 BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE); 152 153 /* Code assumes the size of channel and event ring element are 154 * the same (and fixed). Make sure the size of an event ring 155 * element is what's expected. 156 */ 157 BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE); 158 159 /* Hardware requires a 2^n ring size. We ensure the number of 160 * elements in an event ring is a power of 2 elsewhere; this 161 * ensure the elements themselves meet the requirement. 162 */ 163 BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE)); 164 165 /* The channel element size must fit in this field */ 166 BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK)); 167 168 /* The event ring element size must fit in this field */ 169 BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK)); 170 } 171 172 /* Return the channel id associated with a given channel */ 173 static u32 gsi_channel_id(struct gsi_channel *channel) 174 { 175 return channel - &channel->gsi->channel[0]; 176 } 177 178 /* Update the GSI IRQ type register with the cached value */ 179 static void gsi_irq_type_update(struct gsi *gsi, u32 val) 180 { 181 gsi->type_enabled_bitmap = val; 182 iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); 183 } 184 185 static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id) 186 { 187 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id)); 188 } 189 190 static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) 191 { 192 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id)); 193 } 194 195 /* Turn off all GSI interrupts initially */ 196 static void gsi_irq_setup(struct gsi *gsi) 197 { 198 u32 adjust; 199 200 /* Disable all interrupt types */ 201 gsi_irq_type_update(gsi, 0); 202 203 /* Clear all type-specific interrupt masks */ 204 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 205 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 206 iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 207 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 208 209 /* Reverse the offset adjustment for inter-EE register offsets */ 210 adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST; 211 iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET); 212 iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET); 213 214 iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 215 } 216 217 /* Turn off all GSI interrupts when we're all done */ 218 static void gsi_irq_teardown(struct gsi *gsi) 219 { 220 /* Nothing to do */ 221 } 222 223 static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id) 224 { 225 bool enable_ieob = !gsi->ieob_enabled_bitmap; 226 u32 val; 227 228 gsi->ieob_enabled_bitmap |= BIT(evt_ring_id); 229 val = gsi->ieob_enabled_bitmap; 230 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 231 232 /* Enable the interrupt type if this is the first channel enabled */ 233 if (enable_ieob) 234 gsi_irq_type_enable(gsi, GSI_IEOB); 235 } 236 237 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id) 238 { 239 u32 val; 240 241 gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id); 242 243 /* Disable the interrupt type if this was the last enabled channel */ 244 if (!gsi->ieob_enabled_bitmap) 245 gsi_irq_type_disable(gsi, GSI_IEOB); 246 247 val = gsi->ieob_enabled_bitmap; 248 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 249 } 250 251 /* Enable all GSI_interrupt types */ 252 static void gsi_irq_enable(struct gsi *gsi) 253 { 254 u32 val; 255 256 /* Global interrupts include hardware error reports. Enable 257 * that so we can at least report the error should it occur. 258 */ 259 iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 260 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE)); 261 262 /* General GSI interrupts are reported to all EEs; if they occur 263 * they are unrecoverable (without reset). A breakpoint interrupt 264 * also exists, but we don't support that. We want to be notified 265 * of errors so we can report them, even if they can't be handled. 266 */ 267 val = BIT(BUS_ERROR); 268 val |= BIT(CMD_FIFO_OVRFLOW); 269 val |= BIT(MCS_STACK_OVRFLOW); 270 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 271 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL)); 272 } 273 274 /* Disable all GSI interrupt types */ 275 static void gsi_irq_disable(struct gsi *gsi) 276 { 277 gsi_irq_type_update(gsi, 0); 278 279 /* Clear the type-specific interrupt masks set by gsi_irq_enable() */ 280 iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 281 iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 282 } 283 284 /* Return the virtual address associated with a ring index */ 285 void *gsi_ring_virt(struct gsi_ring *ring, u32 index) 286 { 287 /* Note: index *must* be used modulo the ring count here */ 288 return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE; 289 } 290 291 /* Return the 32-bit DMA address associated with a ring index */ 292 static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index) 293 { 294 return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE; 295 } 296 297 /* Return the ring index of a 32-bit ring offset */ 298 static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset) 299 { 300 return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE; 301 } 302 303 /* Issue a GSI command by writing a value to a register, then wait for 304 * completion to be signaled. Returns true if the command completes 305 * or false if it times out. 306 */ 307 static bool 308 gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion) 309 { 310 reinit_completion(completion); 311 312 iowrite32(val, gsi->virt + reg); 313 314 return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ); 315 } 316 317 /* Return the hardware's notion of the current state of an event ring */ 318 static enum gsi_evt_ring_state 319 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id) 320 { 321 u32 val; 322 323 val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); 324 325 return u32_get_bits(val, EV_CHSTATE_FMASK); 326 } 327 328 /* Issue an event ring command and wait for it to complete */ 329 static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id, 330 enum gsi_evt_cmd_opcode opcode) 331 { 332 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 333 struct completion *completion = &evt_ring->completion; 334 struct device *dev = gsi->dev; 335 bool success; 336 u32 val; 337 338 /* We only perform one event ring command at a time, and event 339 * control interrupts should only occur when such a command 340 * is issued here. Only permit *this* event ring to trigger 341 * an interrupt, and only enable the event control IRQ type 342 * when we expect it to occur. 343 * 344 * There's a small chance that a previous command completed 345 * after the interrupt was disabled, so make sure we have no 346 * pending interrupts before we enable them. 347 */ 348 iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); 349 350 val = BIT(evt_ring_id); 351 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 352 gsi_irq_type_enable(gsi, GSI_EV_CTRL); 353 354 val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK); 355 val |= u32_encode_bits(opcode, EV_OPCODE_FMASK); 356 357 success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion); 358 359 /* Disable the interrupt again */ 360 gsi_irq_type_disable(gsi, GSI_EV_CTRL); 361 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 362 363 if (success) 364 return; 365 366 dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n", 367 opcode, evt_ring_id, evt_ring->state); 368 } 369 370 /* Allocate an event ring in NOT_ALLOCATED state */ 371 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) 372 { 373 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 374 375 /* Get initial event ring state */ 376 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); 377 if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) { 378 dev_err(gsi->dev, "event ring %u bad state %u before alloc\n", 379 evt_ring_id, evt_ring->state); 380 return -EINVAL; 381 } 382 383 evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); 384 385 /* If successful the event ring state will have changed */ 386 if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED) 387 return 0; 388 389 dev_err(gsi->dev, "event ring %u bad state %u after alloc\n", 390 evt_ring_id, evt_ring->state); 391 392 return -EIO; 393 } 394 395 /* Reset a GSI event ring in ALLOCATED or ERROR state. */ 396 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) 397 { 398 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 399 enum gsi_evt_ring_state state = evt_ring->state; 400 401 if (state != GSI_EVT_RING_STATE_ALLOCATED && 402 state != GSI_EVT_RING_STATE_ERROR) { 403 dev_err(gsi->dev, "event ring %u bad state %u before reset\n", 404 evt_ring_id, evt_ring->state); 405 return; 406 } 407 408 evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); 409 410 /* If successful the event ring state will have changed */ 411 if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED) 412 return; 413 414 dev_err(gsi->dev, "event ring %u bad state %u after reset\n", 415 evt_ring_id, evt_ring->state); 416 } 417 418 /* Issue a hardware de-allocation request for an allocated event ring */ 419 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) 420 { 421 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 422 423 if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { 424 dev_err(gsi->dev, "event ring %u state %u before dealloc\n", 425 evt_ring_id, evt_ring->state); 426 return; 427 } 428 429 evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); 430 431 /* If successful the event ring state will have changed */ 432 if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) 433 return; 434 435 dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n", 436 evt_ring_id, evt_ring->state); 437 } 438 439 /* Fetch the current state of a channel from hardware */ 440 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel) 441 { 442 u32 channel_id = gsi_channel_id(channel); 443 void *virt = channel->gsi->virt; 444 u32 val; 445 446 val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); 447 448 return u32_get_bits(val, CHSTATE_FMASK); 449 } 450 451 /* Issue a channel command and wait for it to complete */ 452 static void 453 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) 454 { 455 struct completion *completion = &channel->completion; 456 u32 channel_id = gsi_channel_id(channel); 457 struct gsi *gsi = channel->gsi; 458 struct device *dev = gsi->dev; 459 bool success; 460 u32 val; 461 462 /* We only perform one channel command at a time, and channel 463 * control interrupts should only occur when such a command is 464 * issued here. So we only permit *this* channel to trigger 465 * an interrupt and only enable the channel control IRQ type 466 * when we expect it to occur. 467 * 468 * There's a small chance that a previous command completed 469 * after the interrupt was disabled, so make sure we have no 470 * pending interrupts before we enable them. 471 */ 472 iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); 473 474 val = BIT(channel_id); 475 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 476 gsi_irq_type_enable(gsi, GSI_CH_CTRL); 477 478 val = u32_encode_bits(channel_id, CH_CHID_FMASK); 479 val |= u32_encode_bits(opcode, CH_OPCODE_FMASK); 480 success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion); 481 482 /* Disable the interrupt again */ 483 gsi_irq_type_disable(gsi, GSI_CH_CTRL); 484 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 485 486 if (success) 487 return; 488 489 dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", 490 opcode, channel_id, gsi_channel_state(channel)); 491 } 492 493 /* Allocate GSI channel in NOT_ALLOCATED state */ 494 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) 495 { 496 struct gsi_channel *channel = &gsi->channel[channel_id]; 497 struct device *dev = gsi->dev; 498 enum gsi_channel_state state; 499 500 /* Get initial channel state */ 501 state = gsi_channel_state(channel); 502 if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) { 503 dev_err(dev, "channel %u bad state %u before alloc\n", 504 channel_id, state); 505 return -EINVAL; 506 } 507 508 gsi_channel_command(channel, GSI_CH_ALLOCATE); 509 510 /* If successful the channel state will have changed */ 511 state = gsi_channel_state(channel); 512 if (state == GSI_CHANNEL_STATE_ALLOCATED) 513 return 0; 514 515 dev_err(dev, "channel %u bad state %u after alloc\n", 516 channel_id, state); 517 518 return -EIO; 519 } 520 521 /* Start an ALLOCATED channel */ 522 static int gsi_channel_start_command(struct gsi_channel *channel) 523 { 524 struct device *dev = channel->gsi->dev; 525 enum gsi_channel_state state; 526 527 state = gsi_channel_state(channel); 528 if (state != GSI_CHANNEL_STATE_ALLOCATED && 529 state != GSI_CHANNEL_STATE_STOPPED) { 530 dev_err(dev, "channel %u bad state %u before start\n", 531 gsi_channel_id(channel), state); 532 return -EINVAL; 533 } 534 535 gsi_channel_command(channel, GSI_CH_START); 536 537 /* If successful the channel state will have changed */ 538 state = gsi_channel_state(channel); 539 if (state == GSI_CHANNEL_STATE_STARTED) 540 return 0; 541 542 dev_err(dev, "channel %u bad state %u after start\n", 543 gsi_channel_id(channel), state); 544 545 return -EIO; 546 } 547 548 /* Stop a GSI channel in STARTED state */ 549 static int gsi_channel_stop_command(struct gsi_channel *channel) 550 { 551 struct device *dev = channel->gsi->dev; 552 enum gsi_channel_state state; 553 554 state = gsi_channel_state(channel); 555 556 /* Channel could have entered STOPPED state since last call 557 * if it timed out. If so, we're done. 558 */ 559 if (state == GSI_CHANNEL_STATE_STOPPED) 560 return 0; 561 562 if (state != GSI_CHANNEL_STATE_STARTED && 563 state != GSI_CHANNEL_STATE_STOP_IN_PROC) { 564 dev_err(dev, "channel %u bad state %u before stop\n", 565 gsi_channel_id(channel), state); 566 return -EINVAL; 567 } 568 569 gsi_channel_command(channel, GSI_CH_STOP); 570 571 /* If successful the channel state will have changed */ 572 state = gsi_channel_state(channel); 573 if (state == GSI_CHANNEL_STATE_STOPPED) 574 return 0; 575 576 /* We may have to try again if stop is in progress */ 577 if (state == GSI_CHANNEL_STATE_STOP_IN_PROC) 578 return -EAGAIN; 579 580 dev_err(dev, "channel %u bad state %u after stop\n", 581 gsi_channel_id(channel), state); 582 583 return -EIO; 584 } 585 586 /* Reset a GSI channel in ALLOCATED or ERROR state. */ 587 static void gsi_channel_reset_command(struct gsi_channel *channel) 588 { 589 struct device *dev = channel->gsi->dev; 590 enum gsi_channel_state state; 591 592 msleep(1); /* A short delay is required before a RESET command */ 593 594 state = gsi_channel_state(channel); 595 if (state != GSI_CHANNEL_STATE_STOPPED && 596 state != GSI_CHANNEL_STATE_ERROR) { 597 /* No need to reset a channel already in ALLOCATED state */ 598 if (state != GSI_CHANNEL_STATE_ALLOCATED) 599 dev_err(dev, "channel %u bad state %u before reset\n", 600 gsi_channel_id(channel), state); 601 return; 602 } 603 604 gsi_channel_command(channel, GSI_CH_RESET); 605 606 /* If successful the channel state will have changed */ 607 state = gsi_channel_state(channel); 608 if (state != GSI_CHANNEL_STATE_ALLOCATED) 609 dev_err(dev, "channel %u bad state %u after reset\n", 610 gsi_channel_id(channel), state); 611 } 612 613 /* Deallocate an ALLOCATED GSI channel */ 614 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) 615 { 616 struct gsi_channel *channel = &gsi->channel[channel_id]; 617 struct device *dev = gsi->dev; 618 enum gsi_channel_state state; 619 620 state = gsi_channel_state(channel); 621 if (state != GSI_CHANNEL_STATE_ALLOCATED) { 622 dev_err(dev, "channel %u bad state %u before dealloc\n", 623 channel_id, state); 624 return; 625 } 626 627 gsi_channel_command(channel, GSI_CH_DE_ALLOC); 628 629 /* If successful the channel state will have changed */ 630 state = gsi_channel_state(channel); 631 632 if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) 633 dev_err(dev, "channel %u bad state %u after dealloc\n", 634 channel_id, state); 635 } 636 637 /* Ring an event ring doorbell, reporting the last entry processed by the AP. 638 * The index argument (modulo the ring count) is the first unfilled entry, so 639 * we supply one less than that with the doorbell. Update the event ring 640 * index field with the value provided. 641 */ 642 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index) 643 { 644 struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring; 645 u32 val; 646 647 ring->index = index; /* Next unused entry */ 648 649 /* Note: index *must* be used modulo the ring count here */ 650 val = gsi_ring_addr(ring, (index - 1) % ring->count); 651 iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id)); 652 } 653 654 /* Program an event ring for use */ 655 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) 656 { 657 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 658 size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE; 659 u32 val; 660 661 /* We program all event rings as GPI type/protocol */ 662 val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK); 663 val |= EV_INTYPE_FMASK; 664 val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK); 665 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); 666 667 val = u32_encode_bits(size, EV_R_LENGTH_FMASK); 668 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id)); 669 670 /* The context 2 and 3 registers store the low-order and 671 * high-order 32 bits of the address of the event ring, 672 * respectively. 673 */ 674 val = evt_ring->ring.addr & GENMASK(31, 0); 675 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id)); 676 677 val = evt_ring->ring.addr >> 32; 678 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id)); 679 680 /* Enable interrupt moderation by setting the moderation delay */ 681 val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK); 682 val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */ 683 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id)); 684 685 /* No MSI write data, and MSI address high and low address is 0 */ 686 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id)); 687 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id)); 688 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id)); 689 690 /* We don't need to get event read pointer updates */ 691 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id)); 692 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id)); 693 694 /* Finally, tell the hardware we've completed event 0 (arbitrary) */ 695 gsi_evt_ring_doorbell(gsi, evt_ring_id, 0); 696 } 697 698 /* Return the last (most recent) transaction completed on a channel. */ 699 static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel) 700 { 701 struct gsi_trans_info *trans_info = &channel->trans_info; 702 struct gsi_trans *trans; 703 704 spin_lock_bh(&trans_info->spinlock); 705 706 if (!list_empty(&trans_info->complete)) 707 trans = list_last_entry(&trans_info->complete, 708 struct gsi_trans, links); 709 else if (!list_empty(&trans_info->polled)) 710 trans = list_last_entry(&trans_info->polled, 711 struct gsi_trans, links); 712 else 713 trans = NULL; 714 715 /* Caller will wait for this, so take a reference */ 716 if (trans) 717 refcount_inc(&trans->refcount); 718 719 spin_unlock_bh(&trans_info->spinlock); 720 721 return trans; 722 } 723 724 /* Wait for transaction activity on a channel to complete */ 725 static void gsi_channel_trans_quiesce(struct gsi_channel *channel) 726 { 727 struct gsi_trans *trans; 728 729 /* Get the last transaction, and wait for it to complete */ 730 trans = gsi_channel_trans_last(channel); 731 if (trans) { 732 wait_for_completion(&trans->completion); 733 gsi_trans_free(trans); 734 } 735 } 736 737 /* Stop channel activity. Transactions may not be allocated until thawed. */ 738 static void gsi_channel_freeze(struct gsi_channel *channel) 739 { 740 gsi_channel_trans_quiesce(channel); 741 742 napi_disable(&channel->napi); 743 744 gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id); 745 } 746 747 /* Allow transactions to be used on the channel again. */ 748 static void gsi_channel_thaw(struct gsi_channel *channel) 749 { 750 gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); 751 752 napi_enable(&channel->napi); 753 } 754 755 /* Program a channel for use */ 756 static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) 757 { 758 size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE; 759 u32 channel_id = gsi_channel_id(channel); 760 union gsi_channel_scratch scr = { }; 761 struct gsi_channel_scratch_gpi *gpi; 762 struct gsi *gsi = channel->gsi; 763 u32 wrr_weight = 0; 764 u32 val; 765 766 /* Arbitrarily pick TRE 0 as the first channel element to use */ 767 channel->tre_ring.index = 0; 768 769 /* We program all channels as GPI type/protocol */ 770 val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK); 771 if (channel->toward_ipa) 772 val |= CHTYPE_DIR_FMASK; 773 val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK); 774 val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK); 775 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); 776 777 val = u32_encode_bits(size, R_LENGTH_FMASK); 778 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id)); 779 780 /* The context 2 and 3 registers store the low-order and 781 * high-order 32 bits of the address of the channel ring, 782 * respectively. 783 */ 784 val = channel->tre_ring.addr & GENMASK(31, 0); 785 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id)); 786 787 val = channel->tre_ring.addr >> 32; 788 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id)); 789 790 /* Command channel gets low weighted round-robin priority */ 791 if (channel->command) 792 wrr_weight = field_max(WRR_WEIGHT_FMASK); 793 val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK); 794 795 /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */ 796 797 /* We enable the doorbell engine for IPA v3.5.1 */ 798 if (gsi->version == IPA_VERSION_3_5_1 && doorbell) 799 val |= USE_DB_ENG_FMASK; 800 801 /* v4.0 introduces an escape buffer for prefetch. We use it 802 * on all but the AP command channel. 803 */ 804 if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) { 805 /* If not otherwise set, prefetch buffers are used */ 806 if (gsi->version < IPA_VERSION_4_5) 807 val |= USE_ESCAPE_BUF_ONLY_FMASK; 808 else 809 val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY, 810 PREFETCH_MODE_FMASK); 811 } 812 813 iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id)); 814 815 /* Now update the scratch registers for GPI protocol */ 816 gpi = &scr.gpi; 817 gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) * 818 GSI_RING_ELEMENT_SIZE; 819 gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE; 820 821 val = scr.data.word1; 822 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id)); 823 824 val = scr.data.word2; 825 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id)); 826 827 val = scr.data.word3; 828 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id)); 829 830 /* We must preserve the upper 16 bits of the last scratch register. 831 * The next sequence assumes those bits remain unchanged between the 832 * read and the write. 833 */ 834 val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); 835 val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0)); 836 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); 837 838 /* All done! */ 839 } 840 841 static void gsi_channel_deprogram(struct gsi_channel *channel) 842 { 843 /* Nothing to do */ 844 } 845 846 /* Start an allocated GSI channel */ 847 int gsi_channel_start(struct gsi *gsi, u32 channel_id) 848 { 849 struct gsi_channel *channel = &gsi->channel[channel_id]; 850 int ret; 851 852 mutex_lock(&gsi->mutex); 853 854 ret = gsi_channel_start_command(channel); 855 856 mutex_unlock(&gsi->mutex); 857 858 gsi_channel_thaw(channel); 859 860 return ret; 861 } 862 863 /* Stop a started channel */ 864 int gsi_channel_stop(struct gsi *gsi, u32 channel_id) 865 { 866 struct gsi_channel *channel = &gsi->channel[channel_id]; 867 u32 retries; 868 int ret; 869 870 gsi_channel_freeze(channel); 871 872 /* RX channels might require a little time to enter STOPPED state */ 873 retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES; 874 875 mutex_lock(&gsi->mutex); 876 877 do { 878 ret = gsi_channel_stop_command(channel); 879 if (ret != -EAGAIN) 880 break; 881 msleep(1); 882 } while (retries--); 883 884 mutex_unlock(&gsi->mutex); 885 886 /* Thaw the channel if we need to retry (or on error) */ 887 if (ret) 888 gsi_channel_thaw(channel); 889 890 return ret; 891 } 892 893 /* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */ 894 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) 895 { 896 struct gsi_channel *channel = &gsi->channel[channel_id]; 897 898 mutex_lock(&gsi->mutex); 899 900 gsi_channel_reset_command(channel); 901 /* Due to a hardware quirk we may need to reset RX channels twice. */ 902 if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa) 903 gsi_channel_reset_command(channel); 904 905 gsi_channel_program(channel, doorbell); 906 gsi_channel_trans_cancel_pending(channel); 907 908 mutex_unlock(&gsi->mutex); 909 } 910 911 /* Stop a STARTED channel for suspend (using stop if requested) */ 912 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop) 913 { 914 struct gsi_channel *channel = &gsi->channel[channel_id]; 915 916 if (stop) 917 return gsi_channel_stop(gsi, channel_id); 918 919 gsi_channel_freeze(channel); 920 921 return 0; 922 } 923 924 /* Resume a suspended channel (starting will be requested if STOPPED) */ 925 int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start) 926 { 927 struct gsi_channel *channel = &gsi->channel[channel_id]; 928 929 if (start) 930 return gsi_channel_start(gsi, channel_id); 931 932 gsi_channel_thaw(channel); 933 934 return 0; 935 } 936 937 /** 938 * gsi_channel_tx_queued() - Report queued TX transfers for a channel 939 * @channel: Channel for which to report 940 * 941 * Report to the network stack the number of bytes and transactions that 942 * have been queued to hardware since last call. This and the next function 943 * supply information used by the network stack for throttling. 944 * 945 * For each channel we track the number of transactions used and bytes of 946 * data those transactions represent. We also track what those values are 947 * each time this function is called. Subtracting the two tells us 948 * the number of bytes and transactions that have been added between 949 * successive calls. 950 * 951 * Calling this each time we ring the channel doorbell allows us to 952 * provide accurate information to the network stack about how much 953 * work we've given the hardware at any point in time. 954 */ 955 void gsi_channel_tx_queued(struct gsi_channel *channel) 956 { 957 u32 trans_count; 958 u32 byte_count; 959 960 byte_count = channel->byte_count - channel->queued_byte_count; 961 trans_count = channel->trans_count - channel->queued_trans_count; 962 channel->queued_byte_count = channel->byte_count; 963 channel->queued_trans_count = channel->trans_count; 964 965 ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel), 966 trans_count, byte_count); 967 } 968 969 /** 970 * gsi_channel_tx_update() - Report completed TX transfers 971 * @channel: Channel that has completed transmitting packets 972 * @trans: Last transation known to be complete 973 * 974 * Compute the number of transactions and bytes that have been transferred 975 * over a TX channel since the given transaction was committed. Report this 976 * information to the network stack. 977 * 978 * At the time a transaction is committed, we record its channel's 979 * committed transaction and byte counts *in the transaction*. 980 * Completions are signaled by the hardware with an interrupt, and 981 * we can determine the latest completed transaction at that time. 982 * 983 * The difference between the byte/transaction count recorded in 984 * the transaction and the count last time we recorded a completion 985 * tells us exactly how much data has been transferred between 986 * completions. 987 * 988 * Calling this each time we learn of a newly-completed transaction 989 * allows us to provide accurate information to the network stack 990 * about how much work has been completed by the hardware at a given 991 * point in time. 992 */ 993 static void 994 gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans) 995 { 996 u64 byte_count = trans->byte_count + trans->len; 997 u64 trans_count = trans->trans_count + 1; 998 999 byte_count -= channel->compl_byte_count; 1000 channel->compl_byte_count += byte_count; 1001 trans_count -= channel->compl_trans_count; 1002 channel->compl_trans_count += trans_count; 1003 1004 ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel), 1005 trans_count, byte_count); 1006 } 1007 1008 /* Channel control interrupt handler */ 1009 static void gsi_isr_chan_ctrl(struct gsi *gsi) 1010 { 1011 u32 channel_mask; 1012 1013 channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET); 1014 iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); 1015 1016 while (channel_mask) { 1017 u32 channel_id = __ffs(channel_mask); 1018 struct gsi_channel *channel; 1019 1020 channel_mask ^= BIT(channel_id); 1021 1022 channel = &gsi->channel[channel_id]; 1023 1024 complete(&channel->completion); 1025 } 1026 } 1027 1028 /* Event ring control interrupt handler */ 1029 static void gsi_isr_evt_ctrl(struct gsi *gsi) 1030 { 1031 u32 event_mask; 1032 1033 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET); 1034 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); 1035 1036 while (event_mask) { 1037 u32 evt_ring_id = __ffs(event_mask); 1038 struct gsi_evt_ring *evt_ring; 1039 1040 event_mask ^= BIT(evt_ring_id); 1041 1042 evt_ring = &gsi->evt_ring[evt_ring_id]; 1043 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); 1044 1045 complete(&evt_ring->completion); 1046 } 1047 } 1048 1049 /* Global channel error interrupt handler */ 1050 static void 1051 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) 1052 { 1053 if (code == GSI_OUT_OF_RESOURCES) { 1054 dev_err(gsi->dev, "channel %u out of resources\n", channel_id); 1055 complete(&gsi->channel[channel_id].completion); 1056 return; 1057 } 1058 1059 /* Report, but otherwise ignore all other error codes */ 1060 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n", 1061 channel_id, err_ee, code); 1062 } 1063 1064 /* Global event error interrupt handler */ 1065 static void 1066 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code) 1067 { 1068 if (code == GSI_OUT_OF_RESOURCES) { 1069 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 1070 u32 channel_id = gsi_channel_id(evt_ring->channel); 1071 1072 complete(&evt_ring->completion); 1073 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", 1074 channel_id); 1075 return; 1076 } 1077 1078 /* Report, but otherwise ignore all other error codes */ 1079 dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n", 1080 evt_ring_id, err_ee, code); 1081 } 1082 1083 /* Global error interrupt handler */ 1084 static void gsi_isr_glob_err(struct gsi *gsi) 1085 { 1086 enum gsi_err_type type; 1087 enum gsi_err_code code; 1088 u32 which; 1089 u32 val; 1090 u32 ee; 1091 1092 /* Get the logged error, then reinitialize the log */ 1093 val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET); 1094 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); 1095 iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET); 1096 1097 ee = u32_get_bits(val, ERR_EE_FMASK); 1098 type = u32_get_bits(val, ERR_TYPE_FMASK); 1099 which = u32_get_bits(val, ERR_VIRT_IDX_FMASK); 1100 code = u32_get_bits(val, ERR_CODE_FMASK); 1101 1102 if (type == GSI_ERR_TYPE_CHAN) 1103 gsi_isr_glob_chan_err(gsi, ee, which, code); 1104 else if (type == GSI_ERR_TYPE_EVT) 1105 gsi_isr_glob_evt_err(gsi, ee, which, code); 1106 else /* type GSI_ERR_TYPE_GLOB should be fatal */ 1107 dev_err(gsi->dev, "unexpected global error 0x%08x\n", type); 1108 } 1109 1110 /* Generic EE interrupt handler */ 1111 static void gsi_isr_gp_int1(struct gsi *gsi) 1112 { 1113 u32 result; 1114 u32 val; 1115 1116 /* This interrupt is used to handle completions of the two GENERIC 1117 * GSI commands. We use these to allocate and halt channels on 1118 * the modem's behalf due to a hardware quirk on IPA v4.2. Once 1119 * allocated, the modem "owns" these channels, and as a result we 1120 * have no way of knowing the channel's state at any given time. 1121 * 1122 * It is recommended that we halt the modem channels we allocated 1123 * when shutting down, but it's possible the channel isn't running 1124 * at the time we issue the HALT command. We'll get an error in 1125 * that case, but it's harmless (the channel is already halted). 1126 * 1127 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error 1128 * if we receive it. 1129 */ 1130 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1131 result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK); 1132 1133 switch (result) { 1134 case GENERIC_EE_SUCCESS: 1135 case GENERIC_EE_CHANNEL_NOT_RUNNING: 1136 gsi->result = 0; 1137 break; 1138 1139 case GENERIC_EE_RETRY: 1140 gsi->result = -EAGAIN; 1141 break; 1142 1143 default: 1144 dev_err(gsi->dev, "global INT1 generic result %u\n", result); 1145 gsi->result = -EIO; 1146 break; 1147 } 1148 1149 complete(&gsi->completion); 1150 } 1151 1152 /* Inter-EE interrupt handler */ 1153 static void gsi_isr_glob_ee(struct gsi *gsi) 1154 { 1155 u32 val; 1156 1157 val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET); 1158 1159 if (val & BIT(ERROR_INT)) 1160 gsi_isr_glob_err(gsi); 1161 1162 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET); 1163 1164 val &= ~BIT(ERROR_INT); 1165 1166 if (val & BIT(GP_INT1)) { 1167 val ^= BIT(GP_INT1); 1168 gsi_isr_gp_int1(gsi); 1169 } 1170 1171 if (val) 1172 dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val); 1173 } 1174 1175 /* I/O completion interrupt event */ 1176 static void gsi_isr_ieob(struct gsi *gsi) 1177 { 1178 u32 event_mask; 1179 1180 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET); 1181 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET); 1182 1183 while (event_mask) { 1184 u32 evt_ring_id = __ffs(event_mask); 1185 1186 event_mask ^= BIT(evt_ring_id); 1187 1188 gsi_irq_ieob_disable(gsi, evt_ring_id); 1189 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi); 1190 } 1191 } 1192 1193 /* General event interrupts represent serious problems, so report them */ 1194 static void gsi_isr_general(struct gsi *gsi) 1195 { 1196 struct device *dev = gsi->dev; 1197 u32 val; 1198 1199 val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET); 1200 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET); 1201 1202 dev_err(dev, "unexpected general interrupt 0x%08x\n", val); 1203 } 1204 1205 /** 1206 * gsi_isr() - Top level GSI interrupt service routine 1207 * @irq: Interrupt number (ignored) 1208 * @dev_id: GSI pointer supplied to request_irq() 1209 * 1210 * This is the main handler function registered for the GSI IRQ. Each type 1211 * of interrupt has a separate handler function that is called from here. 1212 */ 1213 static irqreturn_t gsi_isr(int irq, void *dev_id) 1214 { 1215 struct gsi *gsi = dev_id; 1216 u32 intr_mask; 1217 u32 cnt = 0; 1218 1219 /* enum gsi_irq_type_id defines GSI interrupt types */ 1220 while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) { 1221 /* intr_mask contains bitmask of pending GSI interrupts */ 1222 do { 1223 u32 gsi_intr = BIT(__ffs(intr_mask)); 1224 1225 intr_mask ^= gsi_intr; 1226 1227 switch (gsi_intr) { 1228 case BIT(GSI_CH_CTRL): 1229 gsi_isr_chan_ctrl(gsi); 1230 break; 1231 case BIT(GSI_EV_CTRL): 1232 gsi_isr_evt_ctrl(gsi); 1233 break; 1234 case BIT(GSI_GLOB_EE): 1235 gsi_isr_glob_ee(gsi); 1236 break; 1237 case BIT(GSI_IEOB): 1238 gsi_isr_ieob(gsi); 1239 break; 1240 case BIT(GSI_GENERAL): 1241 gsi_isr_general(gsi); 1242 break; 1243 default: 1244 dev_err(gsi->dev, 1245 "unrecognized interrupt type 0x%08x\n", 1246 gsi_intr); 1247 break; 1248 } 1249 } while (intr_mask); 1250 1251 if (++cnt > GSI_ISR_MAX_ITER) { 1252 dev_err(gsi->dev, "interrupt flood\n"); 1253 break; 1254 } 1255 } 1256 1257 return IRQ_HANDLED; 1258 } 1259 1260 static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) 1261 { 1262 struct device *dev = &pdev->dev; 1263 unsigned int irq; 1264 int ret; 1265 1266 ret = platform_get_irq_byname(pdev, "gsi"); 1267 if (ret <= 0) { 1268 dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret); 1269 return ret ? : -EINVAL; 1270 } 1271 irq = ret; 1272 1273 ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); 1274 if (ret) { 1275 dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret); 1276 return ret; 1277 } 1278 gsi->irq = irq; 1279 1280 return 0; 1281 } 1282 1283 static void gsi_irq_exit(struct gsi *gsi) 1284 { 1285 free_irq(gsi->irq, gsi); 1286 } 1287 1288 /* Return the transaction associated with a transfer completion event */ 1289 static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, 1290 struct gsi_event *event) 1291 { 1292 u32 tre_offset; 1293 u32 tre_index; 1294 1295 /* Event xfer_ptr records the TRE it's associated with */ 1296 tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0); 1297 tre_index = gsi_ring_index(&channel->tre_ring, tre_offset); 1298 1299 return gsi_channel_trans_mapped(channel, tre_index); 1300 } 1301 1302 /** 1303 * gsi_evt_ring_rx_update() - Record lengths of received data 1304 * @evt_ring: Event ring associated with channel that received packets 1305 * @index: Event index in ring reported by hardware 1306 * 1307 * Events for RX channels contain the actual number of bytes received into 1308 * the buffer. Every event has a transaction associated with it, and here 1309 * we update transactions to record their actual received lengths. 1310 * 1311 * This function is called whenever we learn that the GSI hardware has filled 1312 * new events since the last time we checked. The ring's index field tells 1313 * the first entry in need of processing. The index provided is the 1314 * first *unfilled* event in the ring (following the last filled one). 1315 * 1316 * Events are sequential within the event ring, and transactions are 1317 * sequential within the transaction pool. 1318 * 1319 * Note that @index always refers to an element *within* the event ring. 1320 */ 1321 static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) 1322 { 1323 struct gsi_channel *channel = evt_ring->channel; 1324 struct gsi_ring *ring = &evt_ring->ring; 1325 struct gsi_trans_info *trans_info; 1326 struct gsi_event *event_done; 1327 struct gsi_event *event; 1328 struct gsi_trans *trans; 1329 u32 byte_count = 0; 1330 u32 old_index; 1331 u32 event_avail; 1332 1333 trans_info = &channel->trans_info; 1334 1335 /* We'll start with the oldest un-processed event. RX channels 1336 * replenish receive buffers in single-TRE transactions, so we 1337 * can just map that event to its transaction. Transactions 1338 * associated with completion events are consecutive. 1339 */ 1340 old_index = ring->index; 1341 event = gsi_ring_virt(ring, old_index); 1342 trans = gsi_event_trans(channel, event); 1343 1344 /* Compute the number of events to process before we wrap, 1345 * and determine when we'll be done processing events. 1346 */ 1347 event_avail = ring->count - old_index % ring->count; 1348 event_done = gsi_ring_virt(ring, index); 1349 do { 1350 trans->len = __le16_to_cpu(event->len); 1351 byte_count += trans->len; 1352 1353 /* Move on to the next event and transaction */ 1354 if (--event_avail) 1355 event++; 1356 else 1357 event = gsi_ring_virt(ring, 0); 1358 trans = gsi_trans_pool_next(&trans_info->pool, trans); 1359 } while (event != event_done); 1360 1361 /* We record RX bytes when they are received */ 1362 channel->byte_count += byte_count; 1363 channel->trans_count++; 1364 } 1365 1366 /* Initialize a ring, including allocating DMA memory for its entries */ 1367 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) 1368 { 1369 size_t size = count * GSI_RING_ELEMENT_SIZE; 1370 struct device *dev = gsi->dev; 1371 dma_addr_t addr; 1372 1373 /* Hardware requires a 2^n ring size, with alignment equal to size */ 1374 ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); 1375 if (ring->virt && addr % size) { 1376 dma_free_coherent(dev, size, ring->virt, ring->addr); 1377 dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", 1378 size); 1379 return -EINVAL; /* Not a good error value, but distinct */ 1380 } else if (!ring->virt) { 1381 return -ENOMEM; 1382 } 1383 ring->addr = addr; 1384 ring->count = count; 1385 1386 return 0; 1387 } 1388 1389 /* Free a previously-allocated ring */ 1390 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring) 1391 { 1392 size_t size = ring->count * GSI_RING_ELEMENT_SIZE; 1393 1394 dma_free_coherent(gsi->dev, size, ring->virt, ring->addr); 1395 } 1396 1397 /* Allocate an available event ring id */ 1398 static int gsi_evt_ring_id_alloc(struct gsi *gsi) 1399 { 1400 u32 evt_ring_id; 1401 1402 if (gsi->event_bitmap == ~0U) { 1403 dev_err(gsi->dev, "event rings exhausted\n"); 1404 return -ENOSPC; 1405 } 1406 1407 evt_ring_id = ffz(gsi->event_bitmap); 1408 gsi->event_bitmap |= BIT(evt_ring_id); 1409 1410 return (int)evt_ring_id; 1411 } 1412 1413 /* Free a previously-allocated event ring id */ 1414 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id) 1415 { 1416 gsi->event_bitmap &= ~BIT(evt_ring_id); 1417 } 1418 1419 /* Ring a channel doorbell, reporting the first un-filled entry */ 1420 void gsi_channel_doorbell(struct gsi_channel *channel) 1421 { 1422 struct gsi_ring *tre_ring = &channel->tre_ring; 1423 u32 channel_id = gsi_channel_id(channel); 1424 struct gsi *gsi = channel->gsi; 1425 u32 val; 1426 1427 /* Note: index *must* be used modulo the ring count here */ 1428 val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count); 1429 iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id)); 1430 } 1431 1432 /* Consult hardware, move any newly completed transactions to completed list */ 1433 static void gsi_channel_update(struct gsi_channel *channel) 1434 { 1435 u32 evt_ring_id = channel->evt_ring_id; 1436 struct gsi *gsi = channel->gsi; 1437 struct gsi_evt_ring *evt_ring; 1438 struct gsi_trans *trans; 1439 struct gsi_ring *ring; 1440 u32 offset; 1441 u32 index; 1442 1443 evt_ring = &gsi->evt_ring[evt_ring_id]; 1444 ring = &evt_ring->ring; 1445 1446 /* See if there's anything new to process; if not, we're done. Note 1447 * that index always refers to an entry *within* the event ring. 1448 */ 1449 offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id); 1450 index = gsi_ring_index(ring, ioread32(gsi->virt + offset)); 1451 if (index == ring->index % ring->count) 1452 return; 1453 1454 /* Get the transaction for the latest completed event. Take a 1455 * reference to keep it from completing before we give the events 1456 * for this and previous transactions back to the hardware. 1457 */ 1458 trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1)); 1459 refcount_inc(&trans->refcount); 1460 1461 /* For RX channels, update each completed transaction with the number 1462 * of bytes that were actually received. For TX channels, report 1463 * the number of transactions and bytes this completion represents 1464 * up the network stack. 1465 */ 1466 if (channel->toward_ipa) 1467 gsi_channel_tx_update(channel, trans); 1468 else 1469 gsi_evt_ring_rx_update(evt_ring, index); 1470 1471 gsi_trans_move_complete(trans); 1472 1473 /* Tell the hardware we've handled these events */ 1474 gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index); 1475 1476 gsi_trans_free(trans); 1477 } 1478 1479 /** 1480 * gsi_channel_poll_one() - Return a single completed transaction on a channel 1481 * @channel: Channel to be polled 1482 * 1483 * Return: Transaction pointer, or null if none are available 1484 * 1485 * This function returns the first entry on a channel's completed transaction 1486 * list. If that list is empty, the hardware is consulted to determine 1487 * whether any new transactions have completed. If so, they're moved to the 1488 * completed list and the new first entry is returned. If there are no more 1489 * completed transactions, a null pointer is returned. 1490 */ 1491 static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) 1492 { 1493 struct gsi_trans *trans; 1494 1495 /* Get the first transaction from the completed list */ 1496 trans = gsi_channel_trans_complete(channel); 1497 if (!trans) { 1498 /* List is empty; see if there's more to do */ 1499 gsi_channel_update(channel); 1500 trans = gsi_channel_trans_complete(channel); 1501 } 1502 1503 if (trans) 1504 gsi_trans_move_polled(trans); 1505 1506 return trans; 1507 } 1508 1509 /** 1510 * gsi_channel_poll() - NAPI poll function for a channel 1511 * @napi: NAPI structure for the channel 1512 * @budget: Budget supplied by NAPI core 1513 * 1514 * Return: Number of items polled (<= budget) 1515 * 1516 * Single transactions completed by hardware are polled until either 1517 * the budget is exhausted, or there are no more. Each transaction 1518 * polled is passed to gsi_trans_complete(), to perform remaining 1519 * completion processing and retire/free the transaction. 1520 */ 1521 static int gsi_channel_poll(struct napi_struct *napi, int budget) 1522 { 1523 struct gsi_channel *channel; 1524 int count = 0; 1525 1526 channel = container_of(napi, struct gsi_channel, napi); 1527 while (count < budget) { 1528 struct gsi_trans *trans; 1529 1530 count++; 1531 trans = gsi_channel_poll_one(channel); 1532 if (!trans) 1533 break; 1534 gsi_trans_complete(trans); 1535 } 1536 1537 if (count < budget) { 1538 napi_complete(&channel->napi); 1539 gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); 1540 } 1541 1542 return count; 1543 } 1544 1545 /* The event bitmap represents which event ids are available for allocation. 1546 * Set bits are not available, clear bits can be used. This function 1547 * initializes the map so all events supported by the hardware are available, 1548 * then precludes any reserved events from being allocated. 1549 */ 1550 static u32 gsi_event_bitmap_init(u32 evt_ring_max) 1551 { 1552 u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max); 1553 1554 event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START); 1555 1556 return event_bitmap; 1557 } 1558 1559 /* Setup function for event rings */ 1560 static void gsi_evt_ring_setup(struct gsi *gsi) 1561 { 1562 /* Nothing to do */ 1563 } 1564 1565 /* Inverse of gsi_evt_ring_setup() */ 1566 static void gsi_evt_ring_teardown(struct gsi *gsi) 1567 { 1568 /* Nothing to do */ 1569 } 1570 1571 /* Setup function for a single channel */ 1572 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) 1573 { 1574 struct gsi_channel *channel = &gsi->channel[channel_id]; 1575 u32 evt_ring_id = channel->evt_ring_id; 1576 int ret; 1577 1578 if (!channel->gsi) 1579 return 0; /* Ignore uninitialized channels */ 1580 1581 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id); 1582 if (ret) 1583 return ret; 1584 1585 gsi_evt_ring_program(gsi, evt_ring_id); 1586 1587 ret = gsi_channel_alloc_command(gsi, channel_id); 1588 if (ret) 1589 goto err_evt_ring_de_alloc; 1590 1591 gsi_channel_program(channel, true); 1592 1593 if (channel->toward_ipa) 1594 netif_tx_napi_add(&gsi->dummy_dev, &channel->napi, 1595 gsi_channel_poll, NAPI_POLL_WEIGHT); 1596 else 1597 netif_napi_add(&gsi->dummy_dev, &channel->napi, 1598 gsi_channel_poll, NAPI_POLL_WEIGHT); 1599 1600 return 0; 1601 1602 err_evt_ring_de_alloc: 1603 /* We've done nothing with the event ring yet so don't reset */ 1604 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); 1605 1606 return ret; 1607 } 1608 1609 /* Inverse of gsi_channel_setup_one() */ 1610 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) 1611 { 1612 struct gsi_channel *channel = &gsi->channel[channel_id]; 1613 u32 evt_ring_id = channel->evt_ring_id; 1614 1615 if (!channel->gsi) 1616 return; /* Ignore uninitialized channels */ 1617 1618 netif_napi_del(&channel->napi); 1619 1620 gsi_channel_deprogram(channel); 1621 gsi_channel_de_alloc_command(gsi, channel_id); 1622 gsi_evt_ring_reset_command(gsi, evt_ring_id); 1623 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); 1624 } 1625 1626 static int gsi_generic_command(struct gsi *gsi, u32 channel_id, 1627 enum gsi_generic_cmd_opcode opcode) 1628 { 1629 struct completion *completion = &gsi->completion; 1630 bool success; 1631 u32 val; 1632 1633 /* The error global interrupt type is always enabled (until we 1634 * teardown), so we won't change that. A generic EE command 1635 * completes with a GSI global interrupt of type GP_INT1. We 1636 * only perform one generic command at a time (to allocate or 1637 * halt a modem channel) and only from this function. So we 1638 * enable the GP_INT1 IRQ type here while we're expecting it. 1639 */ 1640 val = BIT(ERROR_INT) | BIT(GP_INT1); 1641 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 1642 1643 /* First zero the result code field */ 1644 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1645 val &= ~GENERIC_EE_RESULT_FMASK; 1646 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1647 1648 /* Now issue the command */ 1649 val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK); 1650 val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); 1651 val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); 1652 1653 success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion); 1654 1655 /* Disable the GP_INT1 IRQ type again */ 1656 iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 1657 1658 if (success) 1659 return gsi->result; 1660 1661 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", 1662 opcode, channel_id); 1663 1664 return -ETIMEDOUT; 1665 } 1666 1667 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) 1668 { 1669 return gsi_generic_command(gsi, channel_id, 1670 GSI_GENERIC_ALLOCATE_CHANNEL); 1671 } 1672 1673 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) 1674 { 1675 u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES; 1676 int ret; 1677 1678 do 1679 ret = gsi_generic_command(gsi, channel_id, 1680 GSI_GENERIC_HALT_CHANNEL); 1681 while (ret == -EAGAIN && retries--); 1682 1683 if (ret) 1684 dev_err(gsi->dev, "error %d halting modem channel %u\n", 1685 ret, channel_id); 1686 } 1687 1688 /* Setup function for channels */ 1689 static int gsi_channel_setup(struct gsi *gsi) 1690 { 1691 u32 channel_id = 0; 1692 u32 mask; 1693 int ret; 1694 1695 gsi_evt_ring_setup(gsi); 1696 gsi_irq_enable(gsi); 1697 1698 mutex_lock(&gsi->mutex); 1699 1700 do { 1701 ret = gsi_channel_setup_one(gsi, channel_id); 1702 if (ret) 1703 goto err_unwind; 1704 } while (++channel_id < gsi->channel_count); 1705 1706 /* Make sure no channels were defined that hardware does not support */ 1707 while (channel_id < GSI_CHANNEL_COUNT_MAX) { 1708 struct gsi_channel *channel = &gsi->channel[channel_id++]; 1709 1710 if (!channel->gsi) 1711 continue; /* Ignore uninitialized channels */ 1712 1713 dev_err(gsi->dev, "channel %u not supported by hardware\n", 1714 channel_id - 1); 1715 channel_id = gsi->channel_count; 1716 goto err_unwind; 1717 } 1718 1719 /* Allocate modem channels if necessary */ 1720 mask = gsi->modem_channel_bitmap; 1721 while (mask) { 1722 u32 modem_channel_id = __ffs(mask); 1723 1724 ret = gsi_modem_channel_alloc(gsi, modem_channel_id); 1725 if (ret) 1726 goto err_unwind_modem; 1727 1728 /* Clear bit from mask only after success (for unwind) */ 1729 mask ^= BIT(modem_channel_id); 1730 } 1731 1732 mutex_unlock(&gsi->mutex); 1733 1734 return 0; 1735 1736 err_unwind_modem: 1737 /* Compute which modem channels need to be deallocated */ 1738 mask ^= gsi->modem_channel_bitmap; 1739 while (mask) { 1740 channel_id = __fls(mask); 1741 1742 mask ^= BIT(channel_id); 1743 1744 gsi_modem_channel_halt(gsi, channel_id); 1745 } 1746 1747 err_unwind: 1748 while (channel_id--) 1749 gsi_channel_teardown_one(gsi, channel_id); 1750 1751 mutex_unlock(&gsi->mutex); 1752 1753 gsi_irq_disable(gsi); 1754 gsi_evt_ring_teardown(gsi); 1755 1756 return ret; 1757 } 1758 1759 /* Inverse of gsi_channel_setup() */ 1760 static void gsi_channel_teardown(struct gsi *gsi) 1761 { 1762 u32 mask = gsi->modem_channel_bitmap; 1763 u32 channel_id; 1764 1765 mutex_lock(&gsi->mutex); 1766 1767 while (mask) { 1768 channel_id = __fls(mask); 1769 1770 mask ^= BIT(channel_id); 1771 1772 gsi_modem_channel_halt(gsi, channel_id); 1773 } 1774 1775 channel_id = gsi->channel_count - 1; 1776 do 1777 gsi_channel_teardown_one(gsi, channel_id); 1778 while (channel_id--); 1779 1780 mutex_unlock(&gsi->mutex); 1781 1782 gsi_irq_disable(gsi); 1783 gsi_evt_ring_teardown(gsi); 1784 } 1785 1786 /* Setup function for GSI. GSI firmware must be loaded and initialized */ 1787 int gsi_setup(struct gsi *gsi) 1788 { 1789 struct device *dev = gsi->dev; 1790 u32 val; 1791 int ret; 1792 1793 /* Here is where we first touch the GSI hardware */ 1794 val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET); 1795 if (!(val & ENABLED_FMASK)) { 1796 dev_err(dev, "GSI has not been enabled\n"); 1797 return -EIO; 1798 } 1799 1800 gsi_irq_setup(gsi); 1801 1802 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); 1803 1804 gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); 1805 if (!gsi->channel_count) { 1806 dev_err(dev, "GSI reports zero channels supported\n"); 1807 return -EINVAL; 1808 } 1809 if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) { 1810 dev_warn(dev, 1811 "limiting to %u channels; hardware supports %u\n", 1812 GSI_CHANNEL_COUNT_MAX, gsi->channel_count); 1813 gsi->channel_count = GSI_CHANNEL_COUNT_MAX; 1814 } 1815 1816 gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); 1817 if (!gsi->evt_ring_count) { 1818 dev_err(dev, "GSI reports zero event rings supported\n"); 1819 return -EINVAL; 1820 } 1821 if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) { 1822 dev_warn(dev, 1823 "limiting to %u event rings; hardware supports %u\n", 1824 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count); 1825 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; 1826 } 1827 1828 /* Initialize the error log */ 1829 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); 1830 1831 /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ 1832 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); 1833 1834 ret = gsi_channel_setup(gsi); 1835 if (ret) 1836 gsi_irq_teardown(gsi); 1837 1838 return ret; 1839 } 1840 1841 /* Inverse of gsi_setup() */ 1842 void gsi_teardown(struct gsi *gsi) 1843 { 1844 gsi_channel_teardown(gsi); 1845 gsi_irq_teardown(gsi); 1846 } 1847 1848 /* Initialize a channel's event ring */ 1849 static int gsi_channel_evt_ring_init(struct gsi_channel *channel) 1850 { 1851 struct gsi *gsi = channel->gsi; 1852 struct gsi_evt_ring *evt_ring; 1853 int ret; 1854 1855 ret = gsi_evt_ring_id_alloc(gsi); 1856 if (ret < 0) 1857 return ret; 1858 channel->evt_ring_id = ret; 1859 1860 evt_ring = &gsi->evt_ring[channel->evt_ring_id]; 1861 evt_ring->channel = channel; 1862 1863 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count); 1864 if (!ret) 1865 return 0; /* Success! */ 1866 1867 dev_err(gsi->dev, "error %d allocating channel %u event ring\n", 1868 ret, gsi_channel_id(channel)); 1869 1870 gsi_evt_ring_id_free(gsi, channel->evt_ring_id); 1871 1872 return ret; 1873 } 1874 1875 /* Inverse of gsi_channel_evt_ring_init() */ 1876 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel) 1877 { 1878 u32 evt_ring_id = channel->evt_ring_id; 1879 struct gsi *gsi = channel->gsi; 1880 struct gsi_evt_ring *evt_ring; 1881 1882 evt_ring = &gsi->evt_ring[evt_ring_id]; 1883 gsi_ring_free(gsi, &evt_ring->ring); 1884 gsi_evt_ring_id_free(gsi, evt_ring_id); 1885 } 1886 1887 /* Init function for event rings */ 1888 static void gsi_evt_ring_init(struct gsi *gsi) 1889 { 1890 u32 evt_ring_id = 0; 1891 1892 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); 1893 gsi->ieob_enabled_bitmap = 0; 1894 do 1895 init_completion(&gsi->evt_ring[evt_ring_id].completion); 1896 while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX); 1897 } 1898 1899 /* Inverse of gsi_evt_ring_init() */ 1900 static void gsi_evt_ring_exit(struct gsi *gsi) 1901 { 1902 /* Nothing to do */ 1903 } 1904 1905 static bool gsi_channel_data_valid(struct gsi *gsi, 1906 const struct ipa_gsi_endpoint_data *data) 1907 { 1908 #ifdef IPA_VALIDATION 1909 u32 channel_id = data->channel_id; 1910 struct device *dev = gsi->dev; 1911 1912 /* Make sure channel ids are in the range driver supports */ 1913 if (channel_id >= GSI_CHANNEL_COUNT_MAX) { 1914 dev_err(dev, "bad channel id %u; must be less than %u\n", 1915 channel_id, GSI_CHANNEL_COUNT_MAX); 1916 return false; 1917 } 1918 1919 if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) { 1920 dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id); 1921 return false; 1922 } 1923 1924 if (!data->channel.tlv_count || 1925 data->channel.tlv_count > GSI_TLV_MAX) { 1926 dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n", 1927 channel_id, data->channel.tlv_count, GSI_TLV_MAX); 1928 return false; 1929 } 1930 1931 /* We have to allow at least one maximally-sized transaction to 1932 * be outstanding (which would use tlv_count TREs). Given how 1933 * gsi_channel_tre_max() is computed, tre_count has to be almost 1934 * twice the TLV FIFO size to satisfy this requirement. 1935 */ 1936 if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) { 1937 dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n", 1938 channel_id, data->channel.tlv_count, 1939 data->channel.tre_count); 1940 return false; 1941 } 1942 1943 if (!is_power_of_2(data->channel.tre_count)) { 1944 dev_err(dev, "channel %u bad tre_count %u; not power of 2\n", 1945 channel_id, data->channel.tre_count); 1946 return false; 1947 } 1948 1949 if (!is_power_of_2(data->channel.event_count)) { 1950 dev_err(dev, "channel %u bad event_count %u; not power of 2\n", 1951 channel_id, data->channel.event_count); 1952 return false; 1953 } 1954 #endif /* IPA_VALIDATION */ 1955 1956 return true; 1957 } 1958 1959 /* Init function for a single channel */ 1960 static int gsi_channel_init_one(struct gsi *gsi, 1961 const struct ipa_gsi_endpoint_data *data, 1962 bool command) 1963 { 1964 struct gsi_channel *channel; 1965 u32 tre_count; 1966 int ret; 1967 1968 if (!gsi_channel_data_valid(gsi, data)) 1969 return -EINVAL; 1970 1971 /* Worst case we need an event for every outstanding TRE */ 1972 if (data->channel.tre_count > data->channel.event_count) { 1973 tre_count = data->channel.event_count; 1974 dev_warn(gsi->dev, "channel %u limited to %u TREs\n", 1975 data->channel_id, tre_count); 1976 } else { 1977 tre_count = data->channel.tre_count; 1978 } 1979 1980 channel = &gsi->channel[data->channel_id]; 1981 memset(channel, 0, sizeof(*channel)); 1982 1983 channel->gsi = gsi; 1984 channel->toward_ipa = data->toward_ipa; 1985 channel->command = command; 1986 channel->tlv_count = data->channel.tlv_count; 1987 channel->tre_count = tre_count; 1988 channel->event_count = data->channel.event_count; 1989 init_completion(&channel->completion); 1990 1991 ret = gsi_channel_evt_ring_init(channel); 1992 if (ret) 1993 goto err_clear_gsi; 1994 1995 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); 1996 if (ret) { 1997 dev_err(gsi->dev, "error %d allocating channel %u ring\n", 1998 ret, data->channel_id); 1999 goto err_channel_evt_ring_exit; 2000 } 2001 2002 ret = gsi_channel_trans_init(gsi, data->channel_id); 2003 if (ret) 2004 goto err_ring_free; 2005 2006 if (command) { 2007 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id); 2008 2009 ret = ipa_cmd_pool_init(channel, tre_max); 2010 } 2011 if (!ret) 2012 return 0; /* Success! */ 2013 2014 gsi_channel_trans_exit(channel); 2015 err_ring_free: 2016 gsi_ring_free(gsi, &channel->tre_ring); 2017 err_channel_evt_ring_exit: 2018 gsi_channel_evt_ring_exit(channel); 2019 err_clear_gsi: 2020 channel->gsi = NULL; /* Mark it not (fully) initialized */ 2021 2022 return ret; 2023 } 2024 2025 /* Inverse of gsi_channel_init_one() */ 2026 static void gsi_channel_exit_one(struct gsi_channel *channel) 2027 { 2028 if (!channel->gsi) 2029 return; /* Ignore uninitialized channels */ 2030 2031 if (channel->command) 2032 ipa_cmd_pool_exit(channel); 2033 gsi_channel_trans_exit(channel); 2034 gsi_ring_free(channel->gsi, &channel->tre_ring); 2035 gsi_channel_evt_ring_exit(channel); 2036 } 2037 2038 /* Init function for channels */ 2039 static int gsi_channel_init(struct gsi *gsi, u32 count, 2040 const struct ipa_gsi_endpoint_data *data) 2041 { 2042 bool modem_alloc; 2043 int ret = 0; 2044 u32 i; 2045 2046 /* IPA v4.2 requires the AP to allocate channels for the modem */ 2047 modem_alloc = gsi->version == IPA_VERSION_4_2; 2048 2049 gsi_evt_ring_init(gsi); 2050 2051 /* The endpoint data array is indexed by endpoint name */ 2052 for (i = 0; i < count; i++) { 2053 bool command = i == IPA_ENDPOINT_AP_COMMAND_TX; 2054 2055 if (ipa_gsi_endpoint_data_empty(&data[i])) 2056 continue; /* Skip over empty slots */ 2057 2058 /* Mark modem channels to be allocated (hardware workaround) */ 2059 if (data[i].ee_id == GSI_EE_MODEM) { 2060 if (modem_alloc) 2061 gsi->modem_channel_bitmap |= 2062 BIT(data[i].channel_id); 2063 continue; 2064 } 2065 2066 ret = gsi_channel_init_one(gsi, &data[i], command); 2067 if (ret) 2068 goto err_unwind; 2069 } 2070 2071 return ret; 2072 2073 err_unwind: 2074 while (i--) { 2075 if (ipa_gsi_endpoint_data_empty(&data[i])) 2076 continue; 2077 if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) { 2078 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id); 2079 continue; 2080 } 2081 gsi_channel_exit_one(&gsi->channel[data->channel_id]); 2082 } 2083 gsi_evt_ring_exit(gsi); 2084 2085 return ret; 2086 } 2087 2088 /* Inverse of gsi_channel_init() */ 2089 static void gsi_channel_exit(struct gsi *gsi) 2090 { 2091 u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1; 2092 2093 do 2094 gsi_channel_exit_one(&gsi->channel[channel_id]); 2095 while (channel_id--); 2096 gsi->modem_channel_bitmap = 0; 2097 2098 gsi_evt_ring_exit(gsi); 2099 } 2100 2101 /* Init function for GSI. GSI hardware does not need to be "ready" */ 2102 int gsi_init(struct gsi *gsi, struct platform_device *pdev, 2103 enum ipa_version version, u32 count, 2104 const struct ipa_gsi_endpoint_data *data) 2105 { 2106 struct device *dev = &pdev->dev; 2107 struct resource *res; 2108 resource_size_t size; 2109 u32 adjust; 2110 int ret; 2111 2112 gsi_validate_build(); 2113 2114 gsi->dev = dev; 2115 gsi->version = version; 2116 2117 /* The GSI layer performs NAPI on all endpoints. NAPI requires a 2118 * network device structure, but the GSI layer does not have one, 2119 * so we must create a dummy network device for this purpose. 2120 */ 2121 init_dummy_netdev(&gsi->dummy_dev); 2122 2123 /* Get GSI memory range and map it */ 2124 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi"); 2125 if (!res) { 2126 dev_err(dev, "DT error getting \"gsi\" memory property\n"); 2127 return -ENODEV; 2128 } 2129 2130 size = resource_size(res); 2131 if (res->start > U32_MAX || size > U32_MAX - res->start) { 2132 dev_err(dev, "DT memory resource \"gsi\" out of range\n"); 2133 return -EINVAL; 2134 } 2135 2136 /* Make sure we can make our pointer adjustment if necessary */ 2137 adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST; 2138 if (res->start < adjust) { 2139 dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n", 2140 adjust); 2141 return -EINVAL; 2142 } 2143 2144 gsi->virt = ioremap(res->start, size); 2145 if (!gsi->virt) { 2146 dev_err(dev, "unable to remap \"gsi\" memory\n"); 2147 return -ENOMEM; 2148 } 2149 /* Adjust register range pointer downward for newer IPA versions */ 2150 gsi->virt -= adjust; 2151 2152 init_completion(&gsi->completion); 2153 2154 ret = gsi_irq_init(gsi, pdev); 2155 if (ret) 2156 goto err_iounmap; 2157 2158 ret = gsi_channel_init(gsi, count, data); 2159 if (ret) 2160 goto err_irq_exit; 2161 2162 mutex_init(&gsi->mutex); 2163 2164 return 0; 2165 2166 err_irq_exit: 2167 gsi_irq_exit(gsi); 2168 err_iounmap: 2169 iounmap(gsi->virt); 2170 2171 return ret; 2172 } 2173 2174 /* Inverse of gsi_init() */ 2175 void gsi_exit(struct gsi *gsi) 2176 { 2177 mutex_destroy(&gsi->mutex); 2178 gsi_channel_exit(gsi); 2179 gsi_irq_exit(gsi); 2180 iounmap(gsi->virt); 2181 } 2182 2183 /* The maximum number of outstanding TREs on a channel. This limits 2184 * a channel's maximum number of transactions outstanding (worst case 2185 * is one TRE per transaction). 2186 * 2187 * The absolute limit is the number of TREs in the channel's TRE ring, 2188 * and in theory we should be able use all of them. But in practice, 2189 * doing that led to the hardware reporting exhaustion of event ring 2190 * slots for writing completion information. So the hardware limit 2191 * would be (tre_count - 1). 2192 * 2193 * We reduce it a bit further though. Transaction resource pools are 2194 * sized to be a little larger than this maximum, to allow resource 2195 * allocations to always be contiguous. The number of entries in a 2196 * TRE ring buffer is a power of 2, and the extra resources in a pool 2197 * tends to nearly double the memory allocated for it. Reducing the 2198 * maximum number of outstanding TREs allows the number of entries in 2199 * a pool to avoid crossing that power-of-2 boundary, and this can 2200 * substantially reduce pool memory requirements. The number we 2201 * reduce it by matches the number added in gsi_trans_pool_init(). 2202 */ 2203 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id) 2204 { 2205 struct gsi_channel *channel = &gsi->channel[channel_id]; 2206 2207 /* Hardware limit is channel->tre_count - 1 */ 2208 return channel->tre_count - (channel->tlv_count - 1); 2209 } 2210 2211 /* Returns the maximum number of TREs in a single transaction for a channel */ 2212 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id) 2213 { 2214 struct gsi_channel *channel = &gsi->channel[channel_id]; 2215 2216 return channel->tlv_count; 2217 } 2218