1 // SPDX-License-Identifier: GPL-2.0 2 3 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 4 * Copyright (C) 2018-2021 Linaro Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <linux/bits.h> 9 #include <linux/bitfield.h> 10 #include <linux/mutex.h> 11 #include <linux/completion.h> 12 #include <linux/io.h> 13 #include <linux/bug.h> 14 #include <linux/interrupt.h> 15 #include <linux/platform_device.h> 16 #include <linux/netdevice.h> 17 18 #include "gsi.h" 19 #include "gsi_reg.h" 20 #include "gsi_private.h" 21 #include "gsi_trans.h" 22 #include "ipa_gsi.h" 23 #include "ipa_data.h" 24 #include "ipa_version.h" 25 26 /** 27 * DOC: The IPA Generic Software Interface 28 * 29 * The generic software interface (GSI) is an integral component of the IPA, 30 * providing a well-defined communication layer between the AP subsystem 31 * and the IPA core. The modem uses the GSI layer as well. 32 * 33 * -------- --------- 34 * | | | | 35 * | AP +<---. .----+ Modem | 36 * | +--. | | .->+ | 37 * | | | | | | | | 38 * -------- | | | | --------- 39 * v | v | 40 * --+-+---+-+-- 41 * | GSI | 42 * |-----------| 43 * | | 44 * | IPA | 45 * | | 46 * ------------- 47 * 48 * In the above diagram, the AP and Modem represent "execution environments" 49 * (EEs), which are independent operating environments that use the IPA for 50 * data transfer. 51 * 52 * Each EE uses a set of unidirectional GSI "channels," which allow transfer 53 * of data to or from the IPA. A channel is implemented as a ring buffer, 54 * with a DRAM-resident array of "transfer elements" (TREs) available to 55 * describe transfers to or from other EEs through the IPA. A transfer 56 * element can also contain an immediate command, requesting the IPA perform 57 * actions other than data transfer. 58 * 59 * Each TRE refers to a block of data--also located DRAM. After writing one 60 * or more TREs to a channel, the writer (either the IPA or an EE) writes a 61 * doorbell register to inform the receiving side how many elements have 62 * been written. 63 * 64 * Each channel has a GSI "event ring" associated with it. An event ring 65 * is implemented very much like a channel ring, but is always directed from 66 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel 67 * events by adding an entry to the event ring associated with the channel. 68 * The GSI then writes its doorbell for the event ring, causing the target 69 * EE to be interrupted. Each entry in an event ring contains a pointer 70 * to the channel TRE whose completion the event represents. 71 * 72 * Each TRE in a channel ring has a set of flags. One flag indicates whether 73 * the completion of the transfer operation generates an entry (and possibly 74 * an interrupt) in the channel's event ring. Other flags allow transfer 75 * elements to be chained together, forming a single logical transaction. 76 * TRE flags are used to control whether and when interrupts are generated 77 * to signal completion of channel transfers. 78 * 79 * Elements in channel and event rings are completed (or consumed) strictly 80 * in order. Completion of one entry implies the completion of all preceding 81 * entries. A single completion interrupt can therefore communicate the 82 * completion of many transfers. 83 * 84 * Note that all GSI registers are little-endian, which is the assumed 85 * endianness of I/O space accesses. The accessor functions perform byte 86 * swapping if needed (i.e., for a big endian CPU). 87 */ 88 89 /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */ 90 #define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */ 91 92 #define GSI_CMD_TIMEOUT 50 /* milliseconds */ 93 94 #define GSI_CHANNEL_STOP_RETRIES 10 95 #define GSI_CHANNEL_MODEM_HALT_RETRIES 10 96 97 #define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */ 98 #define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */ 99 100 #define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */ 101 102 /* An entry in an event ring */ 103 struct gsi_event { 104 __le64 xfer_ptr; 105 __le16 len; 106 u8 reserved1; 107 u8 code; 108 __le16 reserved2; 109 u8 type; 110 u8 chid; 111 }; 112 113 /** gsi_channel_scratch_gpi - GPI protocol scratch register 114 * @max_outstanding_tre: 115 * Defines the maximum number of TREs allowed in a single transaction 116 * on a channel (in bytes). This determines the amount of prefetch 117 * performed by the hardware. We configure this to equal the size of 118 * the TLV FIFO for the channel. 119 * @outstanding_threshold: 120 * Defines the threshold (in bytes) determining when the sequencer 121 * should update the channel doorbell. We configure this to equal 122 * the size of two TREs. 123 */ 124 struct gsi_channel_scratch_gpi { 125 u64 reserved1; 126 u16 reserved2; 127 u16 max_outstanding_tre; 128 u16 reserved3; 129 u16 outstanding_threshold; 130 }; 131 132 /** gsi_channel_scratch - channel scratch configuration area 133 * 134 * The exact interpretation of this register is protocol-specific. 135 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above. 136 */ 137 union gsi_channel_scratch { 138 struct gsi_channel_scratch_gpi gpi; 139 struct { 140 u32 word1; 141 u32 word2; 142 u32 word3; 143 u32 word4; 144 } data; 145 }; 146 147 /* Check things that can be validated at build time. */ 148 static void gsi_validate_build(void) 149 { 150 /* This is used as a divisor */ 151 BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE); 152 153 /* Code assumes the size of channel and event ring element are 154 * the same (and fixed). Make sure the size of an event ring 155 * element is what's expected. 156 */ 157 BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE); 158 159 /* Hardware requires a 2^n ring size. We ensure the number of 160 * elements in an event ring is a power of 2 elsewhere; this 161 * ensure the elements themselves meet the requirement. 162 */ 163 BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE)); 164 165 /* The channel element size must fit in this field */ 166 BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK)); 167 168 /* The event ring element size must fit in this field */ 169 BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK)); 170 } 171 172 /* Return the channel id associated with a given channel */ 173 static u32 gsi_channel_id(struct gsi_channel *channel) 174 { 175 return channel - &channel->gsi->channel[0]; 176 } 177 178 /* An initialized channel has a non-null GSI pointer */ 179 static bool gsi_channel_initialized(struct gsi_channel *channel) 180 { 181 return !!channel->gsi; 182 } 183 184 /* Update the GSI IRQ type register with the cached value */ 185 static void gsi_irq_type_update(struct gsi *gsi, u32 val) 186 { 187 gsi->type_enabled_bitmap = val; 188 iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); 189 } 190 191 static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id) 192 { 193 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id)); 194 } 195 196 static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) 197 { 198 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id)); 199 } 200 201 /* Event ring commands are performed one at a time. Their completion 202 * is signaled by the event ring control GSI interrupt type, which is 203 * only enabled when we issue an event ring command. Only the event 204 * ring being operated on has this interrupt enabled. 205 */ 206 static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id) 207 { 208 u32 val = BIT(evt_ring_id); 209 210 /* There's a small chance that a previous command completed 211 * after the interrupt was disabled, so make sure we have no 212 * pending interrupts before we enable them. 213 */ 214 iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); 215 216 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 217 gsi_irq_type_enable(gsi, GSI_EV_CTRL); 218 } 219 220 /* Disable event ring control interrupts */ 221 static void gsi_irq_ev_ctrl_disable(struct gsi *gsi) 222 { 223 gsi_irq_type_disable(gsi, GSI_EV_CTRL); 224 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 225 } 226 227 /* Channel commands are performed one at a time. Their completion is 228 * signaled by the channel control GSI interrupt type, which is only 229 * enabled when we issue a channel command. Only the channel being 230 * operated on has this interrupt enabled. 231 */ 232 static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id) 233 { 234 u32 val = BIT(channel_id); 235 236 /* There's a small chance that a previous command completed 237 * after the interrupt was disabled, so make sure we have no 238 * pending interrupts before we enable them. 239 */ 240 iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); 241 242 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 243 gsi_irq_type_enable(gsi, GSI_CH_CTRL); 244 } 245 246 /* Disable channel control interrupts */ 247 static void gsi_irq_ch_ctrl_disable(struct gsi *gsi) 248 { 249 gsi_irq_type_disable(gsi, GSI_CH_CTRL); 250 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 251 } 252 253 static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id) 254 { 255 bool enable_ieob = !gsi->ieob_enabled_bitmap; 256 u32 val; 257 258 gsi->ieob_enabled_bitmap |= BIT(evt_ring_id); 259 val = gsi->ieob_enabled_bitmap; 260 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 261 262 /* Enable the interrupt type if this is the first channel enabled */ 263 if (enable_ieob) 264 gsi_irq_type_enable(gsi, GSI_IEOB); 265 } 266 267 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask) 268 { 269 u32 val; 270 271 gsi->ieob_enabled_bitmap &= ~event_mask; 272 273 /* Disable the interrupt type if this was the last enabled channel */ 274 if (!gsi->ieob_enabled_bitmap) 275 gsi_irq_type_disable(gsi, GSI_IEOB); 276 277 val = gsi->ieob_enabled_bitmap; 278 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 279 } 280 281 static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id) 282 { 283 gsi_irq_ieob_disable(gsi, BIT(evt_ring_id)); 284 } 285 286 /* Enable all GSI_interrupt types */ 287 static void gsi_irq_enable(struct gsi *gsi) 288 { 289 u32 val; 290 291 /* Global interrupts include hardware error reports. Enable 292 * that so we can at least report the error should it occur. 293 */ 294 iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 295 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE)); 296 297 /* General GSI interrupts are reported to all EEs; if they occur 298 * they are unrecoverable (without reset). A breakpoint interrupt 299 * also exists, but we don't support that. We want to be notified 300 * of errors so we can report them, even if they can't be handled. 301 */ 302 val = BIT(BUS_ERROR); 303 val |= BIT(CMD_FIFO_OVRFLOW); 304 val |= BIT(MCS_STACK_OVRFLOW); 305 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 306 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL)); 307 } 308 309 /* Disable all GSI interrupt types */ 310 static void gsi_irq_disable(struct gsi *gsi) 311 { 312 gsi_irq_type_update(gsi, 0); 313 314 /* Clear the type-specific interrupt masks set by gsi_irq_enable() */ 315 iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 316 iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 317 } 318 319 /* Return the virtual address associated with a ring index */ 320 void *gsi_ring_virt(struct gsi_ring *ring, u32 index) 321 { 322 /* Note: index *must* be used modulo the ring count here */ 323 return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE; 324 } 325 326 /* Return the 32-bit DMA address associated with a ring index */ 327 static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index) 328 { 329 return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE; 330 } 331 332 /* Return the ring index of a 32-bit ring offset */ 333 static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset) 334 { 335 return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE; 336 } 337 338 /* Issue a GSI command by writing a value to a register, then wait for 339 * completion to be signaled. Returns true if the command completes 340 * or false if it times out. 341 */ 342 static bool 343 gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion) 344 { 345 unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT); 346 347 reinit_completion(completion); 348 349 iowrite32(val, gsi->virt + reg); 350 351 return !!wait_for_completion_timeout(completion, timeout); 352 } 353 354 /* Return the hardware's notion of the current state of an event ring */ 355 static enum gsi_evt_ring_state 356 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id) 357 { 358 u32 val; 359 360 val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); 361 362 return u32_get_bits(val, EV_CHSTATE_FMASK); 363 } 364 365 /* Issue an event ring command and wait for it to complete */ 366 static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id, 367 enum gsi_evt_cmd_opcode opcode) 368 { 369 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 370 struct completion *completion = &evt_ring->completion; 371 struct device *dev = gsi->dev; 372 bool timeout; 373 u32 val; 374 375 /* Enable the completion interrupt for the command */ 376 gsi_irq_ev_ctrl_enable(gsi, evt_ring_id); 377 378 val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK); 379 val |= u32_encode_bits(opcode, EV_OPCODE_FMASK); 380 381 timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion); 382 383 gsi_irq_ev_ctrl_disable(gsi); 384 385 if (!timeout) 386 return; 387 388 dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n", 389 opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id)); 390 } 391 392 /* Allocate an event ring in NOT_ALLOCATED state */ 393 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) 394 { 395 enum gsi_evt_ring_state state; 396 397 /* Get initial event ring state */ 398 state = gsi_evt_ring_state(gsi, evt_ring_id); 399 if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) { 400 dev_err(gsi->dev, "event ring %u bad state %u before alloc\n", 401 evt_ring_id, state); 402 return -EINVAL; 403 } 404 405 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); 406 407 /* If successful the event ring state will have changed */ 408 state = gsi_evt_ring_state(gsi, evt_ring_id); 409 if (state == GSI_EVT_RING_STATE_ALLOCATED) 410 return 0; 411 412 dev_err(gsi->dev, "event ring %u bad state %u after alloc\n", 413 evt_ring_id, state); 414 415 return -EIO; 416 } 417 418 /* Reset a GSI event ring in ALLOCATED or ERROR state. */ 419 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) 420 { 421 enum gsi_evt_ring_state state; 422 423 state = gsi_evt_ring_state(gsi, evt_ring_id); 424 if (state != GSI_EVT_RING_STATE_ALLOCATED && 425 state != GSI_EVT_RING_STATE_ERROR) { 426 dev_err(gsi->dev, "event ring %u bad state %u before reset\n", 427 evt_ring_id, state); 428 return; 429 } 430 431 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); 432 433 /* If successful the event ring state will have changed */ 434 state = gsi_evt_ring_state(gsi, evt_ring_id); 435 if (state == GSI_EVT_RING_STATE_ALLOCATED) 436 return; 437 438 dev_err(gsi->dev, "event ring %u bad state %u after reset\n", 439 evt_ring_id, state); 440 } 441 442 /* Issue a hardware de-allocation request for an allocated event ring */ 443 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) 444 { 445 enum gsi_evt_ring_state state; 446 447 state = gsi_evt_ring_state(gsi, evt_ring_id); 448 if (state != GSI_EVT_RING_STATE_ALLOCATED) { 449 dev_err(gsi->dev, "event ring %u state %u before dealloc\n", 450 evt_ring_id, state); 451 return; 452 } 453 454 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); 455 456 /* If successful the event ring state will have changed */ 457 state = gsi_evt_ring_state(gsi, evt_ring_id); 458 if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED) 459 return; 460 461 dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n", 462 evt_ring_id, state); 463 } 464 465 /* Fetch the current state of a channel from hardware */ 466 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel) 467 { 468 u32 channel_id = gsi_channel_id(channel); 469 void __iomem *virt = channel->gsi->virt; 470 u32 val; 471 472 val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); 473 474 return u32_get_bits(val, CHSTATE_FMASK); 475 } 476 477 /* Issue a channel command and wait for it to complete */ 478 static void 479 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) 480 { 481 struct completion *completion = &channel->completion; 482 u32 channel_id = gsi_channel_id(channel); 483 struct gsi *gsi = channel->gsi; 484 struct device *dev = gsi->dev; 485 bool timeout; 486 u32 val; 487 488 /* Enable the completion interrupt for the command */ 489 gsi_irq_ch_ctrl_enable(gsi, channel_id); 490 491 val = u32_encode_bits(channel_id, CH_CHID_FMASK); 492 val |= u32_encode_bits(opcode, CH_OPCODE_FMASK); 493 timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion); 494 495 gsi_irq_ch_ctrl_disable(gsi); 496 497 if (!timeout) 498 return; 499 500 dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", 501 opcode, channel_id, gsi_channel_state(channel)); 502 } 503 504 /* Allocate GSI channel in NOT_ALLOCATED state */ 505 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) 506 { 507 struct gsi_channel *channel = &gsi->channel[channel_id]; 508 struct device *dev = gsi->dev; 509 enum gsi_channel_state state; 510 511 /* Get initial channel state */ 512 state = gsi_channel_state(channel); 513 if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) { 514 dev_err(dev, "channel %u bad state %u before alloc\n", 515 channel_id, state); 516 return -EINVAL; 517 } 518 519 gsi_channel_command(channel, GSI_CH_ALLOCATE); 520 521 /* If successful the channel state will have changed */ 522 state = gsi_channel_state(channel); 523 if (state == GSI_CHANNEL_STATE_ALLOCATED) 524 return 0; 525 526 dev_err(dev, "channel %u bad state %u after alloc\n", 527 channel_id, state); 528 529 return -EIO; 530 } 531 532 /* Start an ALLOCATED channel */ 533 static int gsi_channel_start_command(struct gsi_channel *channel) 534 { 535 struct device *dev = channel->gsi->dev; 536 enum gsi_channel_state state; 537 538 state = gsi_channel_state(channel); 539 if (state != GSI_CHANNEL_STATE_ALLOCATED && 540 state != GSI_CHANNEL_STATE_STOPPED) { 541 dev_err(dev, "channel %u bad state %u before start\n", 542 gsi_channel_id(channel), state); 543 return -EINVAL; 544 } 545 546 gsi_channel_command(channel, GSI_CH_START); 547 548 /* If successful the channel state will have changed */ 549 state = gsi_channel_state(channel); 550 if (state == GSI_CHANNEL_STATE_STARTED) 551 return 0; 552 553 dev_err(dev, "channel %u bad state %u after start\n", 554 gsi_channel_id(channel), state); 555 556 return -EIO; 557 } 558 559 /* Stop a GSI channel in STARTED state */ 560 static int gsi_channel_stop_command(struct gsi_channel *channel) 561 { 562 struct device *dev = channel->gsi->dev; 563 enum gsi_channel_state state; 564 565 state = gsi_channel_state(channel); 566 567 /* Channel could have entered STOPPED state since last call 568 * if it timed out. If so, we're done. 569 */ 570 if (state == GSI_CHANNEL_STATE_STOPPED) 571 return 0; 572 573 if (state != GSI_CHANNEL_STATE_STARTED && 574 state != GSI_CHANNEL_STATE_STOP_IN_PROC) { 575 dev_err(dev, "channel %u bad state %u before stop\n", 576 gsi_channel_id(channel), state); 577 return -EINVAL; 578 } 579 580 gsi_channel_command(channel, GSI_CH_STOP); 581 582 /* If successful the channel state will have changed */ 583 state = gsi_channel_state(channel); 584 if (state == GSI_CHANNEL_STATE_STOPPED) 585 return 0; 586 587 /* We may have to try again if stop is in progress */ 588 if (state == GSI_CHANNEL_STATE_STOP_IN_PROC) 589 return -EAGAIN; 590 591 dev_err(dev, "channel %u bad state %u after stop\n", 592 gsi_channel_id(channel), state); 593 594 return -EIO; 595 } 596 597 /* Reset a GSI channel in ALLOCATED or ERROR state. */ 598 static void gsi_channel_reset_command(struct gsi_channel *channel) 599 { 600 struct device *dev = channel->gsi->dev; 601 enum gsi_channel_state state; 602 603 /* A short delay is required before a RESET command */ 604 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 605 606 state = gsi_channel_state(channel); 607 if (state != GSI_CHANNEL_STATE_STOPPED && 608 state != GSI_CHANNEL_STATE_ERROR) { 609 /* No need to reset a channel already in ALLOCATED state */ 610 if (state != GSI_CHANNEL_STATE_ALLOCATED) 611 dev_err(dev, "channel %u bad state %u before reset\n", 612 gsi_channel_id(channel), state); 613 return; 614 } 615 616 gsi_channel_command(channel, GSI_CH_RESET); 617 618 /* If successful the channel state will have changed */ 619 state = gsi_channel_state(channel); 620 if (state != GSI_CHANNEL_STATE_ALLOCATED) 621 dev_err(dev, "channel %u bad state %u after reset\n", 622 gsi_channel_id(channel), state); 623 } 624 625 /* Deallocate an ALLOCATED GSI channel */ 626 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) 627 { 628 struct gsi_channel *channel = &gsi->channel[channel_id]; 629 struct device *dev = gsi->dev; 630 enum gsi_channel_state state; 631 632 state = gsi_channel_state(channel); 633 if (state != GSI_CHANNEL_STATE_ALLOCATED) { 634 dev_err(dev, "channel %u bad state %u before dealloc\n", 635 channel_id, state); 636 return; 637 } 638 639 gsi_channel_command(channel, GSI_CH_DE_ALLOC); 640 641 /* If successful the channel state will have changed */ 642 state = gsi_channel_state(channel); 643 644 if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) 645 dev_err(dev, "channel %u bad state %u after dealloc\n", 646 channel_id, state); 647 } 648 649 /* Ring an event ring doorbell, reporting the last entry processed by the AP. 650 * The index argument (modulo the ring count) is the first unfilled entry, so 651 * we supply one less than that with the doorbell. Update the event ring 652 * index field with the value provided. 653 */ 654 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index) 655 { 656 struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring; 657 u32 val; 658 659 ring->index = index; /* Next unused entry */ 660 661 /* Note: index *must* be used modulo the ring count here */ 662 val = gsi_ring_addr(ring, (index - 1) % ring->count); 663 iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id)); 664 } 665 666 /* Program an event ring for use */ 667 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) 668 { 669 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 670 size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE; 671 u32 val; 672 673 /* We program all event rings as GPI type/protocol */ 674 val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK); 675 val |= EV_INTYPE_FMASK; 676 val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK); 677 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); 678 679 val = ev_r_length_encoded(gsi->version, size); 680 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id)); 681 682 /* The context 2 and 3 registers store the low-order and 683 * high-order 32 bits of the address of the event ring, 684 * respectively. 685 */ 686 val = lower_32_bits(evt_ring->ring.addr); 687 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id)); 688 val = upper_32_bits(evt_ring->ring.addr); 689 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id)); 690 691 /* Enable interrupt moderation by setting the moderation delay */ 692 val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK); 693 val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */ 694 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id)); 695 696 /* No MSI write data, and MSI address high and low address is 0 */ 697 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id)); 698 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id)); 699 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id)); 700 701 /* We don't need to get event read pointer updates */ 702 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id)); 703 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id)); 704 705 /* Finally, tell the hardware we've completed event 0 (arbitrary) */ 706 gsi_evt_ring_doorbell(gsi, evt_ring_id, 0); 707 } 708 709 /* Find the transaction whose completion indicates a channel is quiesced */ 710 static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel) 711 { 712 struct gsi_trans_info *trans_info = &channel->trans_info; 713 const struct list_head *list; 714 struct gsi_trans *trans; 715 716 spin_lock_bh(&trans_info->spinlock); 717 718 /* There is a small chance a TX transaction got allocated just 719 * before we disabled transmits, so check for that. 720 */ 721 if (channel->toward_ipa) { 722 list = &trans_info->alloc; 723 if (!list_empty(list)) 724 goto done; 725 list = &trans_info->pending; 726 if (!list_empty(list)) 727 goto done; 728 } 729 730 /* Otherwise (TX or RX) we want to wait for anything that 731 * has completed, or has been polled but not released yet. 732 */ 733 list = &trans_info->complete; 734 if (!list_empty(list)) 735 goto done; 736 list = &trans_info->polled; 737 if (list_empty(list)) 738 list = NULL; 739 done: 740 trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL; 741 742 /* Caller will wait for this, so take a reference */ 743 if (trans) 744 refcount_inc(&trans->refcount); 745 746 spin_unlock_bh(&trans_info->spinlock); 747 748 return trans; 749 } 750 751 /* Wait for transaction activity on a channel to complete */ 752 static void gsi_channel_trans_quiesce(struct gsi_channel *channel) 753 { 754 struct gsi_trans *trans; 755 756 /* Get the last transaction, and wait for it to complete */ 757 trans = gsi_channel_trans_last(channel); 758 if (trans) { 759 wait_for_completion(&trans->completion); 760 gsi_trans_free(trans); 761 } 762 } 763 764 /* Program a channel for use; there is no gsi_channel_deprogram() */ 765 static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) 766 { 767 size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE; 768 u32 channel_id = gsi_channel_id(channel); 769 union gsi_channel_scratch scr = { }; 770 struct gsi_channel_scratch_gpi *gpi; 771 struct gsi *gsi = channel->gsi; 772 u32 wrr_weight = 0; 773 u32 val; 774 775 /* Arbitrarily pick TRE 0 as the first channel element to use */ 776 channel->tre_ring.index = 0; 777 778 /* We program all channels as GPI type/protocol */ 779 val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI); 780 if (channel->toward_ipa) 781 val |= CHTYPE_DIR_FMASK; 782 val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK); 783 val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK); 784 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); 785 786 val = r_length_encoded(gsi->version, size); 787 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id)); 788 789 /* The context 2 and 3 registers store the low-order and 790 * high-order 32 bits of the address of the channel ring, 791 * respectively. 792 */ 793 val = lower_32_bits(channel->tre_ring.addr); 794 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id)); 795 val = upper_32_bits(channel->tre_ring.addr); 796 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id)); 797 798 /* Command channel gets low weighted round-robin priority */ 799 if (channel->command) 800 wrr_weight = field_max(WRR_WEIGHT_FMASK); 801 val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK); 802 803 /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */ 804 805 /* No need to use the doorbell engine starting at IPA v4.0 */ 806 if (gsi->version < IPA_VERSION_4_0 && doorbell) 807 val |= USE_DB_ENG_FMASK; 808 809 /* v4.0 introduces an escape buffer for prefetch. We use it 810 * on all but the AP command channel. 811 */ 812 if (gsi->version >= IPA_VERSION_4_0 && !channel->command) { 813 /* If not otherwise set, prefetch buffers are used */ 814 if (gsi->version < IPA_VERSION_4_5) 815 val |= USE_ESCAPE_BUF_ONLY_FMASK; 816 else 817 val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY, 818 PREFETCH_MODE_FMASK); 819 } 820 /* All channels set DB_IN_BYTES */ 821 if (gsi->version >= IPA_VERSION_4_9) 822 val |= DB_IN_BYTES; 823 824 iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id)); 825 826 /* Now update the scratch registers for GPI protocol */ 827 gpi = &scr.gpi; 828 gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) * 829 GSI_RING_ELEMENT_SIZE; 830 gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE; 831 832 val = scr.data.word1; 833 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id)); 834 835 val = scr.data.word2; 836 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id)); 837 838 val = scr.data.word3; 839 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id)); 840 841 /* We must preserve the upper 16 bits of the last scratch register. 842 * The next sequence assumes those bits remain unchanged between the 843 * read and the write. 844 */ 845 val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); 846 val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0)); 847 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); 848 849 /* All done! */ 850 } 851 852 static int __gsi_channel_start(struct gsi_channel *channel, bool resume) 853 { 854 struct gsi *gsi = channel->gsi; 855 int ret; 856 857 /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ 858 if (resume && gsi->version < IPA_VERSION_4_0) 859 return 0; 860 861 mutex_lock(&gsi->mutex); 862 863 ret = gsi_channel_start_command(channel); 864 865 mutex_unlock(&gsi->mutex); 866 867 return ret; 868 } 869 870 /* Start an allocated GSI channel */ 871 int gsi_channel_start(struct gsi *gsi, u32 channel_id) 872 { 873 struct gsi_channel *channel = &gsi->channel[channel_id]; 874 int ret; 875 876 /* Enable NAPI and the completion interrupt */ 877 napi_enable(&channel->napi); 878 gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id); 879 880 ret = __gsi_channel_start(channel, false); 881 if (ret) { 882 gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); 883 napi_disable(&channel->napi); 884 } 885 886 return ret; 887 } 888 889 static int gsi_channel_stop_retry(struct gsi_channel *channel) 890 { 891 u32 retries = GSI_CHANNEL_STOP_RETRIES; 892 int ret; 893 894 do { 895 ret = gsi_channel_stop_command(channel); 896 if (ret != -EAGAIN) 897 break; 898 usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC); 899 } while (retries--); 900 901 return ret; 902 } 903 904 static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend) 905 { 906 struct gsi *gsi = channel->gsi; 907 int ret; 908 909 /* Wait for any underway transactions to complete before stopping. */ 910 gsi_channel_trans_quiesce(channel); 911 912 /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ 913 if (suspend && gsi->version < IPA_VERSION_4_0) 914 return 0; 915 916 mutex_lock(&gsi->mutex); 917 918 ret = gsi_channel_stop_retry(channel); 919 920 mutex_unlock(&gsi->mutex); 921 922 return ret; 923 } 924 925 /* Stop a started channel */ 926 int gsi_channel_stop(struct gsi *gsi, u32 channel_id) 927 { 928 struct gsi_channel *channel = &gsi->channel[channel_id]; 929 int ret; 930 931 ret = __gsi_channel_stop(channel, false); 932 if (ret) 933 return ret; 934 935 /* Disable the completion interrupt and NAPI if successful */ 936 gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); 937 napi_disable(&channel->napi); 938 939 return 0; 940 } 941 942 /* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */ 943 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) 944 { 945 struct gsi_channel *channel = &gsi->channel[channel_id]; 946 947 mutex_lock(&gsi->mutex); 948 949 gsi_channel_reset_command(channel); 950 /* Due to a hardware quirk we may need to reset RX channels twice. */ 951 if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa) 952 gsi_channel_reset_command(channel); 953 954 gsi_channel_program(channel, doorbell); 955 gsi_channel_trans_cancel_pending(channel); 956 957 mutex_unlock(&gsi->mutex); 958 } 959 960 /* Stop a started channel for suspend */ 961 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id) 962 { 963 struct gsi_channel *channel = &gsi->channel[channel_id]; 964 int ret; 965 966 ret = __gsi_channel_stop(channel, true); 967 if (ret) 968 return ret; 969 970 /* Ensure NAPI polling has finished. */ 971 napi_synchronize(&channel->napi); 972 973 return 0; 974 } 975 976 /* Resume a suspended channel (starting if stopped) */ 977 int gsi_channel_resume(struct gsi *gsi, u32 channel_id) 978 { 979 struct gsi_channel *channel = &gsi->channel[channel_id]; 980 981 return __gsi_channel_start(channel, true); 982 } 983 984 /* Prevent all GSI interrupts while suspended */ 985 void gsi_suspend(struct gsi *gsi) 986 { 987 disable_irq(gsi->irq); 988 } 989 990 /* Allow all GSI interrupts again when resuming */ 991 void gsi_resume(struct gsi *gsi) 992 { 993 enable_irq(gsi->irq); 994 } 995 996 /** 997 * gsi_channel_tx_queued() - Report queued TX transfers for a channel 998 * @channel: Channel for which to report 999 * 1000 * Report to the network stack the number of bytes and transactions that 1001 * have been queued to hardware since last call. This and the next function 1002 * supply information used by the network stack for throttling. 1003 * 1004 * For each channel we track the number of transactions used and bytes of 1005 * data those transactions represent. We also track what those values are 1006 * each time this function is called. Subtracting the two tells us 1007 * the number of bytes and transactions that have been added between 1008 * successive calls. 1009 * 1010 * Calling this each time we ring the channel doorbell allows us to 1011 * provide accurate information to the network stack about how much 1012 * work we've given the hardware at any point in time. 1013 */ 1014 void gsi_channel_tx_queued(struct gsi_channel *channel) 1015 { 1016 u32 trans_count; 1017 u32 byte_count; 1018 1019 byte_count = channel->byte_count - channel->queued_byte_count; 1020 trans_count = channel->trans_count - channel->queued_trans_count; 1021 channel->queued_byte_count = channel->byte_count; 1022 channel->queued_trans_count = channel->trans_count; 1023 1024 ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel), 1025 trans_count, byte_count); 1026 } 1027 1028 /** 1029 * gsi_channel_tx_update() - Report completed TX transfers 1030 * @channel: Channel that has completed transmitting packets 1031 * @trans: Last transation known to be complete 1032 * 1033 * Compute the number of transactions and bytes that have been transferred 1034 * over a TX channel since the given transaction was committed. Report this 1035 * information to the network stack. 1036 * 1037 * At the time a transaction is committed, we record its channel's 1038 * committed transaction and byte counts *in the transaction*. 1039 * Completions are signaled by the hardware with an interrupt, and 1040 * we can determine the latest completed transaction at that time. 1041 * 1042 * The difference between the byte/transaction count recorded in 1043 * the transaction and the count last time we recorded a completion 1044 * tells us exactly how much data has been transferred between 1045 * completions. 1046 * 1047 * Calling this each time we learn of a newly-completed transaction 1048 * allows us to provide accurate information to the network stack 1049 * about how much work has been completed by the hardware at a given 1050 * point in time. 1051 */ 1052 static void 1053 gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans) 1054 { 1055 u64 byte_count = trans->byte_count + trans->len; 1056 u64 trans_count = trans->trans_count + 1; 1057 1058 byte_count -= channel->compl_byte_count; 1059 channel->compl_byte_count += byte_count; 1060 trans_count -= channel->compl_trans_count; 1061 channel->compl_trans_count += trans_count; 1062 1063 ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel), 1064 trans_count, byte_count); 1065 } 1066 1067 /* Channel control interrupt handler */ 1068 static void gsi_isr_chan_ctrl(struct gsi *gsi) 1069 { 1070 u32 channel_mask; 1071 1072 channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET); 1073 iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); 1074 1075 while (channel_mask) { 1076 u32 channel_id = __ffs(channel_mask); 1077 struct gsi_channel *channel; 1078 1079 channel_mask ^= BIT(channel_id); 1080 1081 channel = &gsi->channel[channel_id]; 1082 1083 complete(&channel->completion); 1084 } 1085 } 1086 1087 /* Event ring control interrupt handler */ 1088 static void gsi_isr_evt_ctrl(struct gsi *gsi) 1089 { 1090 u32 event_mask; 1091 1092 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET); 1093 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); 1094 1095 while (event_mask) { 1096 u32 evt_ring_id = __ffs(event_mask); 1097 struct gsi_evt_ring *evt_ring; 1098 1099 event_mask ^= BIT(evt_ring_id); 1100 1101 evt_ring = &gsi->evt_ring[evt_ring_id]; 1102 1103 complete(&evt_ring->completion); 1104 } 1105 } 1106 1107 /* Global channel error interrupt handler */ 1108 static void 1109 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) 1110 { 1111 if (code == GSI_OUT_OF_RESOURCES) { 1112 dev_err(gsi->dev, "channel %u out of resources\n", channel_id); 1113 complete(&gsi->channel[channel_id].completion); 1114 return; 1115 } 1116 1117 /* Report, but otherwise ignore all other error codes */ 1118 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n", 1119 channel_id, err_ee, code); 1120 } 1121 1122 /* Global event error interrupt handler */ 1123 static void 1124 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code) 1125 { 1126 if (code == GSI_OUT_OF_RESOURCES) { 1127 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 1128 u32 channel_id = gsi_channel_id(evt_ring->channel); 1129 1130 complete(&evt_ring->completion); 1131 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", 1132 channel_id); 1133 return; 1134 } 1135 1136 /* Report, but otherwise ignore all other error codes */ 1137 dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n", 1138 evt_ring_id, err_ee, code); 1139 } 1140 1141 /* Global error interrupt handler */ 1142 static void gsi_isr_glob_err(struct gsi *gsi) 1143 { 1144 enum gsi_err_type type; 1145 enum gsi_err_code code; 1146 u32 which; 1147 u32 val; 1148 u32 ee; 1149 1150 /* Get the logged error, then reinitialize the log */ 1151 val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET); 1152 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); 1153 iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET); 1154 1155 ee = u32_get_bits(val, ERR_EE_FMASK); 1156 type = u32_get_bits(val, ERR_TYPE_FMASK); 1157 which = u32_get_bits(val, ERR_VIRT_IDX_FMASK); 1158 code = u32_get_bits(val, ERR_CODE_FMASK); 1159 1160 if (type == GSI_ERR_TYPE_CHAN) 1161 gsi_isr_glob_chan_err(gsi, ee, which, code); 1162 else if (type == GSI_ERR_TYPE_EVT) 1163 gsi_isr_glob_evt_err(gsi, ee, which, code); 1164 else /* type GSI_ERR_TYPE_GLOB should be fatal */ 1165 dev_err(gsi->dev, "unexpected global error 0x%08x\n", type); 1166 } 1167 1168 /* Generic EE interrupt handler */ 1169 static void gsi_isr_gp_int1(struct gsi *gsi) 1170 { 1171 u32 result; 1172 u32 val; 1173 1174 /* This interrupt is used to handle completions of the two GENERIC 1175 * GSI commands. We use these to allocate and halt channels on 1176 * the modem's behalf due to a hardware quirk on IPA v4.2. Once 1177 * allocated, the modem "owns" these channels, and as a result we 1178 * have no way of knowing the channel's state at any given time. 1179 * 1180 * It is recommended that we halt the modem channels we allocated 1181 * when shutting down, but it's possible the channel isn't running 1182 * at the time we issue the HALT command. We'll get an error in 1183 * that case, but it's harmless (the channel is already halted). 1184 * 1185 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error 1186 * if we receive it. 1187 */ 1188 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1189 result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK); 1190 1191 switch (result) { 1192 case GENERIC_EE_SUCCESS: 1193 case GENERIC_EE_CHANNEL_NOT_RUNNING: 1194 gsi->result = 0; 1195 break; 1196 1197 case GENERIC_EE_RETRY: 1198 gsi->result = -EAGAIN; 1199 break; 1200 1201 default: 1202 dev_err(gsi->dev, "global INT1 generic result %u\n", result); 1203 gsi->result = -EIO; 1204 break; 1205 } 1206 1207 complete(&gsi->completion); 1208 } 1209 1210 /* Inter-EE interrupt handler */ 1211 static void gsi_isr_glob_ee(struct gsi *gsi) 1212 { 1213 u32 val; 1214 1215 val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET); 1216 1217 if (val & BIT(ERROR_INT)) 1218 gsi_isr_glob_err(gsi); 1219 1220 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET); 1221 1222 val &= ~BIT(ERROR_INT); 1223 1224 if (val & BIT(GP_INT1)) { 1225 val ^= BIT(GP_INT1); 1226 gsi_isr_gp_int1(gsi); 1227 } 1228 1229 if (val) 1230 dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val); 1231 } 1232 1233 /* I/O completion interrupt event */ 1234 static void gsi_isr_ieob(struct gsi *gsi) 1235 { 1236 u32 event_mask; 1237 1238 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET); 1239 gsi_irq_ieob_disable(gsi, event_mask); 1240 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET); 1241 1242 while (event_mask) { 1243 u32 evt_ring_id = __ffs(event_mask); 1244 1245 event_mask ^= BIT(evt_ring_id); 1246 1247 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi); 1248 } 1249 } 1250 1251 /* General event interrupts represent serious problems, so report them */ 1252 static void gsi_isr_general(struct gsi *gsi) 1253 { 1254 struct device *dev = gsi->dev; 1255 u32 val; 1256 1257 val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET); 1258 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET); 1259 1260 dev_err(dev, "unexpected general interrupt 0x%08x\n", val); 1261 } 1262 1263 /** 1264 * gsi_isr() - Top level GSI interrupt service routine 1265 * @irq: Interrupt number (ignored) 1266 * @dev_id: GSI pointer supplied to request_irq() 1267 * 1268 * This is the main handler function registered for the GSI IRQ. Each type 1269 * of interrupt has a separate handler function that is called from here. 1270 */ 1271 static irqreturn_t gsi_isr(int irq, void *dev_id) 1272 { 1273 struct gsi *gsi = dev_id; 1274 u32 intr_mask; 1275 u32 cnt = 0; 1276 1277 /* enum gsi_irq_type_id defines GSI interrupt types */ 1278 while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) { 1279 /* intr_mask contains bitmask of pending GSI interrupts */ 1280 do { 1281 u32 gsi_intr = BIT(__ffs(intr_mask)); 1282 1283 intr_mask ^= gsi_intr; 1284 1285 switch (gsi_intr) { 1286 case BIT(GSI_CH_CTRL): 1287 gsi_isr_chan_ctrl(gsi); 1288 break; 1289 case BIT(GSI_EV_CTRL): 1290 gsi_isr_evt_ctrl(gsi); 1291 break; 1292 case BIT(GSI_GLOB_EE): 1293 gsi_isr_glob_ee(gsi); 1294 break; 1295 case BIT(GSI_IEOB): 1296 gsi_isr_ieob(gsi); 1297 break; 1298 case BIT(GSI_GENERAL): 1299 gsi_isr_general(gsi); 1300 break; 1301 default: 1302 dev_err(gsi->dev, 1303 "unrecognized interrupt type 0x%08x\n", 1304 gsi_intr); 1305 break; 1306 } 1307 } while (intr_mask); 1308 1309 if (++cnt > GSI_ISR_MAX_ITER) { 1310 dev_err(gsi->dev, "interrupt flood\n"); 1311 break; 1312 } 1313 } 1314 1315 return IRQ_HANDLED; 1316 } 1317 1318 /* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */ 1319 static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) 1320 { 1321 int ret; 1322 1323 ret = platform_get_irq_byname(pdev, "gsi"); 1324 if (ret <= 0) 1325 return ret ? : -EINVAL; 1326 1327 gsi->irq = ret; 1328 1329 return 0; 1330 } 1331 1332 /* Return the transaction associated with a transfer completion event */ 1333 static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, 1334 struct gsi_event *event) 1335 { 1336 u32 tre_offset; 1337 u32 tre_index; 1338 1339 /* Event xfer_ptr records the TRE it's associated with */ 1340 tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr)); 1341 tre_index = gsi_ring_index(&channel->tre_ring, tre_offset); 1342 1343 return gsi_channel_trans_mapped(channel, tre_index); 1344 } 1345 1346 /** 1347 * gsi_evt_ring_rx_update() - Record lengths of received data 1348 * @evt_ring: Event ring associated with channel that received packets 1349 * @index: Event index in ring reported by hardware 1350 * 1351 * Events for RX channels contain the actual number of bytes received into 1352 * the buffer. Every event has a transaction associated with it, and here 1353 * we update transactions to record their actual received lengths. 1354 * 1355 * This function is called whenever we learn that the GSI hardware has filled 1356 * new events since the last time we checked. The ring's index field tells 1357 * the first entry in need of processing. The index provided is the 1358 * first *unfilled* event in the ring (following the last filled one). 1359 * 1360 * Events are sequential within the event ring, and transactions are 1361 * sequential within the transaction pool. 1362 * 1363 * Note that @index always refers to an element *within* the event ring. 1364 */ 1365 static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) 1366 { 1367 struct gsi_channel *channel = evt_ring->channel; 1368 struct gsi_ring *ring = &evt_ring->ring; 1369 struct gsi_trans_info *trans_info; 1370 struct gsi_event *event_done; 1371 struct gsi_event *event; 1372 struct gsi_trans *trans; 1373 u32 byte_count = 0; 1374 u32 old_index; 1375 u32 event_avail; 1376 1377 trans_info = &channel->trans_info; 1378 1379 /* We'll start with the oldest un-processed event. RX channels 1380 * replenish receive buffers in single-TRE transactions, so we 1381 * can just map that event to its transaction. Transactions 1382 * associated with completion events are consecutive. 1383 */ 1384 old_index = ring->index; 1385 event = gsi_ring_virt(ring, old_index); 1386 trans = gsi_event_trans(channel, event); 1387 1388 /* Compute the number of events to process before we wrap, 1389 * and determine when we'll be done processing events. 1390 */ 1391 event_avail = ring->count - old_index % ring->count; 1392 event_done = gsi_ring_virt(ring, index); 1393 do { 1394 trans->len = __le16_to_cpu(event->len); 1395 byte_count += trans->len; 1396 1397 /* Move on to the next event and transaction */ 1398 if (--event_avail) 1399 event++; 1400 else 1401 event = gsi_ring_virt(ring, 0); 1402 trans = gsi_trans_pool_next(&trans_info->pool, trans); 1403 } while (event != event_done); 1404 1405 /* We record RX bytes when they are received */ 1406 channel->byte_count += byte_count; 1407 channel->trans_count++; 1408 } 1409 1410 /* Initialize a ring, including allocating DMA memory for its entries */ 1411 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) 1412 { 1413 u32 size = count * GSI_RING_ELEMENT_SIZE; 1414 struct device *dev = gsi->dev; 1415 dma_addr_t addr; 1416 1417 /* Hardware requires a 2^n ring size, with alignment equal to size. 1418 * The DMA address returned by dma_alloc_coherent() is guaranteed to 1419 * be a power-of-2 number of pages, which satisfies the requirement. 1420 */ 1421 ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); 1422 if (!ring->virt) 1423 return -ENOMEM; 1424 1425 ring->addr = addr; 1426 ring->count = count; 1427 1428 return 0; 1429 } 1430 1431 /* Free a previously-allocated ring */ 1432 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring) 1433 { 1434 size_t size = ring->count * GSI_RING_ELEMENT_SIZE; 1435 1436 dma_free_coherent(gsi->dev, size, ring->virt, ring->addr); 1437 } 1438 1439 /* Allocate an available event ring id */ 1440 static int gsi_evt_ring_id_alloc(struct gsi *gsi) 1441 { 1442 u32 evt_ring_id; 1443 1444 if (gsi->event_bitmap == ~0U) { 1445 dev_err(gsi->dev, "event rings exhausted\n"); 1446 return -ENOSPC; 1447 } 1448 1449 evt_ring_id = ffz(gsi->event_bitmap); 1450 gsi->event_bitmap |= BIT(evt_ring_id); 1451 1452 return (int)evt_ring_id; 1453 } 1454 1455 /* Free a previously-allocated event ring id */ 1456 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id) 1457 { 1458 gsi->event_bitmap &= ~BIT(evt_ring_id); 1459 } 1460 1461 /* Ring a channel doorbell, reporting the first un-filled entry */ 1462 void gsi_channel_doorbell(struct gsi_channel *channel) 1463 { 1464 struct gsi_ring *tre_ring = &channel->tre_ring; 1465 u32 channel_id = gsi_channel_id(channel); 1466 struct gsi *gsi = channel->gsi; 1467 u32 val; 1468 1469 /* Note: index *must* be used modulo the ring count here */ 1470 val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count); 1471 iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id)); 1472 } 1473 1474 /* Consult hardware, move any newly completed transactions to completed list */ 1475 static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel) 1476 { 1477 u32 evt_ring_id = channel->evt_ring_id; 1478 struct gsi *gsi = channel->gsi; 1479 struct gsi_evt_ring *evt_ring; 1480 struct gsi_trans *trans; 1481 struct gsi_ring *ring; 1482 u32 offset; 1483 u32 index; 1484 1485 evt_ring = &gsi->evt_ring[evt_ring_id]; 1486 ring = &evt_ring->ring; 1487 1488 /* See if there's anything new to process; if not, we're done. Note 1489 * that index always refers to an entry *within* the event ring. 1490 */ 1491 offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id); 1492 index = gsi_ring_index(ring, ioread32(gsi->virt + offset)); 1493 if (index == ring->index % ring->count) 1494 return NULL; 1495 1496 /* Get the transaction for the latest completed event. Take a 1497 * reference to keep it from completing before we give the events 1498 * for this and previous transactions back to the hardware. 1499 */ 1500 trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1)); 1501 refcount_inc(&trans->refcount); 1502 1503 /* For RX channels, update each completed transaction with the number 1504 * of bytes that were actually received. For TX channels, report 1505 * the number of transactions and bytes this completion represents 1506 * up the network stack. 1507 */ 1508 if (channel->toward_ipa) 1509 gsi_channel_tx_update(channel, trans); 1510 else 1511 gsi_evt_ring_rx_update(evt_ring, index); 1512 1513 gsi_trans_move_complete(trans); 1514 1515 /* Tell the hardware we've handled these events */ 1516 gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index); 1517 1518 gsi_trans_free(trans); 1519 1520 return gsi_channel_trans_complete(channel); 1521 } 1522 1523 /** 1524 * gsi_channel_poll_one() - Return a single completed transaction on a channel 1525 * @channel: Channel to be polled 1526 * 1527 * Return: Transaction pointer, or null if none are available 1528 * 1529 * This function returns the first entry on a channel's completed transaction 1530 * list. If that list is empty, the hardware is consulted to determine 1531 * whether any new transactions have completed. If so, they're moved to the 1532 * completed list and the new first entry is returned. If there are no more 1533 * completed transactions, a null pointer is returned. 1534 */ 1535 static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) 1536 { 1537 struct gsi_trans *trans; 1538 1539 /* Get the first transaction from the completed list */ 1540 trans = gsi_channel_trans_complete(channel); 1541 if (!trans) /* List is empty; see if there's more to do */ 1542 trans = gsi_channel_update(channel); 1543 1544 if (trans) 1545 gsi_trans_move_polled(trans); 1546 1547 return trans; 1548 } 1549 1550 /** 1551 * gsi_channel_poll() - NAPI poll function for a channel 1552 * @napi: NAPI structure for the channel 1553 * @budget: Budget supplied by NAPI core 1554 * 1555 * Return: Number of items polled (<= budget) 1556 * 1557 * Single transactions completed by hardware are polled until either 1558 * the budget is exhausted, or there are no more. Each transaction 1559 * polled is passed to gsi_trans_complete(), to perform remaining 1560 * completion processing and retire/free the transaction. 1561 */ 1562 static int gsi_channel_poll(struct napi_struct *napi, int budget) 1563 { 1564 struct gsi_channel *channel; 1565 int count; 1566 1567 channel = container_of(napi, struct gsi_channel, napi); 1568 for (count = 0; count < budget; count++) { 1569 struct gsi_trans *trans; 1570 1571 trans = gsi_channel_poll_one(channel); 1572 if (!trans) 1573 break; 1574 gsi_trans_complete(trans); 1575 } 1576 1577 if (count < budget && napi_complete(napi)) 1578 gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id); 1579 1580 return count; 1581 } 1582 1583 /* The event bitmap represents which event ids are available for allocation. 1584 * Set bits are not available, clear bits can be used. This function 1585 * initializes the map so all events supported by the hardware are available, 1586 * then precludes any reserved events from being allocated. 1587 */ 1588 static u32 gsi_event_bitmap_init(u32 evt_ring_max) 1589 { 1590 u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max); 1591 1592 event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START); 1593 1594 return event_bitmap; 1595 } 1596 1597 /* Setup function for a single channel */ 1598 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) 1599 { 1600 struct gsi_channel *channel = &gsi->channel[channel_id]; 1601 u32 evt_ring_id = channel->evt_ring_id; 1602 int ret; 1603 1604 if (!gsi_channel_initialized(channel)) 1605 return 0; 1606 1607 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id); 1608 if (ret) 1609 return ret; 1610 1611 gsi_evt_ring_program(gsi, evt_ring_id); 1612 1613 ret = gsi_channel_alloc_command(gsi, channel_id); 1614 if (ret) 1615 goto err_evt_ring_de_alloc; 1616 1617 gsi_channel_program(channel, true); 1618 1619 if (channel->toward_ipa) 1620 netif_tx_napi_add(&gsi->dummy_dev, &channel->napi, 1621 gsi_channel_poll, NAPI_POLL_WEIGHT); 1622 else 1623 netif_napi_add(&gsi->dummy_dev, &channel->napi, 1624 gsi_channel_poll, NAPI_POLL_WEIGHT); 1625 1626 return 0; 1627 1628 err_evt_ring_de_alloc: 1629 /* We've done nothing with the event ring yet so don't reset */ 1630 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); 1631 1632 return ret; 1633 } 1634 1635 /* Inverse of gsi_channel_setup_one() */ 1636 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) 1637 { 1638 struct gsi_channel *channel = &gsi->channel[channel_id]; 1639 u32 evt_ring_id = channel->evt_ring_id; 1640 1641 if (!gsi_channel_initialized(channel)) 1642 return; 1643 1644 netif_napi_del(&channel->napi); 1645 1646 gsi_channel_de_alloc_command(gsi, channel_id); 1647 gsi_evt_ring_reset_command(gsi, evt_ring_id); 1648 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); 1649 } 1650 1651 static int gsi_generic_command(struct gsi *gsi, u32 channel_id, 1652 enum gsi_generic_cmd_opcode opcode) 1653 { 1654 struct completion *completion = &gsi->completion; 1655 bool timeout; 1656 u32 val; 1657 1658 /* The error global interrupt type is always enabled (until we 1659 * teardown), so we won't change that. A generic EE command 1660 * completes with a GSI global interrupt of type GP_INT1. We 1661 * only perform one generic command at a time (to allocate or 1662 * halt a modem channel) and only from this function. So we 1663 * enable the GP_INT1 IRQ type here while we're expecting it. 1664 */ 1665 val = BIT(ERROR_INT) | BIT(GP_INT1); 1666 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 1667 1668 /* First zero the result code field */ 1669 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1670 val &= ~GENERIC_EE_RESULT_FMASK; 1671 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1672 1673 /* Now issue the command */ 1674 val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK); 1675 val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); 1676 val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); 1677 1678 timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion); 1679 1680 /* Disable the GP_INT1 IRQ type again */ 1681 iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 1682 1683 if (!timeout) 1684 return gsi->result; 1685 1686 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", 1687 opcode, channel_id); 1688 1689 return -ETIMEDOUT; 1690 } 1691 1692 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) 1693 { 1694 return gsi_generic_command(gsi, channel_id, 1695 GSI_GENERIC_ALLOCATE_CHANNEL); 1696 } 1697 1698 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) 1699 { 1700 u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES; 1701 int ret; 1702 1703 do 1704 ret = gsi_generic_command(gsi, channel_id, 1705 GSI_GENERIC_HALT_CHANNEL); 1706 while (ret == -EAGAIN && retries--); 1707 1708 if (ret) 1709 dev_err(gsi->dev, "error %d halting modem channel %u\n", 1710 ret, channel_id); 1711 } 1712 1713 /* Setup function for channels */ 1714 static int gsi_channel_setup(struct gsi *gsi) 1715 { 1716 u32 channel_id = 0; 1717 u32 mask; 1718 int ret; 1719 1720 gsi_irq_enable(gsi); 1721 1722 mutex_lock(&gsi->mutex); 1723 1724 do { 1725 ret = gsi_channel_setup_one(gsi, channel_id); 1726 if (ret) 1727 goto err_unwind; 1728 } while (++channel_id < gsi->channel_count); 1729 1730 /* Make sure no channels were defined that hardware does not support */ 1731 while (channel_id < GSI_CHANNEL_COUNT_MAX) { 1732 struct gsi_channel *channel = &gsi->channel[channel_id++]; 1733 1734 if (!gsi_channel_initialized(channel)) 1735 continue; 1736 1737 ret = -EINVAL; 1738 dev_err(gsi->dev, "channel %u not supported by hardware\n", 1739 channel_id - 1); 1740 channel_id = gsi->channel_count; 1741 goto err_unwind; 1742 } 1743 1744 /* Allocate modem channels if necessary */ 1745 mask = gsi->modem_channel_bitmap; 1746 while (mask) { 1747 u32 modem_channel_id = __ffs(mask); 1748 1749 ret = gsi_modem_channel_alloc(gsi, modem_channel_id); 1750 if (ret) 1751 goto err_unwind_modem; 1752 1753 /* Clear bit from mask only after success (for unwind) */ 1754 mask ^= BIT(modem_channel_id); 1755 } 1756 1757 mutex_unlock(&gsi->mutex); 1758 1759 return 0; 1760 1761 err_unwind_modem: 1762 /* Compute which modem channels need to be deallocated */ 1763 mask ^= gsi->modem_channel_bitmap; 1764 while (mask) { 1765 channel_id = __fls(mask); 1766 1767 mask ^= BIT(channel_id); 1768 1769 gsi_modem_channel_halt(gsi, channel_id); 1770 } 1771 1772 err_unwind: 1773 while (channel_id--) 1774 gsi_channel_teardown_one(gsi, channel_id); 1775 1776 mutex_unlock(&gsi->mutex); 1777 1778 gsi_irq_disable(gsi); 1779 1780 return ret; 1781 } 1782 1783 /* Inverse of gsi_channel_setup() */ 1784 static void gsi_channel_teardown(struct gsi *gsi) 1785 { 1786 u32 mask = gsi->modem_channel_bitmap; 1787 u32 channel_id; 1788 1789 mutex_lock(&gsi->mutex); 1790 1791 while (mask) { 1792 channel_id = __fls(mask); 1793 1794 mask ^= BIT(channel_id); 1795 1796 gsi_modem_channel_halt(gsi, channel_id); 1797 } 1798 1799 channel_id = gsi->channel_count - 1; 1800 do 1801 gsi_channel_teardown_one(gsi, channel_id); 1802 while (channel_id--); 1803 1804 mutex_unlock(&gsi->mutex); 1805 1806 gsi_irq_disable(gsi); 1807 } 1808 1809 /* Turn off all GSI interrupts initially */ 1810 static int gsi_irq_setup(struct gsi *gsi) 1811 { 1812 int ret; 1813 1814 /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ 1815 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); 1816 1817 /* Disable all interrupt types */ 1818 gsi_irq_type_update(gsi, 0); 1819 1820 /* Clear all type-specific interrupt masks */ 1821 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 1822 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 1823 iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 1824 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 1825 1826 /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */ 1827 if (gsi->version > IPA_VERSION_3_1) { 1828 u32 offset; 1829 1830 /* These registers are in the non-adjusted address range */ 1831 offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET; 1832 iowrite32(0, gsi->virt_raw + offset); 1833 offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET; 1834 iowrite32(0, gsi->virt_raw + offset); 1835 } 1836 1837 iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 1838 1839 ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi); 1840 if (ret) 1841 dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret); 1842 1843 return ret; 1844 } 1845 1846 static void gsi_irq_teardown(struct gsi *gsi) 1847 { 1848 free_irq(gsi->irq, gsi); 1849 } 1850 1851 /* Get # supported channel and event rings; there is no gsi_ring_teardown() */ 1852 static int gsi_ring_setup(struct gsi *gsi) 1853 { 1854 struct device *dev = gsi->dev; 1855 u32 count; 1856 u32 val; 1857 1858 if (gsi->version < IPA_VERSION_3_5_1) { 1859 /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */ 1860 gsi->channel_count = GSI_CHANNEL_COUNT_MAX; 1861 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; 1862 1863 return 0; 1864 } 1865 1866 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); 1867 1868 count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); 1869 if (!count) { 1870 dev_err(dev, "GSI reports zero channels supported\n"); 1871 return -EINVAL; 1872 } 1873 if (count > GSI_CHANNEL_COUNT_MAX) { 1874 dev_warn(dev, "limiting to %u channels; hardware supports %u\n", 1875 GSI_CHANNEL_COUNT_MAX, count); 1876 count = GSI_CHANNEL_COUNT_MAX; 1877 } 1878 gsi->channel_count = count; 1879 1880 count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); 1881 if (!count) { 1882 dev_err(dev, "GSI reports zero event rings supported\n"); 1883 return -EINVAL; 1884 } 1885 if (count > GSI_EVT_RING_COUNT_MAX) { 1886 dev_warn(dev, 1887 "limiting to %u event rings; hardware supports %u\n", 1888 GSI_EVT_RING_COUNT_MAX, count); 1889 count = GSI_EVT_RING_COUNT_MAX; 1890 } 1891 gsi->evt_ring_count = count; 1892 1893 return 0; 1894 } 1895 1896 /* Setup function for GSI. GSI firmware must be loaded and initialized */ 1897 int gsi_setup(struct gsi *gsi) 1898 { 1899 u32 val; 1900 int ret; 1901 1902 /* Here is where we first touch the GSI hardware */ 1903 val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET); 1904 if (!(val & ENABLED_FMASK)) { 1905 dev_err(gsi->dev, "GSI has not been enabled\n"); 1906 return -EIO; 1907 } 1908 1909 ret = gsi_irq_setup(gsi); 1910 if (ret) 1911 return ret; 1912 1913 ret = gsi_ring_setup(gsi); /* No matching teardown required */ 1914 if (ret) 1915 goto err_irq_teardown; 1916 1917 /* Initialize the error log */ 1918 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); 1919 1920 ret = gsi_channel_setup(gsi); 1921 if (ret) 1922 goto err_irq_teardown; 1923 1924 return 0; 1925 1926 err_irq_teardown: 1927 gsi_irq_teardown(gsi); 1928 1929 return ret; 1930 } 1931 1932 /* Inverse of gsi_setup() */ 1933 void gsi_teardown(struct gsi *gsi) 1934 { 1935 gsi_channel_teardown(gsi); 1936 gsi_irq_teardown(gsi); 1937 } 1938 1939 /* Initialize a channel's event ring */ 1940 static int gsi_channel_evt_ring_init(struct gsi_channel *channel) 1941 { 1942 struct gsi *gsi = channel->gsi; 1943 struct gsi_evt_ring *evt_ring; 1944 int ret; 1945 1946 ret = gsi_evt_ring_id_alloc(gsi); 1947 if (ret < 0) 1948 return ret; 1949 channel->evt_ring_id = ret; 1950 1951 evt_ring = &gsi->evt_ring[channel->evt_ring_id]; 1952 evt_ring->channel = channel; 1953 1954 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count); 1955 if (!ret) 1956 return 0; /* Success! */ 1957 1958 dev_err(gsi->dev, "error %d allocating channel %u event ring\n", 1959 ret, gsi_channel_id(channel)); 1960 1961 gsi_evt_ring_id_free(gsi, channel->evt_ring_id); 1962 1963 return ret; 1964 } 1965 1966 /* Inverse of gsi_channel_evt_ring_init() */ 1967 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel) 1968 { 1969 u32 evt_ring_id = channel->evt_ring_id; 1970 struct gsi *gsi = channel->gsi; 1971 struct gsi_evt_ring *evt_ring; 1972 1973 evt_ring = &gsi->evt_ring[evt_ring_id]; 1974 gsi_ring_free(gsi, &evt_ring->ring); 1975 gsi_evt_ring_id_free(gsi, evt_ring_id); 1976 } 1977 1978 /* Init function for event rings; there is no gsi_evt_ring_exit() */ 1979 static void gsi_evt_ring_init(struct gsi *gsi) 1980 { 1981 u32 evt_ring_id = 0; 1982 1983 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); 1984 gsi->ieob_enabled_bitmap = 0; 1985 do 1986 init_completion(&gsi->evt_ring[evt_ring_id].completion); 1987 while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX); 1988 } 1989 1990 static bool gsi_channel_data_valid(struct gsi *gsi, 1991 const struct ipa_gsi_endpoint_data *data) 1992 { 1993 u32 channel_id = data->channel_id; 1994 struct device *dev = gsi->dev; 1995 1996 /* Make sure channel ids are in the range driver supports */ 1997 if (channel_id >= GSI_CHANNEL_COUNT_MAX) { 1998 dev_err(dev, "bad channel id %u; must be less than %u\n", 1999 channel_id, GSI_CHANNEL_COUNT_MAX); 2000 return false; 2001 } 2002 2003 if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) { 2004 dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id); 2005 return false; 2006 } 2007 2008 if (!data->channel.tlv_count || 2009 data->channel.tlv_count > GSI_TLV_MAX) { 2010 dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n", 2011 channel_id, data->channel.tlv_count, GSI_TLV_MAX); 2012 return false; 2013 } 2014 2015 /* We have to allow at least one maximally-sized transaction to 2016 * be outstanding (which would use tlv_count TREs). Given how 2017 * gsi_channel_tre_max() is computed, tre_count has to be almost 2018 * twice the TLV FIFO size to satisfy this requirement. 2019 */ 2020 if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) { 2021 dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n", 2022 channel_id, data->channel.tlv_count, 2023 data->channel.tre_count); 2024 return false; 2025 } 2026 2027 if (!is_power_of_2(data->channel.tre_count)) { 2028 dev_err(dev, "channel %u bad tre_count %u; not power of 2\n", 2029 channel_id, data->channel.tre_count); 2030 return false; 2031 } 2032 2033 if (!is_power_of_2(data->channel.event_count)) { 2034 dev_err(dev, "channel %u bad event_count %u; not power of 2\n", 2035 channel_id, data->channel.event_count); 2036 return false; 2037 } 2038 2039 return true; 2040 } 2041 2042 /* Init function for a single channel */ 2043 static int gsi_channel_init_one(struct gsi *gsi, 2044 const struct ipa_gsi_endpoint_data *data, 2045 bool command) 2046 { 2047 struct gsi_channel *channel; 2048 u32 tre_count; 2049 int ret; 2050 2051 if (!gsi_channel_data_valid(gsi, data)) 2052 return -EINVAL; 2053 2054 /* Worst case we need an event for every outstanding TRE */ 2055 if (data->channel.tre_count > data->channel.event_count) { 2056 tre_count = data->channel.event_count; 2057 dev_warn(gsi->dev, "channel %u limited to %u TREs\n", 2058 data->channel_id, tre_count); 2059 } else { 2060 tre_count = data->channel.tre_count; 2061 } 2062 2063 channel = &gsi->channel[data->channel_id]; 2064 memset(channel, 0, sizeof(*channel)); 2065 2066 channel->gsi = gsi; 2067 channel->toward_ipa = data->toward_ipa; 2068 channel->command = command; 2069 channel->tlv_count = data->channel.tlv_count; 2070 channel->tre_count = tre_count; 2071 channel->event_count = data->channel.event_count; 2072 init_completion(&channel->completion); 2073 2074 ret = gsi_channel_evt_ring_init(channel); 2075 if (ret) 2076 goto err_clear_gsi; 2077 2078 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); 2079 if (ret) { 2080 dev_err(gsi->dev, "error %d allocating channel %u ring\n", 2081 ret, data->channel_id); 2082 goto err_channel_evt_ring_exit; 2083 } 2084 2085 ret = gsi_channel_trans_init(gsi, data->channel_id); 2086 if (ret) 2087 goto err_ring_free; 2088 2089 if (command) { 2090 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id); 2091 2092 ret = ipa_cmd_pool_init(channel, tre_max); 2093 } 2094 if (!ret) 2095 return 0; /* Success! */ 2096 2097 gsi_channel_trans_exit(channel); 2098 err_ring_free: 2099 gsi_ring_free(gsi, &channel->tre_ring); 2100 err_channel_evt_ring_exit: 2101 gsi_channel_evt_ring_exit(channel); 2102 err_clear_gsi: 2103 channel->gsi = NULL; /* Mark it not (fully) initialized */ 2104 2105 return ret; 2106 } 2107 2108 /* Inverse of gsi_channel_init_one() */ 2109 static void gsi_channel_exit_one(struct gsi_channel *channel) 2110 { 2111 if (!gsi_channel_initialized(channel)) 2112 return; 2113 2114 if (channel->command) 2115 ipa_cmd_pool_exit(channel); 2116 gsi_channel_trans_exit(channel); 2117 gsi_ring_free(channel->gsi, &channel->tre_ring); 2118 gsi_channel_evt_ring_exit(channel); 2119 } 2120 2121 /* Init function for channels */ 2122 static int gsi_channel_init(struct gsi *gsi, u32 count, 2123 const struct ipa_gsi_endpoint_data *data) 2124 { 2125 bool modem_alloc; 2126 int ret = 0; 2127 u32 i; 2128 2129 /* IPA v4.2 requires the AP to allocate channels for the modem */ 2130 modem_alloc = gsi->version == IPA_VERSION_4_2; 2131 2132 gsi_evt_ring_init(gsi); /* No matching exit required */ 2133 2134 /* The endpoint data array is indexed by endpoint name */ 2135 for (i = 0; i < count; i++) { 2136 bool command = i == IPA_ENDPOINT_AP_COMMAND_TX; 2137 2138 if (ipa_gsi_endpoint_data_empty(&data[i])) 2139 continue; /* Skip over empty slots */ 2140 2141 /* Mark modem channels to be allocated (hardware workaround) */ 2142 if (data[i].ee_id == GSI_EE_MODEM) { 2143 if (modem_alloc) 2144 gsi->modem_channel_bitmap |= 2145 BIT(data[i].channel_id); 2146 continue; 2147 } 2148 2149 ret = gsi_channel_init_one(gsi, &data[i], command); 2150 if (ret) 2151 goto err_unwind; 2152 } 2153 2154 return ret; 2155 2156 err_unwind: 2157 while (i--) { 2158 if (ipa_gsi_endpoint_data_empty(&data[i])) 2159 continue; 2160 if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) { 2161 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id); 2162 continue; 2163 } 2164 gsi_channel_exit_one(&gsi->channel[data->channel_id]); 2165 } 2166 2167 return ret; 2168 } 2169 2170 /* Inverse of gsi_channel_init() */ 2171 static void gsi_channel_exit(struct gsi *gsi) 2172 { 2173 u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1; 2174 2175 do 2176 gsi_channel_exit_one(&gsi->channel[channel_id]); 2177 while (channel_id--); 2178 gsi->modem_channel_bitmap = 0; 2179 } 2180 2181 /* Init function for GSI. GSI hardware does not need to be "ready" */ 2182 int gsi_init(struct gsi *gsi, struct platform_device *pdev, 2183 enum ipa_version version, u32 count, 2184 const struct ipa_gsi_endpoint_data *data) 2185 { 2186 struct device *dev = &pdev->dev; 2187 struct resource *res; 2188 resource_size_t size; 2189 u32 adjust; 2190 int ret; 2191 2192 gsi_validate_build(); 2193 2194 gsi->dev = dev; 2195 gsi->version = version; 2196 2197 /* GSI uses NAPI on all channels. Create a dummy network device 2198 * for the channel NAPI contexts to be associated with. 2199 */ 2200 init_dummy_netdev(&gsi->dummy_dev); 2201 2202 /* Get GSI memory range and map it */ 2203 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi"); 2204 if (!res) { 2205 dev_err(dev, "DT error getting \"gsi\" memory property\n"); 2206 return -ENODEV; 2207 } 2208 2209 size = resource_size(res); 2210 if (res->start > U32_MAX || size > U32_MAX - res->start) { 2211 dev_err(dev, "DT memory resource \"gsi\" out of range\n"); 2212 return -EINVAL; 2213 } 2214 2215 /* Make sure we can make our pointer adjustment if necessary */ 2216 adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST; 2217 if (res->start < adjust) { 2218 dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n", 2219 adjust); 2220 return -EINVAL; 2221 } 2222 2223 gsi->virt_raw = ioremap(res->start, size); 2224 if (!gsi->virt_raw) { 2225 dev_err(dev, "unable to remap \"gsi\" memory\n"); 2226 return -ENOMEM; 2227 } 2228 /* Most registers are accessed using an adjusted register range */ 2229 gsi->virt = gsi->virt_raw - adjust; 2230 2231 init_completion(&gsi->completion); 2232 2233 ret = gsi_irq_init(gsi, pdev); /* No matching exit required */ 2234 if (ret) 2235 goto err_iounmap; 2236 2237 ret = gsi_channel_init(gsi, count, data); 2238 if (ret) 2239 goto err_iounmap; 2240 2241 mutex_init(&gsi->mutex); 2242 2243 return 0; 2244 2245 err_iounmap: 2246 iounmap(gsi->virt_raw); 2247 2248 return ret; 2249 } 2250 2251 /* Inverse of gsi_init() */ 2252 void gsi_exit(struct gsi *gsi) 2253 { 2254 mutex_destroy(&gsi->mutex); 2255 gsi_channel_exit(gsi); 2256 iounmap(gsi->virt_raw); 2257 } 2258 2259 /* The maximum number of outstanding TREs on a channel. This limits 2260 * a channel's maximum number of transactions outstanding (worst case 2261 * is one TRE per transaction). 2262 * 2263 * The absolute limit is the number of TREs in the channel's TRE ring, 2264 * and in theory we should be able use all of them. But in practice, 2265 * doing that led to the hardware reporting exhaustion of event ring 2266 * slots for writing completion information. So the hardware limit 2267 * would be (tre_count - 1). 2268 * 2269 * We reduce it a bit further though. Transaction resource pools are 2270 * sized to be a little larger than this maximum, to allow resource 2271 * allocations to always be contiguous. The number of entries in a 2272 * TRE ring buffer is a power of 2, and the extra resources in a pool 2273 * tends to nearly double the memory allocated for it. Reducing the 2274 * maximum number of outstanding TREs allows the number of entries in 2275 * a pool to avoid crossing that power-of-2 boundary, and this can 2276 * substantially reduce pool memory requirements. The number we 2277 * reduce it by matches the number added in gsi_trans_pool_init(). 2278 */ 2279 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id) 2280 { 2281 struct gsi_channel *channel = &gsi->channel[channel_id]; 2282 2283 /* Hardware limit is channel->tre_count - 1 */ 2284 return channel->tre_count - (channel->tlv_count - 1); 2285 } 2286 2287 /* Returns the maximum number of TREs in a single transaction for a channel */ 2288 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id) 2289 { 2290 struct gsi_channel *channel = &gsi->channel[channel_id]; 2291 2292 return channel->tlv_count; 2293 } 2294