// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	        --------             ---------
 *	        |      |             |       |
 *	        |  AP  +<---.   .----+ Modem |
 *	        |      +--. |   | .->+       |
 *	        |      |  | |   | |  |       |
 *	        --------  | |   | |  ---------
 *	                  v |   v |
 *	                --+-+---+-+--
 *	                |    GSI    |
 *	                |-----------|
 *	                |           |
 *	                |    IPA    |
 *	                |           |
 *	                -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
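
/* For orientation, a simplified sketch (not a strict call sequence) of how
 * an AP transfer moves through the functions in this file:
 *
 * 1. TREs describing the transfer are written to the channel ring, and
 *    gsi_channel_doorbell() tells the hardware how far the ring is filled.
 * 2. The hardware processes the TREs and appends a completion event to the
 *    channel's event ring, raising an IEOB interrupt.
 * 3. gsi_isr() dispatches to gsi_isr_ieob(), which masks the interrupt and
 *    schedules NAPI on the channel.
 * 4. gsi_channel_poll() retires completed transactions, and the event ring
 *    doorbell is rung to hand the consumed event entries back to hardware.
 */
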
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel (in bytes).  This determines the amount of prefetch
 *	performed by the hardware.  We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};
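
/* Layout note (a reading of the structures above, assuming the usual
 * little-endian struct layout): in the "data" view, max_outstanding_tre
 * lands in the high-order 16 bits of word3, and outstanding_threshold in
 * the high-order 16 bits of word4; the remaining bits are reserved.
 */
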
/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
	return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* The inter-EE registers are in the non-adjusted address range */
	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Event ring commands are performed one at a time.  Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command.  Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time.  Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command.  Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
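
/* Worked example for the helpers above (illustrative numbers): with
 * GSI_RING_ELEMENT_SIZE 16 (the size of struct gsi_event) and a ring of
 * count 8, gsi_ring_virt(ring, 9) wraps to slot 1 and returns
 * ring->virt + 16.  Note that gsi_ring_addr() performs no wrapping; its
 * callers pass an index already reduced modulo the ring count.
 */
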
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	/* Get initial event ring state */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void __iomem *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in STOPPED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
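
/* Example of the wrap behavior above: for a ring with count 8, passing
 * index 0 writes the address of slot 7, because (0 - 1) % 8 is 7 in
 * unsigned arithmetic.  This is why the modulo operation is required
 * rather than a plain "index - 1".
 */
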
/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data; the MSI address high and low registers are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}
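
/* Arithmetic check on the moderation delay programmed above:
 * GSI_EVT_RING_INT_MODT is 32 ticks of the IPA's 32KHz internal timer.
 * With an even 32 kHz clock that is exactly 1 ms; with a 32.768 kHz
 * crystal it is about 0.98 ms.  Either way, one moderation interval
 * roughly bounds how long a completion interrupt can be deferred.
 */
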
/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	const struct list_head *list;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	/* There is a small chance a TX transaction got allocated just
	 * before we disabled transmits, so check for that.
	 */
	if (channel->toward_ipa) {
		list = &trans_info->alloc;
		if (!list_empty(list))
			goto done;
		list = &trans_info->pending;
		if (!list_empty(list))
			goto done;
	}

	/* Otherwise (TX or RX) we want to wait for anything that
	 * has completed, or has been polled but not released yet.
	 */
	list = &trans_info->complete;
	if (!list_empty(list))
		goto done;
	list = &trans_info->polled;
	if (list_empty(list))
		list = NULL;
done:
	trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* We enable the doorbell engine for IPA v3.5.1 */
	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch.  We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
		GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the low-order 16 bits of the last scratch
	 * register.  The next sequence assumes those bits remain unchanged
	 * between the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}
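
/* Example of the scratch 3 read-modify-write above (made-up values): with
 * GSI_RING_ELEMENT_SIZE 16, outstanding_threshold is 32, so word4 is
 * 0x00200000.  If the hardware register reads back as 0x1234abcd, the
 * value written is (0x00200000 & 0xffff0000) | (0x1234abcd & 0x0000ffff),
 * i.e. 0x0020abcd; only the low-order 16 bits are preserved.
 */
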
static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	if (!start)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, true);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}

static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	if (!stop)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, stop);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, start);
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call.  This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}
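
/* Worked example (made-up numbers): a call to gsi_channel_tx_queued() with
 * byte_count 5000 and queued_byte_count still 3800 from the previous
 * doorbell reports 1200 newly queued bytes and records 5000 as the new
 * baseline.  The transaction count is tracked the same way.
 */
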
/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}
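
/* Worked example (made-up numbers): a transaction committed when the
 * channel had accumulated 4000 bytes over 9 transactions (recorded in
 * trans->byte_count and trans->trans_count), with trans->len 500, yields
 * byte_count 4500 and trans_count 10.  If compl_byte_count was 4200 and
 * compl_trans_count was 9, we report 300 bytes and 1 transaction as
 * newly completed and advance both counters accordingly.
 */
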
/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of the two GENERIC
	 * GSI commands.  We use these to allocate and halt channels on
	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
	 * allocated, the modem "owns" these channels, and as a result we
	 * have no way of knowing the channel's state at any given time.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command.  We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 *
	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
	 * if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_CHANNEL_NOT_RUNNING:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Global interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	gsi_irq_ieob_disable(gsi, event_mask);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ.  Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field tells
 * the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}
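
/* Wrap example for the loop above (hypothetical values): with ring->count
 * 8, old_index 6, and index 2, event_avail starts at 2, so events 6 and 7
 * are processed before the event pointer wraps to slot 0; events 0 and 1
 * follow, and the loop stops when it reaches event_done (slot 2).
 */
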
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}
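
/* Size/alignment example for gsi_ring_alloc() (illustrative): a
 * 128-element ring occupies 128 * 16 = 2048 bytes, and the check in that
 * function requires the DMA address to be 2048-byte aligned (that is,
 * addr % size == 0), matching the hardware's power-of-2 size and
 * alignment rule.
 */
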
/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return NULL;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);

	return gsi_channel_trans_complete(channel);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans)	/* List is empty; see if there's more to do */
		trans = gsi_channel_update(channel);

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
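
/* Worked example (hypothetical): with evt_ring_max of 20, the initial
 * GENMASK() marks ids 20 and above as unavailable, and the MHI range then
 * reserves ids 10 through 16.  That leaves ids 0-9 and 17-19 for
 * gsi_evt_ring_id_alloc() to hand out.
 */
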
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	bool timeout;
	u32 val;

	/* The error global interrupt type is always enabled (until we
	 * teardown), so we won't change that.  A generic EE command
	 * completes with a GSI global interrupt of type GP_INT1.  We
	 * only perform one generic command at a time (to allocate or
	 * halt a modem channel) and only from this function.  So we
	 * enable the GP_INT1 IRQ type here while we're expecting it.
	 */
	val = BIT(ERROR_INT) | BIT(GP_INT1);
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);

	/* Disable the GP_INT1 IRQ type again */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	if (!timeout)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_evt_ring_setup(gsi);
	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!gsi_channel_initialized(channel))
			continue;

		ret = -EINVAL;
		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);

	return ret;
}
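/* The unwind arithmetic in gsi_channel_setup() deserves a worked example.
 * Suppose (hypothetically) modem_channel_bitmap is 0x31, i.e. channels
 * 0, 4 and 5, and allocating channel 5 fails:
 *
 *	mask = 0x31;		// working copy of the bitmap
 *	// channel 0 allocated:  mask ^= BIT(0)  ->  mask == 0x30
 *	// channel 4 allocated:  mask ^= BIT(4)  ->  mask == 0x20
 *	// channel 5 fails       ->  goto err_unwind_modem, mask == 0x20
 *	mask ^= gsi->modem_channel_bitmap;	// 0x20 ^ 0x31 == 0x11
 *
 * Because bits are cleared from the working mask only after a successful
 * allocation, the XOR recovers exactly the channels that *were*
 * allocated (0 and 4), which are then halted in descending order via
 * __fls().
 */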
/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);
}

/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(dev, "GSI has not been enabled\n");
		return -EIO;
	}

	gsi_irq_setup(gsi);

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	ret = gsi_channel_setup(gsi);
	if (ret)
		gsi_irq_teardown(gsi);

	return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}
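/* gsi_setup() above only trusts the hardware parameter register so far:
 * counts are extracted with u32_get_bits() and then clamped to what the
 * driver was built for.  A small self-contained illustration of the
 * <linux/bitfield.h> helpers, using a made-up field mask (not a real
 * GSI register field):
 *
 *	#define EXAMPLE_COUNT_FMASK	GENMASK(7, 3)
 *
 *	u32 reg = 0xa8;						// bits 7:3 hold 21
 *	u32 count = u32_get_bits(reg, EXAMPLE_COUNT_FMASK);	// 21
 *	reg = u32_encode_bits(count, EXAMPLE_COUNT_FMASK);	// 0xa8 again
 *
 * u32_encode_bits() and u32_get_bits() are exact inverses as long as the
 * value fits in the field, which is why this driver uses them for all
 * register field access.
 */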
/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
	/* Nothing to do */
}

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range the driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs).  Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
			channel_id, data->channel.tlv_count,
			data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}
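/* A quick worked example of the tre_count requirement checked above,
 * using hypothetical numbers: with tlv_count == 16, one maximally-sized
 * transaction consumes 16 TREs, while gsi_channel_tre_max() (defined
 * near the end of this file) reports tre_count - (tlv_count - 1) usable
 * TREs.  For at least one full transaction to fit:
 *
 *	tre_count - (tlv_count - 1) >= tlv_count
 *	tre_count >= 2 * tlv_count - 1		// >= 31 for tlv_count 16
 *
 * Since tre_count must also be a power of 2, the smallest valid ring in
 * this example holds 32 TREs.
 */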
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}
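/* Two idioms in gsi_channel_init_one() recur throughout this file.
 * Errors unwind through goto labels in reverse order of setup, and a
 * cleared channel->gsi pointer doubles as the "not initialized" marker
 * tested by gsi_channel_initialized().  A stripped-down sketch of the
 * pattern, with hypothetical names:
 *
 *	static int example_init(struct example *ex, struct parent *p)
 *	{
 *		int ret;
 *
 *		ex->parent = p;		// non-NULL marks "initialized"
 *
 *		ret = step_a(ex);
 *		if (ret)
 *			goto err_clear;
 *
 *		ret = step_b(ex);
 *		if (!ret)
 *			return 0;
 *
 *		undo_a(ex);
 *	err_clear:
 *		ex->parent = NULL;	// not (fully) initialized
 *
 *		return ret;
 *	}
 *
 * Teardown paths can then run example_exit() unconditionally and have
 * it return early for never-initialized entries.
 */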
/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!gsi_channel_initialized(channel))
		return;

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi_evt_ring_init(gsi);

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}
	gsi_evt_ring_exit(gsi);

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;

	gsi_evt_ring_exit(gsi);
}
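/* The unwind loop in gsi_channel_init() releases entries strictly in
 * reverse order of initialization.  With a u32 index, "while (i--)"
 * tests the old value and then decrements, so after a failure at slot i
 * the body runs for slots i - 1 down through 0, which are exactly the
 * slots that were successfully initialized, and the loop stops once the
 * test sees zero.  A self-contained sketch with hypothetical names
 * (the real loop also skips empty and modem slots):
 *
 *	u32 i;
 *
 *	for (i = 0; i < count; i++)
 *		if (example_init_one(i))
 *			goto err_unwind;
 *
 *	return 0;
 *
 * err_unwind:
 *	while (i--)
 *		example_exit_one(i);
 */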
/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* GSI uses NAPI on all channels.  Create a dummy network device
	 * for the channel NAPI contexts to be associated with.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt_raw = ioremap(res->start, size);
	if (!gsi->virt_raw) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Most registers are accessed using an adjusted register range */
	gsi->virt = gsi->virt_raw - adjust;

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_irq_exit;

	mutex_init(&gsi->mutex);

	return 0;

err_irq_exit:
	gsi_irq_exit(gsi);
err_iounmap:
	iounmap(gsi->virt_raw);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	gsi_irq_exit(gsi);
	iounmap(gsi->virt_raw);
}

/* The maximum number of outstanding TREs on a channel.  This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information.  So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though.  Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous.  The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements.  The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}

/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}
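/* Worked example of the sizing described above gsi_channel_tre_max(),
 * with hypothetical values: tre_count == 256 and tlv_count == 16 give
 *
 *	tre_max = 256 - (16 - 1) = 241
 *
 * Per the comment above, gsi_trans_pool_init() adds back the same
 * tlv_count - 1 = 15 extra entries so allocations can always be
 * contiguous, for a pool of 241 + 15 = 256 entries, which stays exactly
 * at the power-of-2 boundary.  Had we used the hardware limit of
 * tre_count - 1 = 255 instead, the pool would need 255 + 15 = 270
 * entries, crossing that boundary and roughly doubling the memory
 * allocated for it.
 */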