// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 *              --------             ---------
 *              |      |             |       |
 *              | AP   +<---.   .----+ Modem |
 *              |      +--. |   | .->+       |
 *              |      |  | |   | |  |       |
 *              --------  | |   | |  ---------
 *                        v |   v |
 *                      --+-+---+-+--
 *                      |    GSI    |
 *                      |-----------|
 *                      |           |
 *                      |    IPA    |
 *                      |           |
 *                      -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM. After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
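
/*
 * Editorial sketch (not driver code) of the TX flow the comment above
 * describes. The helper names here are hypothetical; the real flow is
 * implemented by gsi_trans and the channel functions below:
 *
 *      fill_tre(&tre_ring[index], dma_addr, len, IEOT | CHAIN);
 *      iowrite32(tre_addr(index + 1), channel_doorbell);
 *      // ...the IPA consumes the TREs, appends an entry to the channel's
 *      // event ring, rings the event ring doorbell, and the AP takes an
 *      // IEOB interrupt; the event's xfer_ptr names the completed TRE.
 */
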
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT           (32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT                 50      /* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES        10
#define GSI_CHANNEL_MODEM_HALT_RETRIES  10

#define GSI_MHI_EVENT_ID_START          10      /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END            16      /* Last reserved event id */

#define GSI_ISR_MAX_ITER                50      /* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
        __le64 xfer_ptr;
        __le16 len;
        u8 reserved1;
        u8 code;
        __le16 reserved2;
        u8 type;
        u8 chid;
};

/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *      Defines the maximum number of TREs allowed in a single transaction
 *      on a channel (in bytes). This determines the amount of prefetch
 *      performed by the hardware. We configure this to equal the size of
 *      the TLV FIFO for the channel.
 * @outstanding_threshold:
 *      Defines the threshold (in bytes) determining when the sequencer
 *      should update the channel doorbell. We configure this to equal
 *      the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
        u64 reserved1;
        u16 reserved2;
        u16 max_outstanding_tre;
        u16 reserved3;
        u16 outstanding_threshold;
};

/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
        struct gsi_channel_scratch_gpi gpi;
        struct {
                u32 word1;
                u32 word2;
                u32 word3;
                u32 word4;
        } data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
        /* This is used as a divisor */
        BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

        /* Code assumes the size of channel and event ring element are
         * the same (and fixed). Make sure the size of an event ring
         * element is what's expected.
         */
        BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

        /* Hardware requires a 2^n ring size. We ensure the number of
         * elements in an event ring is a power of 2 elsewhere; this
         * ensures the elements themselves meet the requirement.
         */
        BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

        /* The channel element size must fit in this field */
        BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

        /* The event ring element size must fit in this field */
        BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
        return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
        return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
        gsi->type_enabled_bitmap = val;
        iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
        gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
        gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */
static void gsi_irq_setup(struct gsi *gsi)
{
        /* Disable all interrupt types */
        gsi_irq_type_update(gsi, 0);

        /* Clear all type-specific interrupt masks */
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

        /* The inter-EE registers are in the non-adjusted address range */
        iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
        iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);

        iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Event ring commands are performed one at a time. Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command. Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
        u32 val = BIT(evt_ring_id);

        /* There's a small chance that a previous command completed
         * after the interrupt was disabled, so make sure we have no
         * pending interrupts before we enable them.
         */
        iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
        gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
        gsi_irq_type_disable(gsi, GSI_EV_CTRL);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time. Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command. Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
        u32 val = BIT(channel_id);

        /* There's a small chance that a previous command completed
         * after the interrupt was disabled, so make sure we have no
         * pending interrupts before we enable them.
         */
        iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
        gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
        gsi_irq_type_disable(gsi, GSI_CH_CTRL);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
        bool enable_ieob = !gsi->ieob_enabled_bitmap;
        u32 val;

        gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
        val = gsi->ieob_enabled_bitmap;
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

        /* Enable the interrupt type if this is the first channel enabled */
        if (enable_ieob)
                gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
        u32 val;

        gsi->ieob_enabled_bitmap &= ~event_mask;

        /* Disable the interrupt type if this was the last enabled channel */
        if (!gsi->ieob_enabled_bitmap)
                gsi_irq_type_disable(gsi, GSI_IEOB);

        val = gsi->ieob_enabled_bitmap;
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
        gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}
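
/*
 * Worked example (editorial) of the first/last toggling done by the
 * functions above: with event rings 2 and 5 enabled, the IEOB bitmap is
 * BIT(2) | BIT(5) == 0x24. Disabling ring 2 leaves 0x20, so the GSI_IEOB
 * interrupt type stays enabled; only when ring 5 is disabled too does the
 * bitmap go to zero and the type-level interrupt get turned off.
 */
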
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
        u32 val;

        /* Global interrupts include hardware error reports. Enable
         * that so we can at least report the error should it occur.
         */
        iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
        gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

        /* General GSI interrupts are reported to all EEs; if they occur
         * they are unrecoverable (without reset). A breakpoint interrupt
         * also exists, but we don't support that. We want to be notified
         * of errors so we can report them, even if they can't be handled.
         */
        val = BIT(BUS_ERROR);
        val |= BIT(CMD_FIFO_OVRFLOW);
        val |= BIT(MCS_STACK_OVRFLOW);
        iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
        gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
        gsi_irq_type_update(gsi, 0);

        /* Clear the type-specific interrupt masks set by gsi_irq_enable() */
        iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
        /* Note: index *must* be used modulo the ring count here */
        return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
        return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
        return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
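
/*
 * Worked example (editorial) for the helpers above, using the 16-byte
 * ring element size this driver requires: with ring->addr == 0x40001000,
 * gsi_ring_addr(ring, 3) yields 0x40001030, and gsi_ring_index(ring,
 * 0x40001030) recovers index 3. Hardware reports ring positions as
 * 32-bit DMA addresses, and these helpers convert between the two
 * representations.
 */
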
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
        unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

        reinit_completion(completion);

        iowrite32(val, gsi->virt + reg);

        return !!wait_for_completion_timeout(completion, timeout);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
        u32 val;

        val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

        return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
                                 enum gsi_evt_cmd_opcode opcode)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        struct completion *completion = &evt_ring->completion;
        struct device *dev = gsi->dev;
        bool timeout;
        u32 val;

        /* Enable the completion interrupt for the command */
        gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

        val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
        val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

        timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

        gsi_irq_ev_ctrl_disable(gsi);

        if (!timeout)
                return;

        dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
                opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
        enum gsi_evt_ring_state state;

        /* Get initial event ring state */
        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
                dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
                        evt_ring_id, state);
                return -EINVAL;
        }

        gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

        /* If successful the event ring state will have changed */
        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state == GSI_EVT_RING_STATE_ALLOCATED)
                return 0;

        dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
                evt_ring_id, state);

        return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
        enum gsi_evt_ring_state state;

        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state != GSI_EVT_RING_STATE_ALLOCATED &&
            state != GSI_EVT_RING_STATE_ERROR) {
                dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
                        evt_ring_id, state);
                return;
        }

        gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

        /* If successful the event ring state will have changed */
        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state == GSI_EVT_RING_STATE_ALLOCATED)
                return;

        dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
                evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
        enum gsi_evt_ring_state state;

        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state != GSI_EVT_RING_STATE_ALLOCATED) {
                dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
                        evt_ring_id, state);
                return;
        }

        gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

        /* If successful the event ring state will have changed */
        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
                return;

        dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
                evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
        u32 channel_id = gsi_channel_id(channel);
        void __iomem *virt = channel->gsi->virt;
        u32 val;

        val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

        return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
        struct completion *completion = &channel->completion;
        u32 channel_id = gsi_channel_id(channel);
        struct gsi *gsi = channel->gsi;
        struct device *dev = gsi->dev;
        bool timeout;
        u32 val;

        /* Enable the completion interrupt for the command */
        gsi_irq_ch_ctrl_enable(gsi, channel_id);

        val = u32_encode_bits(channel_id, CH_CHID_FMASK);
        val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
        timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

        gsi_irq_ch_ctrl_disable(gsi);

        if (!timeout)
                return;

        dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
                opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct device *dev = gsi->dev;
        enum gsi_channel_state state;

        /* Get initial channel state */
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
                dev_err(dev, "channel %u bad state %u before alloc\n",
                        channel_id, state);
                return -EINVAL;
        }

        gsi_channel_command(channel, GSI_CH_ALLOCATE);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
        if (state == GSI_CHANNEL_STATE_ALLOCATED)
                return 0;

        dev_err(dev, "channel %u bad state %u after alloc\n",
                channel_id, state);

        return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;

        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED &&
            state != GSI_CHANNEL_STATE_STOPPED) {
                dev_err(dev, "channel %u bad state %u before start\n",
                        gsi_channel_id(channel), state);
                return -EINVAL;
        }

        gsi_channel_command(channel, GSI_CH_START);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
        if (state == GSI_CHANNEL_STATE_STARTED)
                return 0;

        dev_err(dev, "channel %u bad state %u after start\n",
                gsi_channel_id(channel), state);

        return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;

        state = gsi_channel_state(channel);

        /* Channel could have entered STOPPED state since last call
         * if it timed out. If so, we're done.
         */
        if (state == GSI_CHANNEL_STATE_STOPPED)
                return 0;

        if (state != GSI_CHANNEL_STATE_STARTED &&
            state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
                dev_err(dev, "channel %u bad state %u before stop\n",
                        gsi_channel_id(channel), state);
                return -EINVAL;
        }

        gsi_channel_command(channel, GSI_CH_STOP);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
        if (state == GSI_CHANNEL_STATE_STOPPED)
                return 0;

        /* We may have to try again if stop is in progress */
        if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
                return -EAGAIN;

        dev_err(dev, "channel %u bad state %u after stop\n",
                gsi_channel_id(channel), state);

        return -EIO;
}
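
/*
 * Editorial summary of the channel state machine implied by the state
 * checks in the command functions above and below:
 *
 *      NOT_ALLOCATED --ALLOCATE--> ALLOCATED --START--> STARTED
 *      STARTED --STOP--> STOPPED (via STOP_IN_PROC if the stop is still
 *              in progress, in which case the STOP command is retried)
 *      STOPPED or ERROR --RESET--> ALLOCATED
 *      ALLOCATED --DE_ALLOC--> NOT_ALLOCATED
 */
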
/* Reset a GSI channel in STOPPED or ERROR state */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;

        /* A short delay is required before a RESET command */
        usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_STOPPED &&
            state != GSI_CHANNEL_STATE_ERROR) {
                /* No need to reset a channel already in ALLOCATED state */
                if (state != GSI_CHANNEL_STATE_ALLOCATED)
                        dev_err(dev, "channel %u bad state %u before reset\n",
                                gsi_channel_id(channel), state);
                return;
        }

        gsi_channel_command(channel, GSI_CH_RESET);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED)
                dev_err(dev, "channel %u bad state %u after reset\n",
                        gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct device *dev = gsi->dev;
        enum gsi_channel_state state;

        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED) {
                dev_err(dev, "channel %u bad state %u before dealloc\n",
                        channel_id, state);
                return;
        }

        gsi_channel_command(channel, GSI_CH_DE_ALLOC);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);

        if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
                dev_err(dev, "channel %u bad state %u after dealloc\n",
                        channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
        struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
        u32 val;

        ring->index = index;    /* Next unused entry */

        /* Note: index *must* be used modulo the ring count here */
        val = gsi_ring_addr(ring, (index - 1) % ring->count);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
        u32 val;

        /* We program all event rings as GPI type/protocol */
        val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
        val |= EV_INTYPE_FMASK;
        val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

        val = ev_r_length_encoded(gsi->version, size);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

        /* The context 2 and 3 registers store the low-order and
         * high-order 32 bits of the address of the event ring,
         * respectively.
         */
        val = lower_32_bits(evt_ring->ring.addr);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
        val = upper_32_bits(evt_ring->ring.addr);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

        /* Enable interrupt moderation by setting the moderation delay */
        val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
        val |= u32_encode_bits(1, MODC_FMASK);  /* comes from channel */
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

        /* No MSI write data, and MSI address high and low address is 0 */
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

        /* We don't need to get event read pointer updates */
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

        /* Finally, tell the hardware we've completed event 0 (arbitrary) */
        gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;
        const struct list_head *list;
        struct gsi_trans *trans;

        spin_lock_bh(&trans_info->spinlock);

        /* There is a small chance a TX transaction got allocated just
         * before we disabled transmits, so check for that.
         */
        if (channel->toward_ipa) {
                list = &trans_info->alloc;
                if (!list_empty(list))
                        goto done;
                list = &trans_info->pending;
                if (!list_empty(list))
                        goto done;
        }

        /* Otherwise (TX or RX) we want to wait for anything that
         * has completed, or has been polled but not released yet.
         */
        list = &trans_info->complete;
        if (!list_empty(list))
                goto done;
        list = &trans_info->polled;
        if (list_empty(list))
                list = NULL;
done:
        trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

        /* Caller will wait for this, so take a reference */
        if (trans)
                refcount_inc(&trans->refcount);

        spin_unlock_bh(&trans_info->spinlock);

        return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
        struct gsi_trans *trans;

        /* Get the last transaction, and wait for it to complete */
        trans = gsi_channel_trans_last(channel);
        if (trans) {
                wait_for_completion(&trans->completion);
                gsi_trans_free(trans);
        }
}

/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
        size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
        u32 channel_id = gsi_channel_id(channel);
        union gsi_channel_scratch scr = { };
        struct gsi_channel_scratch_gpi *gpi;
        struct gsi *gsi = channel->gsi;
        u32 wrr_weight = 0;
        u32 val;

        /* Arbitrarily pick TRE 0 as the first channel element to use */
        channel->tre_ring.index = 0;

        /* We program all channels as GPI type/protocol */
        val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
        if (channel->toward_ipa)
                val |= CHTYPE_DIR_FMASK;
        val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
        val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

        val = r_length_encoded(gsi->version, size);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

        /* The context 2 and 3 registers store the low-order and
         * high-order 32 bits of the address of the channel ring,
         * respectively.
         */
        val = lower_32_bits(channel->tre_ring.addr);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
        val = upper_32_bits(channel->tre_ring.addr);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

        /* Command channel gets low weighted round-robin priority */
        if (channel->command)
                wrr_weight = field_max(WRR_WEIGHT_FMASK);
        val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

        /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

        /* No need to use the doorbell engine starting at IPA v4.0 */
        if (gsi->version < IPA_VERSION_4_0 && doorbell)
                val |= USE_DB_ENG_FMASK;

        /* v4.0 introduces an escape buffer for prefetch. We use it
         * on all but the AP command channel.
         */
        if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
                /* If not otherwise set, prefetch buffers are used */
                if (gsi->version < IPA_VERSION_4_5)
                        val |= USE_ESCAPE_BUF_ONLY_FMASK;
                else
                        val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
                                               PREFETCH_MODE_FMASK);
        }
        /* All channels set DB_IN_BYTES */
        if (gsi->version >= IPA_VERSION_4_9)
                val |= DB_IN_BYTES;

        iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

        /* Now update the scratch registers for GPI protocol */
        gpi = &scr.gpi;
        gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
                                        GSI_RING_ELEMENT_SIZE;
        gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

        val = scr.data.word1;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

        val = scr.data.word2;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

        val = scr.data.word3;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

        /* We must preserve the upper 16 bits of the last scratch register.
         * The next sequence assumes those bits remain unchanged between the
         * read and the write.
         */
        val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
        val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

        /* All done! */
}
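
/*
 * Worked example (editorial) for the scratch programming above, assuming
 * 16-byte ring elements and a channel whose TLV FIFO holds 8 TREs: here
 * gsi_channel_trans_tre_max() returns 8, so max_outstanding_tre is
 * 8 * 16 == 128 bytes and outstanding_threshold is 2 * 16 == 32 bytes.
 * Both scratch fields are expressed in bytes, not in TRE counts.
 */
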
static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
        struct gsi *gsi = channel->gsi;
        int ret;

        if (!start)
                return 0;

        mutex_lock(&gsi->mutex);

        ret = gsi_channel_start_command(channel);

        mutex_unlock(&gsi->mutex);

        return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        int ret;

        /* Enable NAPI and the completion interrupt */
        napi_enable(&channel->napi);
        gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

        ret = __gsi_channel_start(channel, true);
        if (ret) {
                gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
                napi_disable(&channel->napi);
        }

        return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
        u32 retries = GSI_CHANNEL_STOP_RETRIES;
        int ret;

        do {
                ret = gsi_channel_stop_command(channel);
                if (ret != -EAGAIN)
                        break;
                usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
        } while (retries--);

        return ret;
}

static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
{
        struct gsi *gsi = channel->gsi;
        int ret;

        /* Wait for any underway transactions to complete before stopping. */
        gsi_channel_trans_quiesce(channel);

        if (!stop)
                return 0;

        mutex_lock(&gsi->mutex);

        ret = gsi_channel_stop_retry(channel);

        mutex_unlock(&gsi->mutex);

        return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        int ret;

        ret = __gsi_channel_stop(channel, true);
        if (ret)
                return ret;

        /* Disable the completion interrupt and NAPI if successful */
        gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
        napi_disable(&channel->napi);

        return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        mutex_lock(&gsi->mutex);

        gsi_channel_reset_command(channel);
        /* Due to a hardware quirk we may need to reset RX channels twice. */
        if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
                gsi_channel_reset_command(channel);

        gsi_channel_program(channel, doorbell);
        gsi_channel_trans_cancel_pending(channel);

        mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        int ret;

        ret = __gsi_channel_stop(channel, stop);
        if (ret)
                return ret;

        /* Ensure NAPI polling has finished. */
        napi_synchronize(&channel->napi);

        return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        return __gsi_channel_start(channel, start);
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:    Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call. This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent. We also track what those values are
 * each time this function is called. Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
        u32 trans_count;
        u32 byte_count;

        byte_count = channel->byte_count - channel->queued_byte_count;
        trans_count = channel->trans_count - channel->queued_trans_count;
        channel->queued_byte_count = channel->byte_count;
        channel->queued_trans_count = channel->trans_count;

        ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
                                  trans_count, byte_count);
}

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:    Channel that has completed transmitting packets
 * @trans:      Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed. Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
        u64 byte_count = trans->byte_count + trans->len;
        u64 trans_count = trans->trans_count + 1;

        byte_count -= channel->compl_byte_count;
        channel->compl_byte_count += byte_count;
        trans_count -= channel->compl_trans_count;
        channel->compl_trans_count += trans_count;

        ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
                                     trans_count, byte_count);
}
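
/*
 * Worked example (editorial) of the delta accounting done by
 * gsi_channel_tx_queued() and gsi_channel_tx_update(): suppose the last
 * reported completion covered 7 transactions and 2500 bytes
 * (compl_trans_count == 7, compl_byte_count == 2500). When a completion
 * arrives for a transaction that was committed with trans_count == 8,
 * byte_count == 3000 and len == 200, the update reports (8 + 1) - 7 == 2
 * transactions and (3000 + 200) - 2500 == 700 bytes to the network stack.
 */
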
/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
        u32 channel_mask;

        channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
        iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

        while (channel_mask) {
                u32 channel_id = __ffs(channel_mask);
                struct gsi_channel *channel;

                channel_mask ^= BIT(channel_id);

                channel = &gsi->channel[channel_id];

                complete(&channel->completion);
        }
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
        u32 event_mask;

        event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
        iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

        while (event_mask) {
                u32 evt_ring_id = __ffs(event_mask);
                struct gsi_evt_ring *evt_ring;

                event_mask ^= BIT(evt_ring_id);

                evt_ring = &gsi->evt_ring[evt_ring_id];

                complete(&evt_ring->completion);
        }
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
        if (code == GSI_OUT_OF_RESOURCES) {
                dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
                complete(&gsi->channel[channel_id].completion);
                return;
        }

        /* Report, but otherwise ignore all other error codes */
        dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
                channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
        if (code == GSI_OUT_OF_RESOURCES) {
                struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
                u32 channel_id = gsi_channel_id(evt_ring->channel);

                complete(&evt_ring->completion);
                dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
                        channel_id);
                return;
        }

        /* Report, but otherwise ignore all other error codes */
        dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
                evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
        enum gsi_err_type type;
        enum gsi_err_code code;
        u32 which;
        u32 val;
        u32 ee;

        /* Get the logged error, then reinitialize the log */
        val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
        iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
        iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

        ee = u32_get_bits(val, ERR_EE_FMASK);
        type = u32_get_bits(val, ERR_TYPE_FMASK);
        which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
        code = u32_get_bits(val, ERR_CODE_FMASK);

        if (type == GSI_ERR_TYPE_CHAN)
                gsi_isr_glob_chan_err(gsi, ee, which, code);
        else if (type == GSI_ERR_TYPE_EVT)
                gsi_isr_glob_evt_err(gsi, ee, which, code);
        else    /* type GSI_ERR_TYPE_GLOB should be fatal */
                dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
        u32 result;
        u32 val;

        /* This interrupt is used to handle completions of the two GENERIC
         * GSI commands. We use these to allocate and halt channels on
         * the modem's behalf due to a hardware quirk on IPA v4.2. Once
         * allocated, the modem "owns" these channels, and as a result we
         * have no way of knowing the channel's state at any given time.
         *
         * It is recommended that we halt the modem channels we allocated
         * when shutting down, but it's possible the channel isn't running
         * at the time we issue the HALT command. We'll get an error in
         * that case, but it's harmless (the channel is already halted).
         *
         * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
         * if we receive it.
         */
        val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
        result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

        switch (result) {
        case GENERIC_EE_SUCCESS:
        case GENERIC_EE_CHANNEL_NOT_RUNNING:
                gsi->result = 0;
                break;

        case GENERIC_EE_RETRY:
                gsi->result = -EAGAIN;
                break;

        default:
                dev_err(gsi->dev, "global INT1 generic result %u\n", result);
                gsi->result = -EIO;
                break;
        }

        complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
        u32 val;

        val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

        if (val & BIT(ERROR_INT))
                gsi_isr_glob_err(gsi);

        iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

        val &= ~BIT(ERROR_INT);

        if (val & BIT(GP_INT1)) {
                val ^= BIT(GP_INT1);
                gsi_isr_gp_int1(gsi);
        }

        if (val)
                dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
        u32 event_mask;

        event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
        gsi_irq_ieob_disable(gsi, event_mask);
        iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

        while (event_mask) {
                u32 evt_ring_id = __ffs(event_mask);

                event_mask ^= BIT(evt_ring_id);

                napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
        }
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
        struct device *dev = gsi->dev;
        u32 val;

        val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
        iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

        dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:        Interrupt number (ignored)
 * @dev_id:     GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
        struct gsi *gsi = dev_id;
        u32 intr_mask;
        u32 cnt = 0;

        /* enum gsi_irq_type_id defines GSI interrupt types */
        while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
                /* intr_mask contains bitmask of pending GSI interrupts */
                do {
                        u32 gsi_intr = BIT(__ffs(intr_mask));

                        intr_mask ^= gsi_intr;

                        switch (gsi_intr) {
                        case BIT(GSI_CH_CTRL):
                                gsi_isr_chan_ctrl(gsi);
                                break;
                        case BIT(GSI_EV_CTRL):
                                gsi_isr_evt_ctrl(gsi);
                                break;
                        case BIT(GSI_GLOB_EE):
                                gsi_isr_glob_ee(gsi);
                                break;
                        case BIT(GSI_IEOB):
                                gsi_isr_ieob(gsi);
                                break;
                        case BIT(GSI_GENERAL):
                                gsi_isr_general(gsi);
                                break;
                        default:
                                dev_err(gsi->dev,
                                        "unrecognized interrupt type 0x%08x\n",
                                        gsi_intr);
                                break;
                        }
                } while (intr_mask);

                if (++cnt > GSI_ISR_MAX_ITER) {
                        dev_err(gsi->dev, "interrupt flood\n");
                        break;
                }
        }

        return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        unsigned int irq;
        int ret;

        ret = platform_get_irq_byname(pdev, "gsi");
        if (ret <= 0)
                return ret ? : -EINVAL;

        irq = ret;

        ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
        if (ret) {
                dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
                return ret;
        }
        gsi->irq = irq;

        return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
        free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
                                         struct gsi_event *event)
{
        u32 tre_offset;
        u32 tre_index;

        /* Event xfer_ptr records the TRE it's associated with */
        tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
        tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

        return gsi_channel_trans_mapped(channel, tre_index);
}
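
/*
 * Worked example (editorial) for gsi_event_trans(): if the channel's TRE
 * ring starts at DMA address 0x50002000 and an event's xfer_ptr is
 * 0x50002040, the event refers to TRE index 4 (with 16-byte elements),
 * and the transaction is looked up in the channel's map from TRE index
 * to transaction.
 */
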
/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:   Event ring associated with channel that received packets
 * @index:      Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer. Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked. The ring's index field tells
 * us the first entry in need of processing. The @index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
        struct gsi_channel *channel = evt_ring->channel;
        struct gsi_ring *ring = &evt_ring->ring;
        struct gsi_trans_info *trans_info;
        struct gsi_event *event_done;
        struct gsi_event *event;
        struct gsi_trans *trans;
        u32 byte_count = 0;
        u32 old_index;
        u32 event_avail;

        trans_info = &channel->trans_info;

        /* We'll start with the oldest un-processed event. RX channels
         * replenish receive buffers in single-TRE transactions, so we
         * can just map that event to its transaction. Transactions
         * associated with completion events are consecutive.
         */
        old_index = ring->index;
        event = gsi_ring_virt(ring, old_index);
        trans = gsi_event_trans(channel, event);

        /* Compute the number of events to process before we wrap,
         * and determine when we'll be done processing events.
         */
        event_avail = ring->count - old_index % ring->count;
        event_done = gsi_ring_virt(ring, index);
        do {
                trans->len = __le16_to_cpu(event->len);
                byte_count += trans->len;

                /* Move on to the next event and transaction */
                if (--event_avail)
                        event++;
                else
                        event = gsi_ring_virt(ring, 0);
                trans = gsi_trans_pool_next(&trans_info->pool, trans);
        } while (event != event_done);

        /* We record RX bytes when they are received */
        channel->byte_count += byte_count;
        channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
        u32 size = count * GSI_RING_ELEMENT_SIZE;
        struct device *dev = gsi->dev;
        dma_addr_t addr;

        /* Hardware requires a 2^n ring size, with alignment equal to size.
         * The DMA address returned by dma_alloc_coherent() is guaranteed to
         * be a power-of-2 number of pages, which satisfies the requirement.
         */
        ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
        if (!ring->virt)
                return -ENOMEM;

        ring->addr = addr;
        ring->count = count;

        return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
        size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

        dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
        u32 evt_ring_id;

        if (gsi->event_bitmap == ~0U) {
                dev_err(gsi->dev, "event rings exhausted\n");
                return -ENOSPC;
        }

        evt_ring_id = ffz(gsi->event_bitmap);
        gsi->event_bitmap |= BIT(evt_ring_id);

        return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
        gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
        struct gsi_ring *tre_ring = &channel->tre_ring;
        u32 channel_id = gsi_channel_id(channel);
        struct gsi *gsi = channel->gsi;
        u32 val;

        /* Note: index *must* be used modulo the ring count here */
        val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
        iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
        u32 evt_ring_id = channel->evt_ring_id;
        struct gsi *gsi = channel->gsi;
        struct gsi_evt_ring *evt_ring;
        struct gsi_trans *trans;
        struct gsi_ring *ring;
        u32 offset;
        u32 index;

        evt_ring = &gsi->evt_ring[evt_ring_id];
        ring = &evt_ring->ring;

        /* See if there's anything new to process; if not, we're done. Note
         * that index always refers to an entry *within* the event ring.
         */
        offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
        index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
        if (index == ring->index % ring->count)
                return NULL;

        /* Get the transaction for the latest completed event. Take a
         * reference to keep it from completing before we give the events
         * for this and previous transactions back to the hardware.
         */
        trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
        refcount_inc(&trans->refcount);

        /* For RX channels, update each completed transaction with the number
         * of bytes that were actually received. For TX channels, report
         * the number of transactions and bytes this completion represents
         * up the network stack.
         */
        if (channel->toward_ipa)
                gsi_channel_tx_update(channel, trans);
        else
                gsi_evt_ring_rx_update(evt_ring, index);

        gsi_trans_move_complete(trans);

        /* Tell the hardware we've handled these events */
        gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

        gsi_trans_free(trans);

        return gsi_channel_trans_complete(channel);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:    Channel to be polled
 *
 * Return:      Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list. If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed. If so, they're moved to the
 * completed list and the new first entry is returned. If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
        struct gsi_trans *trans;

        /* Get the first transaction from the completed list */
        trans = gsi_channel_trans_complete(channel);
        if (!trans)     /* List is empty; see if there's more to do */
                trans = gsi_channel_update(channel);

        if (trans)
                gsi_trans_move_polled(trans);

        return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:       NAPI structure for the channel
 * @budget:     Budget supplied by NAPI core
 *
 * Return:      Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
        struct gsi_channel *channel;
        int count;

        channel = container_of(napi, struct gsi_channel, napi);
        for (count = 0; count < budget; count++) {
                struct gsi_trans *trans;

                trans = gsi_channel_poll_one(channel);
                if (!trans)
                        break;
                gsi_trans_complete(trans);
        }

        if (count < budget && napi_complete(napi))
                gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

        return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used. This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
        u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

        event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

        return event_bitmap;
}
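
/*
 * Worked example (editorial) for gsi_event_bitmap_init(): with
 * evt_ring_max == 20, the first GENMASK marks ids 20 and above as
 * unavailable (the bitmap is a u32, so effectively ids 20-31), and the
 * second reserves the MHI event ids 10-16 defined above. The result is
 * 0xfff1fc00, leaving ids 0-9 and 17-19 for gsi_evt_ring_id_alloc()
 * to hand out.
 */
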
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        u32 evt_ring_id = channel->evt_ring_id;
        int ret;

        if (!gsi_channel_initialized(channel))
                return 0;

        ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
        if (ret)
                return ret;

        gsi_evt_ring_program(gsi, evt_ring_id);

        ret = gsi_channel_alloc_command(gsi, channel_id);
        if (ret)
                goto err_evt_ring_de_alloc;

        gsi_channel_program(channel, true);

        if (channel->toward_ipa)
                netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
                                  gsi_channel_poll, NAPI_POLL_WEIGHT);
        else
                netif_napi_add(&gsi->dummy_dev, &channel->napi,
                               gsi_channel_poll, NAPI_POLL_WEIGHT);

        return 0;

err_evt_ring_de_alloc:
        /* We've done nothing with the event ring yet so don't reset */
        gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

        return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        u32 evt_ring_id = channel->evt_ring_id;

        if (!gsi_channel_initialized(channel))
                return;

        netif_napi_del(&channel->napi);

        gsi_channel_de_alloc_command(gsi, channel_id);
        gsi_evt_ring_reset_command(gsi, evt_ring_id);
        gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
                               enum gsi_generic_cmd_opcode opcode)
{
        struct completion *completion = &gsi->completion;
        bool timeout;
        u32 val;

        /* The error global interrupt type is always enabled (until we
         * tear down), so we won't change that. A generic EE command
         * completes with a GSI global interrupt of type GP_INT1. We
         * only perform one generic command at a time (to allocate or
         * halt a modem channel) and only from this function. So we
         * enable the GP_INT1 IRQ type here while we're expecting it.
         */
        val = BIT(ERROR_INT) | BIT(GP_INT1);
        iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

        /* First zero the result code field */
        val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
        val &= ~GENERIC_EE_RESULT_FMASK;
        iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

        /* Now issue the command */
        val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
        val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
        val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

        timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);

        /* Disable the GP_INT1 IRQ type again */
        iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

        if (!timeout)
                return gsi->result;

        dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
                opcode, channel_id);

        return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
        return gsi_generic_command(gsi, channel_id,
                                   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
        u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
        int ret;

        do
                ret = gsi_generic_command(gsi, channel_id,
                                          GSI_GENERIC_HALT_CHANNEL);
        while (ret == -EAGAIN && retries--);

        if (ret)
                dev_err(gsi->dev, "error %d halting modem channel %u\n",
                        ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
        u32 channel_id = 0;
        u32 mask;
        int ret;

        gsi_irq_enable(gsi);

        mutex_lock(&gsi->mutex);

        do {
                ret = gsi_channel_setup_one(gsi, channel_id);
                if (ret)
                        goto err_unwind;
        } while (++channel_id < gsi->channel_count);

        /* Make sure no channels were defined that hardware does not support */
        while (channel_id < GSI_CHANNEL_COUNT_MAX) {
                struct gsi_channel *channel = &gsi->channel[channel_id++];

                if (!gsi_channel_initialized(channel))
                        continue;

                ret = -EINVAL;
                dev_err(gsi->dev, "channel %u not supported by hardware\n",
                        channel_id - 1);
                channel_id = gsi->channel_count;
                goto err_unwind;
        }

        /* Allocate modem channels if necessary */
        mask = gsi->modem_channel_bitmap;
        while (mask) {
                u32 modem_channel_id = __ffs(mask);

                ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
                if (ret)
                        goto err_unwind_modem;

                /* Clear bit from mask only after success (for unwind) */
                mask ^= BIT(modem_channel_id);
        }

        mutex_unlock(&gsi->mutex);

        return 0;

err_unwind_modem:
        /* Compute which modem channels need to be deallocated */
        mask ^= gsi->modem_channel_bitmap;
        while (mask) {
                channel_id = __fls(mask);

                mask ^= BIT(channel_id);

                gsi_modem_channel_halt(gsi, channel_id);
        }

err_unwind:
        while (channel_id--)
                gsi_channel_teardown_one(gsi, channel_id);

        mutex_unlock(&gsi->mutex);

        gsi_irq_disable(gsi);

        return ret;
}

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
        u32 mask = gsi->modem_channel_bitmap;
        u32 channel_id;

        mutex_lock(&gsi->mutex);

        while (mask) {
                channel_id = __fls(mask);

                mask ^= BIT(channel_id);

                gsi_modem_channel_halt(gsi, channel_id);

/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(dev, "GSI has not been enabled\n");
		return -EIO;
	}

	gsi_irq_setup(gsi);	/* No matching teardown required */

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	return gsi_channel_setup(gsi);
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
}
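
/* Editor's illustrative sketch (not part of the driver): how gsi_setup()
 * derives the per-EE channel count from the GSI_GSI_HW_PARAM_2 register.
 * The function name is invented; min_t() is assumed to be available via
 * the usual kernel header chain.
 */
static u32 __maybe_unused gsi_example_channel_count(u32 hw_param_2)
{
	u32 count = u32_get_bits(hw_param_2, NUM_CH_PER_EE_FMASK);

	/* Mirror of the clamp gsi_setup() applies before using the value */
	return min_t(u32, count, GSI_CHANNEL_COUNT_MAX);
}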

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings; there is no gsi_evt_ring_exit() */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range the driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs).  Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
			channel_id, data->channel.tre_count,
			data->channel.tlv_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}
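
/* Editor's illustrative sketch (not part of the driver) of the TRE/TLV
 * sizing rule checked above: with a hypothetical TLV FIFO holding 16 TREs,
 * a channel needs tre_count >= 2 * 16 - 1 = 31; since tre_count must also
 * be a power of 2, the smallest usable ring has 32 elements.  The function
 * name is invented.
 */
static bool __maybe_unused gsi_example_tre_count_valid(u32 tre_count,
						       u32 tlv_count)
{
	return is_power_of_2(tre_count) &&
	       tre_count >= 2 * tlv_count - 1;
}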

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}
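
/* Editor's illustrative sketch (not part of the driver): a hypothetical AP
 * channel configuration that passes gsi_channel_data_valid() and is not
 * capped by gsi_channel_init_one().  All field values are invented; only
 * fields the functions above actually consult are shown.
 */
static const struct ipa_gsi_endpoint_data gsi_example_data __maybe_unused = {
	.ee_id		= GSI_EE_AP,
	.channel_id	= 1,
	.toward_ipa	= true,
	.channel = {
		.tre_count	= 256,	/* Power of 2, >= 2 * 16 - 1 */
		.event_count	= 256,	/* Power of 2; no TRE capping */
		.tlv_count	= 16,	/* Must not exceed GSI_TLV_MAX */
	},
};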

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!gsi_channel_initialized(channel))
		return;

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi_evt_ring_init(gsi);	/* No matching exit required */

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;
}

/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* GSI uses NAPI on all channels.  Create a dummy network device
	 * for the channel NAPI contexts to be associated with.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt_raw = ioremap(res->start, size);
	if (!gsi->virt_raw) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Most registers are accessed using an adjusted register range */
	gsi->virt = gsi->virt_raw - adjust;

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_irq_exit;

	mutex_init(&gsi->mutex);

	return 0;

err_irq_exit:
	gsi_irq_exit(gsi);
err_iounmap:
	iounmap(gsi->virt_raw);

	return ret;
}
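
/* Editor's illustrative check (not part of the driver) of the pointer
 * adjustment performed in gsi_init() above: because gsi->virt is set to
 * gsi->virt_raw - adjust, an access to gsi->virt + offset touches the same
 * byte as gsi->virt_raw + (offset - adjust) for any register offset that
 * is at least adjust.  The function name is invented.
 */
static bool __maybe_unused gsi_example_adjust_consistent(struct gsi *gsi,
							 u32 adjust,
							 u32 offset)
{
	return gsi->virt + offset == gsi->virt_raw + (offset - adjust);
}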

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	gsi_irq_exit(gsi);
	iounmap(gsi->virt_raw);
}

/* The maximum number of outstanding TREs on a channel.  This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information.  So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though.  Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous.  The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements.  The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}

/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}
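
/* Editor's worked example (hypothetical sizes, not part of the driver):
 * for a channel with tre_count = 256 and tlv_count = 16, the hardware
 * limit would be 255 outstanding TREs, but gsi_channel_tre_max() returns
 * 256 - (16 - 1) = 241.  When gsi_trans_pool_init() adds back the same
 * (tlv_count - 1) extra entries, the pool holds exactly 256 entries and
 * stays at the power-of-2 boundary instead of growing toward 512.  The
 * function name is invented.
 */
static u32 __maybe_unused gsi_example_pool_entries(u32 tre_count,
						   u32 tlv_count)
{
	u32 tre_max = tre_count - (tlv_count - 1);	/* gsi_channel_tre_max() */

	/* Assumes gsi_trans_pool_init() adds (tlv_count - 1) extra entries */
	return tre_max + (tlv_count - 1);
}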