// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 *	--------	     ---------
 *	|      |	     |       |
 *	| AP   +<---.	.----+ Modem |
 *	|      +--. |	| .->+       |
 *	|      |  | |	| |  |       |
 *	--------  | |	| |  ---------
 *		  v |	v |
 *		--+-+---+-+--
 *		|    GSI    |
 *		|-----------|
 *		|           |
 *		|    IPA    |
 *		|           |
 *		-------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM. After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
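
/* Illustration of the in-order completion rule described above: if the AP
 * sees a completion event pointing at the TRE in slot 7 of a channel ring,
 * the TREs in earlier slots (5, 6, and so on) are implicitly complete as
 * well, so a single IEOB interrupt can retire a whole batch of transfers.
 */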

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel (in bytes). This determines the amount of prefetch
 *	performed by the hardware. We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell. We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};
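
/* For the GPI configuration used here (see gsi_channel_program() below),
 * outstanding_threshold is programmed to two ring elements, i.e. 2 * 16 = 32
 * bytes, and max_outstanding_tre to the channel's TLV FIFO size expressed in
 * bytes, per the field descriptions above. Both values are multiples of
 * GSI_RING_ELEMENT_SIZE, which matches the 16-byte struct gsi_event layout.
 */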

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed). Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size. We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
	return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */
static void gsi_irq_setup(struct gsi *gsi)
{
	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
	if (gsi->version > IPA_VERSION_3_1) {
		u32 offset;

		/* These registers are in the non-adjusted address range */
		offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
		iowrite32(0, gsi->virt_raw + offset);
		offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
		iowrite32(0, gsi->virt_raw + offset);
	}

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
static int gsi_ring_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 count;
	u32 val;

	if (gsi->version < IPA_VERSION_3_5_1) {
		/* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;

		return 0;
	}

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, count);
		count = GSI_CHANNEL_COUNT_MAX;
	}
	gsi->channel_count = count;

	count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, count);
		count = GSI_EVT_RING_COUNT_MAX;
	}
	gsi->evt_ring_count = count;

	return 0;
}

/* Event ring commands are performed one at a time. Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command. Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time. Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command. Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}
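
/* Note that ieob_enabled_bitmap serves as a per-event-ring IEOB interrupt
 * mask; the GSI_IEOB interrupt type itself is enabled only while at least
 * one ring's bit is set, and disabled again when the last bit is cleared.
 */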

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports. Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset). A breakpoint interrupt
	 * also exists, but we don't support that. We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	/* Get initial event ring state */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void __iomem *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out. If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}
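
/* Taken together, the channel commands above and below drive a simple state
 * machine: NOT_ALLOCATED -> ALLOCATED (allocate), ALLOCATED/STOPPED ->
 * STARTED (start), STARTED -> STOPPED or STOP_IN_PROC (stop, possibly
 * retried), STOPPED/ERROR -> ALLOCATED (reset), and ALLOCATED ->
 * NOT_ALLOCATED (de-alloc).
 */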

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = ev_r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = lower_32_bits(evt_ring->ring.addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
	val = upper_32_bits(evt_ring->ring.addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and MSI address high and low address is 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	const struct list_head *list;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	/* There is a small chance a TX transaction got allocated just
	 * before we disabled transmits, so check for that.
	 */
	if (channel->toward_ipa) {
		list = &trans_info->alloc;
		if (!list_empty(list))
			goto done;
		list = &trans_info->pending;
		if (!list_empty(list))
			goto done;
	}

	/* Otherwise (TX or RX) we want to wait for anything that
	 * has completed, or has been polled but not released yet.
	 */
	list = &trans_info->complete;
	if (!list_empty(list))
		goto done;
	list = &trans_info->polled;
	if (list_empty(list))
		list = NULL;
done:
	trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = lower_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
	val = upper_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* No need to use the doorbell engine starting at IPA v4.0 */
	if (gsi->version < IPA_VERSION_4_0 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch. We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}
	/* All channels set DB_IN_BYTES */
	if (gsi->version >= IPA_VERSION_4_9)
		val |= DB_IN_BYTES;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the upper 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	if (!start)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, true);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}
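
/* With GSI_CHANNEL_STOP_RETRIES set to 10 and a 3-5 millisecond sleep
 * between attempts, a stop that keeps returning -EAGAIN is retried for
 * roughly 30-50 milliseconds of sleep time, in addition to the per-command
 * GSI_CMD_TIMEOUT wait inside gsi_channel_stop_command().
 */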

static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	if (!stop)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, stop);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, start);
}
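
/* The next two functions maintain two pairs of counters. The queued_*
 * fields record how much work has been handed to hardware (reported when
 * the channel doorbell is rung), while the compl_* fields record how much
 * has completed (reported when completion events arrive). The network
 * stack uses the difference between the two for TX throttling.
 */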

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call. This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent. We also track what those values are
 * each time this function is called. Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed. Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of the two GENERIC
	 * GSI commands. We use these to allocate and halt channels on
	 * the modem's behalf due to a hardware quirk on IPA v4.2. Once
	 * allocated, the modem "owns" these channels, and as a result we
	 * have no way of knowing the channel's state at any given time.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command. We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 *
	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
	 * if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_CHANNEL_NOT_RUNNING:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	gsi_irq_ieob_disable(gsi, event_mask);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0)
		return ret ? : -EINVAL;

	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}
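
/* In other words, the TRE index recovered from a completion event is just
 * the byte offset of the event's xfer_ptr from the base of the channel's
 * TRE ring divided by GSI_RING_ELEMENT_SIZE (see gsi_ring_index() above).
 */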

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer. Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked. The ring's index field tells
 * the first entry in need of processing. The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event. RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction. Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	u32 size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size.
	 * The DMA address returned by dma_alloc_coherent() is guaranteed to
	 * be a power-of-2 number of pages, which satisfies the requirement.
	 */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;

	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done. Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return NULL;

	/* Get the transaction for the latest completed event. Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received. For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);

	return gsi_channel_trans_complete(channel);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list. If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed. If so, they're moved to the
 * completed list and the new first entry is returned. If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans)	/* List is empty; see if there's more to do */
		trans = gsi_channel_update(channel);

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}
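
/* As a concrete example of the bitmap initialization described below: with
 * an evt_ring_max of 20, bits 20-31 are marked unavailable, and bits 10-16
 * (GSI_MHI_EVENT_ID_START through GSI_MHI_EVENT_ID_END) are reserved, so
 * event ring ids 0-9 and 17-19 remain allocatable.
 */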

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used. This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!gsi_channel_initialized(channel))
		return 0;

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!gsi_channel_initialized(channel))
		return;

	netif_napi_del(&channel->napi);

	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	bool timeout;
	u32 val;

	/* The error global interrupt type is always enabled (until we
	 * teardown), so we won't change that. A generic EE command
	 * completes with a GSI global interrupt of type GP_INT1. We
	 * only perform one generic command at a time (to allocate or
	 * halt a modem channel) and only from this function. So we
	 * enable the GP_INT1 IRQ type here while we're expecting it.
	 */

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!gsi_channel_initialized(channel))
		return 0;

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!gsi_channel_initialized(channel))
		return;

	netif_napi_del(&channel->napi);

	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	bool timeout;
	u32 val;

	/* The error global interrupt type is always enabled (until we
	 * teardown), so we won't change that.  A generic EE command
	 * completes with a GSI global interrupt of type GP_INT1.  We
	 * only perform one generic command at a time (to allocate or
	 * halt a modem channel) and only from this function.  So we
	 * enable the GP_INT1 IRQ type here while we're expecting it.
	 */
	val = BIT(ERROR_INT) | BIT(GP_INT1);
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);

	/* Disable the GP_INT1 IRQ type again */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	if (!timeout)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!gsi_channel_initialized(channel))
			continue;

		ret = -EINVAL;
		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);

	return ret;
}
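
/* To illustrate the unwind arithmetic above with hypothetical numbers: if
 * modem_channel_bitmap is 0x30 (channels 4 and 5) and allocating channel 5
 * fails, mask still holds 0x20 at err_unwind_modem.  XOR-ing it with the
 * full bitmap yields 0x10, i.e. exactly the channels that were successfully
 * allocated and therefore need to be halted again.
 */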

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
}

/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(gsi->dev, "GSI has not been enabled\n");
		return -EIO;
	}

	gsi_irq_setup(gsi);		/* No matching teardown required */

	ret = gsi_ring_setup(gsi);	/* No matching teardown required */
	if (ret)
		return ret;

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	return gsi_channel_setup(gsi);
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
}
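
/* A minimal sketch of how these entry points are expected to nest, from the
 * caller's point of view (error handling omitted; the callers live in the
 * IPA driver proper, not in this file):
 *
 *	gsi_init(gsi, pdev, ...);	// probe time; hardware need not be ready
 *	...				// load and initialize GSI firmware
 *	gsi_setup(gsi);			// program event rings and channels
 *	...				// normal operation
 *	gsi_teardown(gsi);		// inverse of gsi_setup()
 *	gsi_exit(gsi);			// inverse of gsi_init(), defined below
 */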

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings; there is no gsi_evt_ring_exit() */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range the driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs).  Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
			channel_id, data->channel.tlv_count,
			data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}
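
/* For example (hypothetical configuration data): a channel with a TLV FIFO
 * of 16 entries needs tre_count >= 2 * 16 - 1 = 31; since tre_count must
 * also be a power of 2, the smallest acceptable ring has 32 TREs.  With 32
 * TREs, gsi_channel_tre_max() below yields 32 - (16 - 1) = 17 usable TREs,
 * which is enough for one maximally-sized (16-TRE) transaction to be
 * outstanding.
 */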

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!gsi_channel_initialized(channel))
		return;

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi_evt_ring_init(gsi);		/* No matching exit required */

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;
}

/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* GSI uses NAPI on all channels.  Create a dummy network device
	 * for the channel NAPI contexts to be associated with.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt_raw = ioremap(res->start, size);
	if (!gsi->virt_raw) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Most registers are accessed using an adjusted register range */
	gsi->virt = gsi->virt_raw - adjust;
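	/* With this adjustment, an access such as
	 * ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET) resolves to
	 * gsi->virt_raw + (GSI_CNTXT_SCRATCH_0_OFFSET - adjust) within the
	 * mapped region; for IPA versions before v4.5 adjust is zero, so
	 * gsi->virt and gsi->virt_raw are identical.
	 */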

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_irq_exit;

	mutex_init(&gsi->mutex);

	return 0;

err_irq_exit:
	gsi_irq_exit(gsi);
err_iounmap:
	iounmap(gsi->virt_raw);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	gsi_irq_exit(gsi);
	iounmap(gsi->virt_raw);
}

/* The maximum number of outstanding TREs on a channel.  This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information.  So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though.  Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous.  The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements.  The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}

/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}
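
/* A worked example with hypothetical numbers (not taken from any board
 * configuration): for a channel with tre_count == 256 and tlv_count == 16,
 * the hardware limit on outstanding TREs would be 255, but
 * gsi_channel_tre_max() returns 256 - (16 - 1) = 241.  Keeping the limit
 * just below the ring size lets the slightly-oversized transaction pool
 * stay under the next power-of-2 allocation boundary, as the comment
 * before gsi_channel_tre_max() describes.
 */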