// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"

/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA commands in a single structure.
 * (A "command" in this sense is either a data transfer or an IPA immediate
 * command.)  Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe commands
 * to be performed.  When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform a command (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per command).  If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds.  This way
 * exhaustion of the available TREs in a channel ring is detected as early
 * as possible.  All resources required to complete a transaction are
 * allocated at transaction allocation time.
 *
 * Commands performed as part of a transaction are represented in an array
 * of Linux scatterlist structures.  This array is allocated with the
 * transaction, and its entries are initialized using standard scatterlist
 * functions (such as sg_set_buf() or skb_to_sgvec()).
 *
 * Once a transaction's scatterlist structures have been initialized, the
 * transaction is committed.  The caller is responsible for mapping buffers
 * for DMA if necessary, and this should be done *before* allocating
 * the transaction.  Between a successful allocation and commit of a
 * transaction no errors should occur.
 *
 * Committing transfers ownership of the entire transaction to the GSI
 * transaction core.  The GSI transaction code formats the content of
 * the scatterlist array into the channel ring buffer and informs the
 * hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it.  Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */
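
/* A minimal usage sketch of the flow described above: a hypothetical
 * caller supplies one receive buffer page on a channel.  This is purely
 * illustrative (it is not part of the driver), and assumes the caller
 * owns the page and handles any retry on failure.
 */
static int __maybe_unused gsi_trans_example_page_xfer(struct gsi *gsi,
						      u32 channel_id,
						      struct page *page,
						      u32 size)
{
	struct gsi_trans *trans;
	int ret;

	/* Reserve one TRE; this fails if the channel ring is exhausted */
	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
	if (!trans)
		return -EBUSY;

	/* Describe (and DMA-map) the page in the preallocated scatterlist */
	ret = gsi_trans_page_add(trans, page, size, 0);
	if (ret) {
		gsi_trans_free(trans);
		return ret;
	}

	/* Hand the transaction to the GSI core and ring the doorbell */
	gsi_trans_commit(trans, true);

	return 0;
}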

/* Hardware values representing a transfer element type */
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
	 * single allocation), we can always satisfy requests without
	 * ever worrying about straddling the end of the pool array.
	 * If there aren't enough entries starting at the free index,
	 * we just allocate free entries from the beginning of the pool.
	 */
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}

void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}
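
/* A brief sketch of how such a pool is used, with hypothetical numbers
 * (eight scatterlist entries, at most three per allocation); this is
 * illustrative only and not part of the driver.  Because the pool
 * over-allocates by max_alloc - 1 entries, a three-entry request never
 * needs to straddle the end of the underlying array.
 */
static int __maybe_unused gsi_trans_pool_example(void)
{
	struct gsi_trans_pool pool;
	struct scatterlist *sgl;
	int ret;

	ret = gsi_trans_pool_init(&pool, sizeof(struct scatterlist), 8, 3);
	if (ret)
		return ret;

	/* Returns a zeroed, contiguous block of three entries */
	sgl = gsi_trans_pool_alloc(&pool, 3);
	sg_init_marker(sgl, 3);

	/* Entries are never freed individually; the pool is torn down whole */
	gsi_trans_pool_exit(&pool);

	return 0;
}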

/* Home-grown DMA pool.  This way we can preallocate and use the tre_count
 * to guarantee allocations will succeed.  Even though we specify max_alloc
 * (and it can be more than one), we only allow allocation of a single
 * element from a DMA pool.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator will give us a power-of-2 number of pages
	 * sufficient to satisfy our request.  Round up our requested
	 * size to avoid any unused space in the allocation.  This way
	 * gsi_trans_pool_exit_dma() can assume the total allocated
	 * size is exactly (count * size).
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}
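
/* For illustration, with hypothetical numbers: a DMA pool of 256 elements
 * of 56 bytes with max_alloc 8 first rounds the element size up to 64
 * bytes, giving a requested total of (256 + 8 - 1) * 64 = 16832 bytes.
 * get_order(16832) is 3 with 4 KiB pages, so PAGE_SIZE << 3 = 32768 bytes
 * are allocated and pool->count becomes 32768 / 64 = 512.  Note that
 * pool->count * pool->size then equals the allocated size, which is what
 * gsi_trans_pool_exit_dma() relies on when freeing the memory.
 */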

/* Return the byte offset of the next free entry in the pool */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	WARN_ON(!count);
	WARN_ON(count > pool->max_alloc);

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}

/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}

/* Map a TRE ring entry index to the transaction it is associated with */
static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];

	/* The completion event will indicate the last TRE used */
	index += trans->used_count - 1;

	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u16 trans_id = trans_info->completed_id;

	if (trans_id == trans_info->pending_id)
		return NULL;

	return &trans_info->trans[trans_id %= channel->tre_count];
}

/* Move a transaction from allocated to committed state */
static void gsi_trans_move_committed(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	/* This allocated transaction is now committed */
	trans_info->allocated_id++;
}

/* Move committed transactions to pending state */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u16 trans_index = trans - &trans_info->trans[0];
	u16 delta;

	/* These committed transactions are now pending */
	delta = trans_index - trans_info->committed_id + 1;
	trans_info->committed_id += delta % channel->tre_count;
}

/* Move pending transactions to completed state */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u16 trans_index = trans - trans_info->trans;
	u16 delta;

	/* These pending transactions are now completed */
	delta = trans_index - trans_info->pending_id + 1;
	delta %= channel->tre_count;
	trans_info->pending_id += delta;
}

/* Move a transaction from completed to polled state */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	/* This completed transaction is now polled */
	trans_info->completed_id++;
}

/* Reserve some number of TREs on a channel.  Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}

/* Return true if no transactions are allocated, false otherwise */
bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
{
	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
	struct gsi_trans_info *trans_info;

	trans_info = &gsi->channel[channel_id].trans_info;

	return atomic_read(&trans_info->tre_avail) == tre_max;
}

/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;
	u16 trans_index;

	if (WARN_ON(tre_count > channel->trans_tre_max))
		return NULL;

	trans_info = &channel->trans_info;

	/* If we can't reserve the TREs for the transaction, we're done */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	trans_index = trans_info->free_id % channel->tre_count;
	trans = &trans_info->trans[trans_index];
	memset(trans, 0, sizeof(*trans));

	/* Initialize non-zero fields in the transaction */
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->rsvd_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;
	refcount_set(&trans->refcount, 1);

	/* This free transaction is now allocated */
	trans_info->free_id++;

	return trans;
}

/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
	struct gsi_trans_info *trans_info;

	if (!refcount_dec_and_test(&trans->refcount))
		return;

	/* Unused transactions are allocated but never committed, pending,
	 * completed, or polled.
	 */
	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
	if (!trans->used_count) {
		trans_info->allocated_id++;
		trans_info->committed_id++;
		trans_info->pending_id++;
		trans_info->completed_id++;
	} else {
		ipa_gsi_trans_release(trans);
	}

	/* This transaction is now free */
	trans_info->polled_id++;

	/* Releasing the reserved TREs implicitly frees the sgl[] and
	 * (if present) info[] arrays, plus the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->rsvd_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
	u32 which = trans->used_count++;
	struct scatterlist *sg;

	WARN_ON(which >= trans->rsvd_count);

	/* Commands are quite different from data transfer requests.
	 * Their payloads come from a pool whose memory is allocated
	 * using dma_alloc_coherent().  We therefore do *not* map them
	 * for DMA (unlike what we do for pages and skbs).
	 *
	 * When a transaction completes, the SGL is normally unmapped.
	 * A command transaction has direction DMA_NONE, which tells
	 * gsi_trans_complete() to skip the unmapping step.
	 *
	 * The only things we use directly in a command scatter/gather
	 * entry are the DMA address and length.  We still need the SG
	 * table flags to be maintained though, so assign a NULL page
	 * pointer for that purpose.
	 */
	sg = &trans->sgl[which];
	sg_assign_page(sg, NULL);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = size;

	trans->cmd_opcode[which] = opcode;
}

/* Add a page transfer to a transaction.  It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	if (WARN_ON(trans->rsvd_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used_count))
		return -EINVAL;

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used_count++;	/* Transaction now owns the (DMA mapped) page */

	return 0;
}

/* Add an SKB transfer to a transaction.  No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used_count;
	int ret;

	if (WARN_ON(trans->rsvd_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used_count))
		return -EINVAL;

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used_count = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
	if (!ret)
		return -ENOMEM;

	/* Transaction now owns the (DMA mapped) skb */
	trans->used_count += used_count;

	return 0;
}

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}

static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* ARM64 can write 16 bytes as a unit with a single instruction.
	 * Doing the assignment this way is an attempt to make that happen.
	 */
	*dest_tre = tre;
}
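
/* For illustration: the flags word gsi_tre_flags() builds for the final
 * TRE of a TX data transfer (last_tre and bei both true, opcode
 * IPA_CMD_NONE) combines the XFER type in bits 16-23 with the IEOT and
 * BEI bits:
 *
 *	u32_encode_bits(GSI_RE_XFER, TRE_FLAGS_TYPE_FMASK) |
 *		TRE_FLAGS_IEOT_FMASK | TRE_FLAGS_BEI_FMASK == 0x00020600
 *
 * Every non-final TRE instead carries only the type and the CHAIN bit.
 */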

/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes.  Moves the transaction to
 * pending state.  Finally, updates the channel ring pointer and optionally
 * rings the doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *tre_ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u8 *cmd_opcode;
	u32 avail;
	u32 i;

	WARN_ON(!trans->used_count);

	/* Consume the entries.  If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If there is no info array we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
	avail = tre_ring->count - tre_ring->index % tre_ring->count;
	dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
	for_each_sg(trans->sgl, sg, trans->used_count, i) {
		bool last_tre = i == trans->used_count - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(tre_ring, 0);
		if (cmd_opcode)
			opcode = *cmd_opcode++;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}
	/* Associate the TRE with the transaction */
	gsi_trans_map(trans, tre_ring->index);

	tre_ring->index += trans->used_count;

	trans->len = byte_count;
	if (channel->toward_ipa)
		gsi_trans_tx_committed(trans);

	gsi_trans_move_committed(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_trans_tx_queued(trans);
		gsi_trans_move_pending(trans);
		gsi_channel_doorbell(channel);
	}
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used_count)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used_count)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u16 trans_id = trans_info->pending_id;

	/* channel->gsi->mutex is held by caller */

	/* If there are no pending transactions, we're done */
	if (trans_id == trans_info->committed_id)
		return;

	/* Mark all pending transactions cancelled */
	do {
		struct gsi_trans *trans;

		trans = &trans_info->trans[trans_id % channel->tre_count];
		trans->cancelled = true;
	} while (++trans_id != trans_info->committed_id);

	/* All pending transactions are now completed */
	trans_info->pending_id = trans_info->committed_id;

	/* Schedule NAPI polling to complete the cancelled transactions */
	napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *tre_ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */

	dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	tre_ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 tre_count = channel->tre_count;
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	trans_info = &channel->trans_info;

	/* The tre_avail field is what ultimately limits the number of
	 * outstanding transactions and their resources.  A transaction
	 * allocation succeeds only if the TREs available are sufficient
	 * for what the transaction might need.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
	atomic_set(&trans_info->tre_avail, tre_max);

	/* We can't use more TREs than the number available in the ring.
	 * This limits the number of transactions that can be outstanding.
	 * Worst case is one TRE per transaction (but we actually limit
	 * it to something a little less than that).  By allocating a
	 * power-of-two number of transactions we can use an index
	 * modulo that number to determine the next one that's free.
	 * Transactions are allocated one at a time.
	 */
	trans_info->trans = kcalloc(tre_count, sizeof(*trans_info->trans),
				    GFP_KERNEL);
	if (!trans_info->trans)
		return -ENOMEM;
	trans_info->free_id = 0;	/* all modulo channel->tre_count */
	trans_info->allocated_id = 0;
	trans_info->committed_id = 0;
	trans_info->pending_id = 0;
	trans_info->completed_id = 0;
	trans_info->polled_id = 0;

	/* A completion event contains a pointer to the TRE that caused
	 * the event (which will be the last one used by the transaction).
	 * Each entry in this map records the transaction associated
	 * with a corresponding completed TRE.
	 */
	trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map) {
		ret = -ENOMEM;
		goto err_trans_free;
	}

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction.  Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed.  So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->trans_tre_max);
	if (ret)
		goto err_map_free;

	return 0;

err_map_free:
	kfree(trans_info->map);
err_trans_free:
	kfree(trans_info->trans);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}

/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	kfree(trans_info->trans);
	kfree(trans_info->map);
}