// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"

/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA commands in a single structure.
 * (A "command" in this sense is either a data transfer or an IPA immediate
 * command.) Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe commands
 * to be performed. When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform a command (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per command). If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds. This way,
 * exhaustion of the available TREs in a channel ring is detected as early
 * as possible. All resources required to complete a transaction are
 * allocated at transaction allocation time.
 *
 * Commands performed as part of a transaction are represented in an array
 * of Linux scatterlist structures. This array is allocated with the
 * transaction, and its entries are initialized using standard scatterlist
 * functions (such as sg_set_buf() or skb_to_sgvec()).
 *
 * Once a transaction's scatterlist structures have been initialized, the
 * transaction is committed. The caller is responsible for mapping buffers
 * for DMA if necessary, and this should be done *before* allocating
 * the transaction. Between a successful allocation and commit of a
 * transaction no errors should occur.
 *
 * Committing transfers ownership of the entire transaction to the GSI
 * transaction core. The GSI transaction code formats the content of
 * the scatterlist array into the channel ring buffer and informs the
 * hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it. Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */
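
/* Illustrative sketch (not part of the driver): one way a caller might
 * use the interface described above for a single outbound skb, assuming
 * a configured channel and a linear skb (one scatterlist entry). The
 * function name and error handling are hypothetical; the gsi_* calls
 * are the ones defined in this file. Compiled out.
 */
#if 0
static int gsi_trans_example_skb_tx(struct gsi *gsi, u32 channel_id,
				    struct sk_buff *skb)
{
	struct gsi_trans *trans;
	int ret;

	/* Reserve one TRE for the single transfer */
	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
	if (!trans)
		return -EBUSY;	/* Channel ring is exhausted */

	/* Fill the scatterlist and map the skb data for DMA */
	ret = gsi_trans_skb_add(trans, skb);
	if (ret) {
		gsi_trans_free(trans);	/* Releases the reserved TRE */
		return ret;
	}

	/* Ownership of the transaction passes to the GSI core */
	gsi_trans_commit(trans, true);

	return 0;
}
#endif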

/* Hardware values representing a transfer element type */
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
	 * single allocation), we can always satisfy requests without
	 * ever worrying about straddling the end of the pool array.
	 * If there aren't enough entries starting at the free index,
	 * we just allocate free entries from the beginning of the pool.
	 */
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}

void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}
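
/* Worked example (illustrative numbers): with count = 8 and max_alloc = 3,
 * gsi_trans_pool_init() above allocates 8 + 3 - 1 = 10 entries. A wrap
 * can skip at most max_alloc - 1 = 2 unused entries at the end of the
 * array, so even in the worst case the full 8 requested entries remain
 * usable.
 */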

/* Home-grown DMA pool. This way we can preallocate and use the tre_count
 * to guarantee allocations will succeed. Even though we specify max_alloc
 * (and it can be more than one), we only allow allocation of a single
 * element from a DMA pool.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator will give us a power-of-2 number of pages, so
	 * round our request up to that size; that way we won't waste
	 * memory that the allocator would return beyond the space we
	 * asked for.
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* Both total_size and size are powers of 2, so this divides
	 * evenly; gsi_trans_pool_exit_dma() relies on count * size
	 * being exactly the allocated size.
	 */
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	/* Free the same number of bytes that were originally allocated */
	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}

/* Allocate the requested number of (zeroed) entries from the pool,
 * returning the byte offset of the first one.
 */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	/* assert(count > 0); */
	/* assert(count <= pool->max_alloc); */

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}
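
/* Worked example (illustrative): suppose pool->count is 10, pool->free
 * is 9, and a caller asks for 2 entries. Only one entry remains before
 * the end of the array, so pool->free is reset to 0 and the allocation
 * is satisfied from the beginning; entry 9 is simply skipped this time
 * around.
 */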

/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}

/* Return the pool element that immediately follows the one given.
 * This only works if elements are allocated one at a time.
 */
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
	void *end = pool->base + pool->count * pool->size;

	/* assert(element >= pool->base); */
	/* assert(element < end); */
	/* assert(pool->max_alloc == 1); */
	element += pool->size;

	return element < end ? element : pool->base;
}

/* Map a given ring entry index to the transaction associated with it */
static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
				  struct gsi_trans *trans)
{
	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	return list_first_entry_or_null(&channel->trans_info.complete,
					struct gsi_trans, links);
}

/* Move a transaction from the allocated list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->pending);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to completed list */
	list_cut_position(&list, &trans_info->pending, &trans->links);
	list_splice_tail(&list, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction from the completed list to the polled list */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->polled);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Reserve some number of TREs on a channel. Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}
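
/* Worked example (illustrative): two threads race to reserve TREs when
 * tre_avail is 5. Thread A reads 5 and computes 5 - 4 = 1, but thread B
 * reserves 3 first, so A's atomic_try_cmpxchg() fails and reloads avail
 * as 2; A then computes 2 - 4 < 0 and the reservation cleanly fails,
 * all without taking a lock.
 */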

/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	/* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */

	trans_info = &channel->trans_info;

	/* We reserve the TREs now, but consume them at commit time.
	 * If there aren't enough available, we're done.
	 */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	/* Allocate and initialize non-zero fields in the transaction */
	trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->tre_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist and (if requested) info entries. */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;

	spin_lock_bh(&trans_info->spinlock);

	list_add_tail(&trans->links, &trans_info->alloc);

	spin_unlock_bh(&trans_info->spinlock);

	refcount_set(&trans->refcount, 1);

	return trans;
}

/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
	refcount_t *refcount = &trans->refcount;
	struct gsi_trans_info *trans_info;
	bool last;

	/* We must hold the lock to release the last reference */
	if (refcount_dec_not_one(refcount))
		return;

	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

	spin_lock_bh(&trans_info->spinlock);

	/* Reference might have been added before we got the lock */
	last = refcount_dec_and_test(refcount);
	if (last)
		list_del(&trans->links);

	spin_unlock_bh(&trans_info->spinlock);

	if (!last)
		return;

	ipa_gsi_trans_release(trans);

	/* Releasing the reserved TREs implicitly frees the sgl[] and
	 * (if present) info[] arrays, plus the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->tre_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum dma_data_direction direction,
		       enum ipa_cmd_opcode opcode)
{
	struct ipa_cmd_info *info;
	u32 which = trans->used++;
	struct scatterlist *sg;

	/* assert(which < trans->tre_count); */

	/* Commands are quite different from data transfer requests.
	 * Their payloads come from a pool whose memory is allocated
	 * using dma_alloc_coherent(). We therefore do *not* map them
	 * for DMA (unlike what we do for pages and skbs).
	 *
	 * When a transaction completes, the SGL is normally unmapped.
	 * A command transaction has direction DMA_NONE, which tells
	 * gsi_trans_complete() to skip the unmapping step.
	 *
	 * The only things we use directly in a command scatter/gather
	 * entry are the DMA address and length. We still need the SG
	 * table flags to be maintained though, so assign a NULL page
	 * pointer for that purpose.
	 */
	sg = &trans->sgl[which];
	sg_assign_page(sg, NULL);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = size;

	info = &trans->info[which];
	info->opcode = opcode;
	info->direction = direction;
}

/* Add a page transfer to a transaction. It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used++;	/* Transaction now owns the (DMA mapped) page */

	return 0;
}
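
/* Illustrative sketch: adding an immediate command, assuming a command
 * transaction allocated with direction DMA_NONE and a payload obtained
 * from a coherent DMA pool. The pool, size, opcode, and direction used
 * here are hypothetical. Compiled out.
 */
#if 0
static void gsi_trans_example_cmd(struct gsi_trans *trans,
				  struct gsi_trans_pool *cmd_pool,
				  u32 size, enum ipa_cmd_opcode opcode)
{
	dma_addr_t addr;
	void *payload;

	/* Coherent memory: no dma_map_sg() needed for the payload */
	payload = gsi_trans_pool_alloc_dma(cmd_pool, &addr);

	/* ... fill in the command payload here ... */

	gsi_trans_cmd_add(trans, payload, size, addr, DMA_TO_DEVICE, opcode);
}
#endif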

/* Add an SKB transfer to a transaction. No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used;
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used += used;	/* Transaction now owns the (DMA mapped) skb */

	return 0;
}

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}

static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* ARM64 can write 16 bytes as a unit with a single instruction.
	 * Doing the assignment this way is an attempt to make that happen.
	 */
	*dest_tre = tre;
}
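
/* Worked example (flag arithmetic only): the last TRE of an outbound
 * data transfer (bei == true, opcode IPA_CMD_NONE) gets type GSI_RE_XFER
 * (0x2) in bits 16-23 plus IEOT (bit 9) and BEI (bit 10):
 *	0x00020000 | 0x00000200 | 0x00000400 = 0x00020600
 * A non-last TRE of the same transfer instead sets only CHAIN (bit 0):
 *	0x00020000 | 0x00000001 = 0x00020001
 */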

/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes. Moves the transaction to the
 * pending list. Finally, updates the channel ring pointer and optionally
 * rings the doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct ipa_cmd_info *info;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u32 avail;
	u32 i;

	/* assert(trans->used > 0); */

	/* Consume the entries. If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If there is no info array we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	info = trans->info ? &trans->info[0] : NULL;
	avail = ring->count - ring->index % ring->count;
	dest_tre = gsi_ring_virt(ring, ring->index);
	for_each_sg(trans->sgl, sg, trans->used, i) {
		bool last_tre = i == trans->used - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(ring, 0);
		if (info)
			opcode = info++->opcode;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}
	ring->index += trans->used;

	if (channel->toward_ipa) {
		/* We record TX bytes when they are sent */
		trans->len = byte_count;
		trans->trans_count = channel->trans_count;
		trans->byte_count = channel->byte_count;
		channel->trans_count++;
		channel->byte_count += byte_count;
	}

	/* Associate the last TRE with the transaction */
	gsi_channel_trans_map(channel, ring->index - 1, trans);

	gsi_trans_move_pending(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_channel_tx_queued(channel);
		gsi_channel_doorbell(channel);
	}
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}
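
/* Worked example for the commit loop above (illustrative): with
 * ring->count == 8, ring->index == 6 and a transaction using 4 TREs,
 * avail starts at 2. The first two TREs fill slots 6 and 7; when avail
 * is exhausted, dest_tre is rewound to slot 0 for the remaining two.
 * ring->index advances to 10 (slot 2 modulo the ring count), and the
 * transaction is mapped at index 9, i.e. slot 1, its last TRE.
 */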

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete, with timeout */
int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
				  unsigned long timeout)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
	unsigned long remaining = 1;	/* In case of empty transaction */

	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	remaining = wait_for_completion_timeout(&trans->completion,
						timeout_jiffies);
out_trans_free:
	gsi_trans_free(trans);

	return remaining ? 0 : -ETIMEDOUT;
}

/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;
	bool cancelled;

	/* channel->gsi->mutex is held by caller */
	spin_lock_bh(&trans_info->spinlock);

	cancelled = !list_empty(&trans_info->pending);
	list_for_each_entry(trans, &trans_info->pending, links)
		trans->cancelled = true;

	list_splice_tail_init(&trans_info->pending, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);

	/* Schedule NAPI polling to complete the cancelled transactions */
	if (cancelled)
		napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */

	dest_tre = gsi_ring_virt(ring, ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}
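
/* Illustrative pairing (caller context hypothetical): since
 * gsi_trans_read_byte() reserves and fills a TRE directly rather than
 * using a transaction, the caller must balance it with a call to
 * gsi_trans_read_byte_done() once the hardware has consumed the byte.
 * Compiled out.
 */
#if 0
static int gsi_trans_example_read_byte(struct gsi *gsi, u32 channel_id,
				       dma_addr_t addr)
{
	int ret;

	ret = gsi_trans_read_byte(gsi, channel_id, addr);
	if (ret)
		return ret;

	/* ... wait for the hardware to complete the transfer ... */

	gsi_trans_read_byte_done(gsi, channel_id);

	return 0;
}
#endif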

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	/* The map array is used to determine what transaction is associated
	 * with a TRE that the hardware reports has completed. We need one
	 * map entry per TRE.
	 */
	trans_info = &channel->trans_info;
	trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map)
		return -ENOMEM;

	/* We can't use more TREs than there are available in the ring.
	 * This limits the number of transactions that can be outstanding.
	 * Worst case is one TRE per transaction (but we actually limit
	 * it to something a little less than that). We allocate resources
	 * for transactions (including transaction structures) based on
	 * this maximum number.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);

	/* Transactions are allocated one at a time. */
	ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
				  tre_max, 1);
	if (ret)
		goto err_kfree;

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction. Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed. So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 *
	 * All TREs in a transaction must fit within the channel's TLV FIFO.
	 * A transaction on a channel can allocate as many TREs as that but
	 * no more.
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->tlv_count);
	if (ret)
		goto err_trans_pool_exit;

	/* Finally, the tre_avail field is what ultimately limits the number
	 * of outstanding transactions and their resources. A transaction
	 * allocation succeeds only if the TREs available are sufficient for
	 * what the transaction might need. Transaction resource pools are
	 * sized based on the maximum number of outstanding TREs, so there
	 * will always be resources available if there are TREs available.
	 */
	atomic_set(&trans_info->tre_avail, tre_max);

	spin_lock_init(&trans_info->spinlock);
	INIT_LIST_HEAD(&trans_info->alloc);
	INIT_LIST_HEAD(&trans_info->pending);
	INIT_LIST_HEAD(&trans_info->complete);
	INIT_LIST_HEAD(&trans_info->polled);

	return 0;

err_trans_pool_exit:
	gsi_trans_pool_exit(&trans_info->pool);
err_kfree:
	kfree(trans_info->map);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}

/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	gsi_trans_pool_exit(&trans_info->pool);
	kfree(trans_info->map);
}
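
/* Worked example for gsi_channel_trans_init() above (illustrative
 * numbers): a channel with tre_max == 256 and tlv_count == 16 gets an
 * sg_pool of at least 256 + 16 - 1 = 271 scatterlist entries (see
 * gsi_trans_pool_init()), enough for any mix of outstanding
 * transactions of up to 16 TREs each.
 */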