// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 * handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED,
};
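
/*
 * Example walk of this state machine for one window, assuming an external
 * buffer (msc::mbuf) is in use -- without one, msc_win_set_lockout() is a
 * no-op and the states are not tracked:
 *
 *   msc_configure():              READY  -> INUSE   window is being written
 *   intel_th_msc_interrupt():     INUSE  -> LOCKED  window full, handed to
 *                                                   mbuf->ready()
 *   intel_th_msc_window_unlock(): LOCKED -> READY   buffer code is done
 */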

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @lockout:	lockout state, see comment above
 * @lo_lock:	lockout state serialization
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	pointer to the MSC device
 * @_sgt:	array of block descriptors
 * @sgt:	array of block descriptors
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:	msc::iter_list linkage
 * @msc:	pointer to the MSC device
 * @start_win:	oldest window
 * @win:	current window
 * @offset:	current logical offset into the buffer
 * @start_block: oldest block in the window
 * @block:	current block in the window
 * @block_off:	offset into current block
 * @wrap_count:	block wrapping handling
 * @eof:	end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU register window base address
 * @thdev:		intel_th_device pointer
 * @mbuf:		MSU buffer, if assigned
 * @mbuf_priv:		MSU buffer's private data, if @mbuf
 * @work:		a work to deactivate this MSC when a window fills up
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @switch_on_unlock:	window to switch to when it becomes available
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @orig_addr:		MSC0 BAR value saved before tracing
 * @orig_sz:		MSC0 SIZE value saved before tracing
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open file descriptor iterators
 * @stop_on_full:	stop the trace if the current window is full
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		IRQ resource is available, handle interrupts
 * @multi_is_broken:	multiblock mode unusable on this hardware
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	struct msc_window	*switch_on_unlock;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	bool			stop_on_full;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1,
				multi_is_broken : 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:	link to msu_buffer_list
 * @mbuf:	MSU buffer object
 * @owner:	module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer *mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}
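
/*
 * msu_buffer_get() pins the module that provides the named buffer, so the
 * callbacks in its struct msu_buffer can't go away while this MSC holds a
 * reference; msu_buffer_put() undoes the module pin. Lookup is by name,
 * serialized on msu_buffer_mutex.
 */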

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
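
/*
 * A minimal sketch of an external buffer ("sink") module registering
 * itself with the MSU. The callback names follow the way they are invoked
 * from this file (assign/unassign, alloc_window/free_window, ready); the
 * sink name and the my_sink_* functions are hypothetical:
 *
 *	static const struct msu_buffer my_sink_mbuf = {
 *		.name		= "my_sink",
 *		.assign		= my_sink_assign,
 *		.unassign	= my_sink_unassign,
 *		.alloc_window	= my_sink_alloc_window,
 *		.free_window	= my_sink_free_window,
 *		.ready		= my_sink_ready,
 *	};
 *
 *	// in the sink module's init/exit:
 *	intel_th_msu_buffer_register(&my_sink_mbuf, THIS_MODULE);
 *	intel_th_msu_buffer_unregister(&my_sink_mbuf);
 *
 * Once registered, writing "my_sink" to the MSC's "mode" sysfs attribute
 * (see mode_store() below) engages the sink.
 */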

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return (size_t)win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:	MSC device
 * @sgt:	SG table of the window
 * @nonempty:	skip over empty windows
 *
 * Return: MSC window structure pointer or NULL if the window
 * could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return: the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}
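
/*
 * For example, with three windows W0 -> W1 -> W2 and cur_win == W1, the
 * window that hardware will overwrite next is msc_next_window(W1) == W2,
 * so msc_oldest_window() looks there first: the first nonempty window at
 * or after W2 in list order holds the oldest data. If there is none,
 * tracing never wrapped past W2 and the list head W0 is the oldest.
 */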

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	scatterlist of the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}
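
/*
 * Traversal example for a wrapped window with blocks B0..B3, where B2 is
 * the last written block: iteration starts at B2 with wrap_count == 2 and
 * only the tail of B2 (the oldest data) is copied on the first visit;
 * then B3, B0 and B1 are copied in full, and B2 is visited a second time
 * (wrap_count drops to 1) for its head, the newest data, after which the
 * iterator advances to the next window.
 */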

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return: amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}
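
/*
 * Note the @fn contract above: like copy_to_user(), it returns the number
 * of bytes it could NOT copy, and a nonzero return stops the iteration
 * after accounting for the partial copy. msc_win_to_user() below is the
 * only in-file user of this interface.
 */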

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;

		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
	}

	return ret;
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -EINVAL;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}
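
/*
 * Note that the REG_MSU_MSC0* offsets above and in msc_disable() serve
 * both MSCs: probing sets msc::reg_base to the MSU base plus 0x100 times
 * the MSC's index, so the "MSC0" register block addresses whichever MSC
 * this struct msc represents.
 */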

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}
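
/*
 * In single mode, msc_disable() above leaves msc::single_sz pointing at
 * the hardware write pointer. If the buffer wrapped (msc::single_wrap),
 * the oldest data lives at [single_sz..buffer size) and the newest at
 * [0..single_sz); msc_single_to_user() below reads the two halves out in
 * that order.
 */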

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret < 0)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}
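
/*
 * The split_page() call in msc_buffer_contig_alloc() is what allows the
 * contiguous allocation to be unwound page by page: it turns one
 * higher-order allocation into individually refcounted 0-order pages, so
 * msc_buffer_contig_free() can __free_page() them one at a time and
 * msc_mmap_fault() can hand them out to userspace individually.
 */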

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return: page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		/* Set the page as uncached */
		set_memory_uc((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}

static void msc_buffer_set_wb(struct msc_window *win)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */
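
/*
 * Presumably the reason for mapping window memory uncached on x86 is that
 * both the hardware and the driver update the block descriptors in place:
 * reading a cached copy of a descriptor that the MSC has since written
 * behind the CPU's back would yield stale valid_dw/sw_tag values.
 */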

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	msc_buffer_set_uc(win, ret);

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		struct page *page = sg_page(sg);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	msc_buffer_set_wb(win);

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}
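
/*
 * Window page offsets accumulate as windows are added: allocating windows
 * of, say, 32 and 64 pages gives pgoff 0 for the first window and 32 for
 * the second, so that the whole multiblock buffer presents a flat page
 * range [0..msc::nr_pages) to msc_buffer_get_page() and mmap().
 */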

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}
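
/*
 * After relinking, every page of every window starts with a struct
 * msc_block_desc header (MSC_BDESC bytes) followed by trace data, with
 * block_sz expressed in 64-byte units, and the next_blk/next_win physical
 * page numbers forming two circular lists that let the MSC walk the
 * blocks within a window and hop to the next window.
 */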

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of windows in @nr_pages
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_win_alloc() for multiblock operation. The latter allocates one
 * window per invocation, so in multiblock mode this can be called multiple
 * times for the same MSC to allocate multiple windows.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}
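
/*
 * To summarize the msc::user_count life cycle: it starts out at -1 (no
 * buffer); msc_buffer_alloc() moves it to 0 with a cmpxchg once the
 * buffer is in place; readers, mmap()ers and trace activation then take
 * references via atomic_inc_unless_negative(), which is what makes the
 * free-unless-used cmpxchg(0, -1) above race-free.
 */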

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return: page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}
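
/*
 * Note that a reader can only exist while the MSC is disabled:
 * msc_iter_install() refuses to attach while tracing is enabled, and
 * intel_th_msc_activate() refuses to start tracing while iterators exist,
 * so intel_th_msc_read() below never races with the hardware writing the
 * buffer.
 */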

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -EINVAL;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static int msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	if (list_empty(&msc->win_list))
		return -EINVAL;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);

	intel_th_trace_switch(msc->thdev);

	return 0;
}

/**
 * intel_th_msc_window_unlock - put the window back in rotation
 * @dev:	MSC device to which this relates
 * @sgt:	buffer's sg_table for the window, does nothing if NULL
 */
void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;

	if (!sgt)
		return;

	win = msc_find_window(msc, sgt, false);
	if (!win)
		return;

	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
	if (msc->switch_on_unlock == win) {
		msc->switch_on_unlock = NULL;
		msc_win_switch(msc);
	}
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);

static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}
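
/*
 * The interrupt handler below drives the window rotation: a "buffer
 * (window) last" status bit means the current window has filled up, so it
 * is moved INUSE -> LOCKED and handed to mbuf->ready(), and tracing
 * continues in the next window. If the next window is still LOCKED, the
 * handler either schedules msc_work() to stop the trace (stop_on_full) or
 * records it in msc::switch_on_unlock so the switch happens as soon as
 * the buffer code releases it.
 */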

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		if (msc->stop_on_full)
			schedule_work(&msc->work);
		else
			msc->switch_on_unlock = next_win;

		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc_array(win, nr_wins, sizeof(*win), GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switch can only happen in the "multi" mode.
	 * If an external buffer is engaged, it has full control
	 * over window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);

static ssize_t stop_on_full_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", msc->stop_on_full);
}

static ssize_t stop_on_full_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	int ret;

	ret = kstrtobool(buf, &msc->stop_on_full);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR_RW(stop_on_full);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	&dev_attr_stop_on_full.attr,
	NULL,
};

static const struct attribute_group msc_output_group = {
	.attrs = msc_output_attrs,
};
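
/*
 * A typical multiblock setup from userspace, using the attributes above
 * and the character device that goes with this output port (the device
 * paths are illustrative; the Trace Hub and MSC numbers depend on the
 * platform):
 *
 *	# echo multi > /sys/bus/intel_th/devices/0-msc0/mode
 *	# echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 *	# ... enable tracing ...
 *	# cat /dev/intel_th0/msc0 > trace.bin
 */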

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe		= intel_th_msc_probe,
	.remove		= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops		= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");