// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 * handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @lockout:	lockout state, see comment above
 * @lo_lock:	lockout state serialization
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	MSC device that this window belongs to
 * @_sgt:	inline SG table, used when no external buffer provides one
 * @sgt:	SG table in use: either @_sgt or one from the MSU buffer
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:		block wrapping handling
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU register window base address
 * @thdev:		intel_th_device pointer
 * @mbuf:		MSU buffer, if assigned
 * @mbuf_priv:		MSU buffer's private data, if @mbuf
 * @work:		a work for deferred window switching from IRQ context
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @orig_addr:		MSC0BAR value saved at configure time
 * @orig_sz:		MSC0SIZE value saved at configure time
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open file descriptor iterators
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		IRQ resource is available, handle interrupts
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:	link to msu_buffer_list
 * @mbuf:	MSU buffer object
 * @owner:	module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
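
/*
 * Example: a minimal external buffer ("sink") module might register
 * itself with the MSU along the lines of the sketch below. This is an
 * illustration only: the "mysink" name and the mysink_* identifiers are
 * hypothetical, and the callback signatures are inferred from the call
 * sites in this file (assign() returns the sink's private context and
 * apparently selects the mode via *mode; alloc_window() returns the
 * number of segments it allocated; ready() hands a full window to the
 * sink).
 *
 *	static void *mysink_assign(struct device *dev, int *mode)
 *	{ ... }
 *	static void mysink_unassign(void *priv)
 *	{ ... }
 *	static int mysink_alloc_window(void *priv, struct sg_table **sgt,
 *				       size_t size)
 *	{ ... }
 *	static void mysink_free_window(void *priv, struct sg_table *sgt)
 *	{ ... }
 *	static int mysink_ready(void *priv, struct sg_table *sgt,
 *				size_t bytes)
 *	{ ... }
 *
 *	static const struct msu_buffer mysink_mbuf = {
 *		.name		= "mysink",
 *		.assign		= mysink_assign,
 *		.unassign	= mysink_unassign,
 *		.alloc_window	= mysink_alloc_window,
 *		.free_window	= mysink_free_window,
 *		.ready		= mysink_ready,
 *	};
 *
 * The module's init would then call
 * intel_th_msu_buffer_register(&mysink_mbuf, THIS_MODULE), and its exit
 * intel_th_msu_buffer_unregister(&mysink_mbuf).
 */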

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:	MSC device
 * @sgt:	SG table of the window
 * @nonempty:	skip over empty windows
 *
 * Return: MSC window structure pointer or NULL if the window
 * could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return: the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return: scatterlist element containing the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}
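
/*
 * For example (illustrative numbers only): in a window of blocks
 * B0..B3, if the hardware wrapped and stopped while writing B1, then B1
 * is the last written block and carries both ends of the data stream:
 * the bytes past its valid data still hold the oldest data, while its
 * head holds the newest. Reading therefore starts at B1, which is why
 * msc_win_oldest_sg() returns the last written block whenever the
 * window has wrapped.
 */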

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return: amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}
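
/*
 * A minimal sketch of an msc_buffer_iterate() callback (hypothetical,
 * for illustration): the callback receives a kernel-virtual source
 * pointer and a length, and returns the number of bytes it could NOT
 * consume, 0 meaning "all consumed, keep going" (the same contract
 * msc_win_to_user() below follows, mirroring copy_to_user()):
 *
 *	static unsigned long count_bytes(void *data, void *src, size_t len)
 *	{
 *		*(size_t *)data += len;	// consume everything
 *		return 0;
 *	}
 *
 * Returning a non-zero remainder stops the iteration at the current
 * offset, so a partial reader can pick up where it left off.
 */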

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;
	}

	return ret;
}
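
/*
 * The expected/new pairs above map onto the call sites in this file:
 * msc_configure() moves the current window READY -> INUSE before
 * enabling trace, the interrupt handler moves the outgoing window
 * INUSE -> LOCKED and the incoming one READY -> INUSE, msc_disable()
 * parks the current window INUSE -> LOCKED before handing it to the
 * sink, and intel_th_msc_window_unlock() completes the cycle with
 * LOCKED -> READY once the buffer code is done with the window.
 */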

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -ENOTSUPP;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	/* dma_map_sg() signals failure by mapping zero entries */
	if (ret < 1) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return: page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		/* Set the page as uncached */
		set_memory_uc((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}

static void msc_buffer_set_wb(struct msc_window *win)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */
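
/*
 * Note: switching window pages to uncached on x86 is presumably done so
 * that CPU reads of the block descriptors (which the hardware updates
 * as it writes trace data) don't return stale cache lines; the pages
 * are flipped back to write-back in msc_buffer_win_free() before they
 * are returned to the allocator.
 */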

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	msc_buffer_set_uc(win, ret);

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		struct page *page = sg_page(sg);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	msc_buffer_set_wb(win);

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}
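
/*
 * To illustrate (hypothetical layout, all values PFNs of the blocks'
 * DMA addresses): with two windows of two blocks each, W0 = {B0, B1}
 * and W1 = {B2, B3}, relinking produces
 *
 *	W0/B0: next_blk = B1, next_win = W1
 *	W0/B1: next_blk = B0, next_win = W1, sw_tag = LASTBLK
 *	W1/B2: next_blk = B3, next_win = W0, sw_tag = LASTWIN
 *	W1/B3: next_blk = B2, next_win = W0, sw_tag = LASTWIN | LASTBLK
 *
 * i.e. every block carries the base PFN of the next window, the last
 * block of each window points back to its own window's base, and the
 * last window's blocks are additionally tagged LASTWIN.
 */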

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of windows in @nr_pages
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_win_alloc() for multiblock operation. The latter allocates one
 * window per invocation, so in multiblock mode this can be called multiple
 * times for the same MSC to allocate multiple windows.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -ENOTSUPP;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}
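
/*
 * msc::user_count life cycle, for reference: it starts at -1 (no
 * buffer); msc_buffer_alloc() moves it to 0 (allocated, unused);
 * readers, mmappers and the trace-enable path take references via
 * atomic_inc_unless_negative(); and msc_buffer_unlocked_free_unless_used()
 * below retires the buffer by swinging 0 back to -1.
 */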

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return: 0 on successful deallocation or if there was no buffer to
 * deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return: page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}


/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}
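
/*
 * A worked example of the wrapped case above (illustrative numbers):
 * with a 16-page (64KB, 4KB pages) single mode buffer, single_wrap set
 * and single_sz = 0x1000, the oldest data lives at [0x1000..0x10000)
 * and the newest at [0..0x1000). A read from offset 0 therefore copies
 * the [0x1000..0x10000) span first, then wraps around to [0..0x1000),
 * presenting userspace with one linear stream of size bytes.
 */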

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -ENOTSUPP;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;

	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};
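
/*
 * From userspace, the mapping above is expected to cover the whole
 * buffer at offset 0. A sketch of a consumer (hypothetical: the device
 * node path varies by system, and the mapping size must equal the
 * allocated nr_pages times the page size):
 *
 *	int fd = open("/dev/<msc device node>", O_RDONLY);
 *	void *buf = mmap(NULL, nr_pages * page_size, PROT_READ,
 *			 MAP_SHARED, fd, 0);
 *
 * Any other size or a non-zero offset is rejected with -EINVAL by
 * intel_th_msc_mmap() above, and open() requires CAP_SYS_RAWIO.
 */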

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static void msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);

	intel_th_trace_switch(msc->thdev);
}

/**
 * intel_th_msc_window_unlock - put the window back in rotation
 * @dev:	MSC device to which this relates
 * @sgt:	buffer's sg_table for the window, does nothing if NULL
 */
void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;

	if (!sgt)
		return;

	win = msc_find_window(msc, sgt, false);
	if (!win)
		return;

	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
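
/*
 * Typical round trip with an external buffer, for reference: the
 * interrupt handler below hands a full window to the sink via
 * mbuf->ready(priv, win->sgt, bytes); once the sink has drained the
 * data (possibly from process context later), it calls
 * intel_th_msc_window_unlock(dev, sgt) with the same sg_table, which
 * transitions the window LOCKED -> READY so the hardware may use it
 * again.
 */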

static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		schedule_work(&msc->work);
		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);
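
/*
 * Example sysfs usage (illustrative values): from the device's output
 * attribute directory,
 *
 *	echo multi > mode
 *	echo 64,64,64 > nr_pages
 *
 * allocates three 64-page (256KB with 4KB pages) windows in multiblock
 * mode, while in single mode only one size may be given, e.g.
 * "echo 1024 > nr_pages". Reading nr_pages back prints the same
 * comma-separated list.
 */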

static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switch can only happen in the "multi" mode.
	 * If an external buffer is engaged, it has full control
	 * over window switching.
	 */
	if (msc->mode != MSC_MODE_MULTI || msc->mbuf)
		ret = -ENOTSUPP;
	else
		msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe		= intel_th_msc_probe,
	.remove		= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops		= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");