// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @nr_blocks:	number of blocks (pages) in this window
 * @msc:	link to the MSC that this window belongs to
 * @sgt:	array of block descriptors
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	unsigned int		nr_blocks;
	struct msc		*msc;
	struct sg_table		sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:		block wrapping handling
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	int			start_block;
	int			block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU global register window base address
 * @thdev:		intel_th_device pointer
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open msc_iter readers
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		whether MSU interrupts are used
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct msc_block_desc *
msc_win_block(struct msc_window *win, unsigned int block)
{
	return sg_virt(&win->sgt.sgl[block]);
}

static inline dma_addr_t
msc_win_baddr(struct msc_window *win, unsigned int block)
{
	return sg_dma_address(&win->sgt.sgl[block]);
}

static inline unsigned long
msc_win_bpfn(struct msc_window *win, unsigned int block)
{
	return msc_win_baddr(win, block) >> PAGE_SHIFT;
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win, *next = msc_next_window(msc->cur_win);
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win == next)
			found++;

		/* skip the empty ones */
		if (msc_block_is_empty(msc_win_block(win, 0)))
			continue;

		if (found)
			return win;
	}

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_block() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	index of the block with the oldest data
 */
static unsigned int msc_win_oldest_block(struct msc_window *win)
{
	unsigned int blk;
	struct msc_block_desc *bdesc = msc_win_block(win, 0);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return 0;

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for (blk = 0; blk < win->nr_blocks; blk++) {
		bdesc = msc_win_block(win, blk);

		if (msc_block_last_written(bdesc))
			return blk;
	}

	return 0;
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return msc_win_block(iter->win, iter->block);
}

static void msc_iter_init(struct msc_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	iter->start_block = -1;
	iter->block = -1;
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	msc_iter_init(iter);
	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block != -1)
		return;

	iter->start_block = msc_win_oldest_block(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = -1;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = -1;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (++iter->block == iter->win->nr_blocks)
		iter->block = 0;

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}
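
/*
 * Illustration of the read order implemented by the iterator helpers above
 * (a sketch; the window and block numbering is only an example): with
 * windows W0 -> W1, where W1 is the current (newest) window and W0 has
 * wrapped with block 2 last written, a read visits
 *
 *	W0: blk2 (old tail), blk3, blk0, blk1, blk2 (new head),
 *	W1: blk0, ... up to W1's last written block.
 *
 * That is, reading starts at the oldest window (the one that will be
 * written to next) and, within each window, at the oldest block; a wrapped
 * block is visited twice, first for the data beyond the current write
 * offset and finally for the data before it.
 */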

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			       offsetof(struct msc_block_desc, hw_tag);

		for (blk = 0; blk < win->nr_blocks; blk++) {
			struct msc_block_desc *bdesc = msc_win_block(win, blk);

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}
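
/*
 * Illustrative example of what msc_configure() below programs (values are
 * hypothetical; the register fields are defined in msu.h): a 64-page SINGLE
 * mode buffer at bus address 0x1_0000_0000 with wrapping enabled ends up
 * with
 *
 *	MSC0BAR  = 0x100000	(base_addr >> PAGE_SHIFT)
 *	MSC0SIZE = 0x40		(nr_pages)
 *	MSC0CTL |= MSC_EN | MSC_WRAPEN |
 *		   (MSC_MODE_SINGLE << __ffs(MSC_MODE)) |
 *		   (burst_len << __ffs(MSC_LEN))
 */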

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -ENOTSUPP;

	if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_clear_hw_header(msc);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
	msc->enabled = 0;

	iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}
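
/*
 * Single (contiguous) mode allocation example (a sketch; the sizes are only
 * illustrative): writing "64" to the nr_pages attribute in "single" mode
 * ends up in msc_buffer_contig_alloc() below with size == 64 * PAGE_SIZE,
 * which grabs one physically contiguous, zeroed allocation of that order
 * (256 KiB with 4 KiB pages) and maps it for DMA as a single-entry
 * sg_table.
 */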

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret < 0)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_blocks)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(&win->sgt, nr_blocks, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt.sgl, sg_ptr, nr_blocks, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_blocks;

err_nomem:
	for (i--; i >= 0; i--)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  msc_win_block(win, i),
				  msc_win_baddr(win, i));

	sg_free_table(&win->sgt);

	return -ENOMEM;
}

/**
 * msc_buffer_win_alloc() - allocate a window for multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM, i;

	if (!nr_blocks)
		return 0;

	/*
	 * This limitation holds as long as we need random access to the
	 * block. When that changes, this can go away.
	 */
	if (nr_blocks > SG_MAX_SINGLE_ALLOC)
		return -EINVAL;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		/* This works as long as blocks are page-sized */
		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	ret = __msc_buffer_win_alloc(win, nr_blocks);
	if (ret < 0)
		goto err_nomem;

#ifdef CONFIG_X86
	for (i = 0; i < ret; i++)
		/* Set the page as uncached */
		set_memory_uc((unsigned long)msc_win_block(win, i), 1);
#endif

	win->nr_blocks = ret;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_block(win, 0);
		msc->base_addr = msc_win_baddr(win, 0);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	int i;

	for (i = 0; i < win->nr_blocks; i++) {
		struct page *page = sg_page(&win->sgt.sgl[i]);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  msc_win_block(win, i), msc_win_baddr(win, i));
	}
	sg_free_table(&win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	int i;

	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

#ifdef CONFIG_X86
	for (i = 0; i < win->nr_blocks; i++)
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)msc_win_block(win, i), 1);
#endif

	__msc_buffer_win_free(msc, win);

	kfree(win);
}
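
/*
 * Illustration of the descriptor chain that msc_buffer_relink() below sets
 * up (a sketch with hypothetical numbers): for two windows of two blocks
 * each, every block descriptor gets next_blk/next_win pointing at the pfn
 * of the next block/window, the last block of each window carries
 * MSC_SW_TAG_LASTBLK and points back at block 0 of its own window, blocks
 * of the last window additionally carry MSC_SW_TAG_LASTWIN and their
 * next_win points back at window 0, and block_sz is programmed as
 * PAGE_SIZE / 64 (the hardware presumably counts in 64-byte chunks).
 */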

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for (blk = 0; blk < win->nr_blocks; blk++) {
			struct msc_block_desc *bdesc = msc_win_block(win, blk);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_bpfn(next_win, 0);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_blocks - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_bpfn(win, 0);
			} else {
				bdesc->next_blk = msc_win_bpfn(win, blk + 1);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = PAGE_SIZE / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of windows in @nr_pages
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode: it is
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_multi_alloc() for multiblock operation, which sets up one
 * window per element of @nr_pages.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -ENOTSUPP;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;
	return sg_page(&win->sgt.sgl[pgoff]);
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}


/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}
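
/*
 * Worked example for the wrapped SINGLE mode copy-out below (a sketch; the
 * numbers are hypothetical): with a 4-page (16 KiB) buffer that wrapped
 * with the write pointer at single_sz == 0x1000, the oldest data starts at
 * offset 0x1000 in the buffer. A read at *ppos == 0 therefore copies
 * [0x1000..0x4000) first and then wraps around to copy [0x0..0x1000), so
 * userspace sees the data in chronological order.
 */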

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -ENOTSUPP;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static void msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_block(msc->cur_win, 0);
	msc->base_addr = msc_win_baddr(msc->cur_win, 0);

	intel_th_trace_switch(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;

	if (!(msusts & mask)) {
		if (msc->enabled)
			return IRQ_HANDLED;
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
		if (!strncmp(msc_mode[i], buf, len))
			goto found;

	return -EINVAL;

found:
	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (!ret)
		msc->mode = i;
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	mutex_lock(&msc->buf_mutex);
	if (msc->mode != MSC_MODE_MULTI)
		ret = -ENOTSUPP;
	else
		msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);
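
/*
 * Example of driving this output from userspace through the attributes
 * above (a sketch; the exact sysfs path depends on the Trace Hub instance
 * and device name):
 *
 *	echo multi > /sys/bus/intel_th/devices/0-msc0/mode
 *	echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 *	echo 1 > /sys/bus/intel_th/devices/0-msc0/wrap
 *	... enable tracing, then, on demand ...
 *	echo 1 > /sys/bus/intel_th/devices/0-msc0/win_switch
 *
 * allocates two 64-page windows, enables wrapping and forces a switch to
 * the next window while tracing.
 */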

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	err = intel_th_msu_init(msc);
	if (err)
		return err;

	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);
	intel_th_msu_deinit(msc);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.irq	= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");