/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>

#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/**
 * struct msc_block - multiblock mode block descriptor
 * @bdesc:	pointer to hardware descriptor (beginning of the block)
 * @addr:	physical address of the block
 */
struct msc_block {
	struct msc_block_desc	*bdesc;
	dma_addr_t		addr;
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @nr_blocks:	number of blocks (pages) in this window
 * @msc:	back pointer to the MSC device
 * @block:	array of block descriptors
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	unsigned int		nr_blocks;
	struct msc		*msc;
	struct msc_block	block[0];
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:		block wrapping handling
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	int			start_block;
	int			block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @thdev:		intel_th_device pointer
 * @win_list:		list of windows in multiblock mode
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_mutex:		mutex to serialize access to msc::iter_list
 * @iter_list:		list of open file descriptor iterators
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	struct intel_th_device	*thdev;

	struct list_head	win_list;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;

	/* <0: no buffer, 0: no users, >0: active users */
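	/*
	 * Lifecycle, as implemented below: msc_buffer_alloc() flips the
	 * counter -1 -> 0 with atomic_cmpxchg(); the read, mmap and
	 * activation paths take references with
	 * atomic_inc_unless_negative(); freeing flips 0 -> -1, so a
	 * buffer with active users can be neither freed nor reallocated.
	 */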
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct mutex		iter_mutex;
	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;
	u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
	unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->block[0].addr == win_addr)
			found++;

		/* skip the empty ones */
		if (msc_block_is_empty(win->block[0].bdesc))
			continue;

		if (found)
			return win;
	}

	return list_entry(msc->win_list.next, struct msc_window, entry);
}

/**
 * msc_win_oldest_block() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	index of the block with the oldest data
 */
static unsigned int msc_win_oldest_block(struct msc_window *win)
{
	unsigned int blk;
	struct msc_block_desc *bdesc = win->block[0].bdesc;

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return 0;

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
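	 *
	 * For example, in a window that has wrapped and whose block 2 is
	 * marked last written, block 2 holds the newest data (from the
	 * descriptor up to its valid data size) and the oldest data (from
	 * there to the end of the page); msc_buffer_iterate() visits such
	 * a block twice for exactly this reason.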
	 */
	for (blk = 0; blk < win->nr_blocks; blk++) {
		bdesc = win->block[blk].bdesc;

		if (msc_block_last_written(bdesc))
			return blk;
	}

	return 0;
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 *
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_entry(win->msc->win_list.next, struct msc_window,
				  entry);

	return list_entry(win->entry.next, struct msc_window, entry);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return iter->win->block[iter->block].bdesc;
}

static void msc_iter_init(struct msc_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	iter->start_block = -1;
	iter->block = -1;
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	msc_iter_init(iter);
	iter->msc = msc;

	mutex_lock(&msc->iter_mutex);
	list_add_tail(&iter->entry, &msc->iter_list);
	mutex_unlock(&msc->iter_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->iter_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->iter_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block != -1)
		return;

	iter->start_block = msc_win_oldest_block(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = -1;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = -1;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (++iter->block == iter->win->nr_blocks)
		iter->block = 0;

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;

	mutex_lock(&msc->buf_mutex);
	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for (blk = 0; blk < win->nr_blocks; blk++) {
			struct msc_block_desc *bdesc = win->block[blk].bdesc;

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. If msc::enabled is set, enable the trace, too.
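 *
 * Only the base address of the first window (msc::base_addr) is programmed
 * into MSCnBAR, as a page frame number; in multiblock mode the hardware
 * then follows the next_blk/next_win links that msc_buffer_relink() wrote
 * into the block descriptors.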
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	if (msc->mode > MSC_MODE_MULTI)
		return -ENOTSUPP;

	if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_clear_hw_header(msc);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);
	/*
	 * Currently disabled:
	 * if (msc->mode == MSC_MODE_MULTI)
	 *	reg |= MSC_RD_HDR_OVRD;
	 */
	if (msc->wrap)
		reg |= MSC_WRAPEN;
	if (msc->enabled)
		reg |= MSC_EN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->enabled) {
		msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
		intel_th_trace_enable(msc->thdev);
	}

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage.
 */
static void msc_disable(struct msc *msc)
{
	unsigned long count;
	u32 reg;

	if (!msc->enabled)
		return;

	intel_th_trace_disable(msc->thdev);

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");

	if (msc->mode == MSC_MODE_SINGLE) {
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
	msc->enabled = 0;

	iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->iter_mutex);
	if (!list_empty(&msc->iter_list))
		ret = -EBUSY;
	mutex_unlock(&msc->iter_mutex);

	if (ret) {
		atomic_dec(&msc->user_count);
		return ret;
	}

	msc->enabled = 1;

	return msc_configure(msc);
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	msc_disable(msc);

	atomic_dec(&msc->user_count);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
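 *
 * The high-order allocation is split into individual pages right away
 * (split_page()) so that msc_mmap_fault() can take per-page references
 * and msc_buffer_contig_free() can release the pages one at a time.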
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *page;

	if (!size)
		return 0;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);
	msc->nr_pages = size >> PAGE_SHIFT;
	msc->base = page_address(page);
	msc->base_addr = page_to_phys(page);

	return 0;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	unsigned long size = PAGE_SIZE;
	int i, ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(offsetof(struct msc_window, block[nr_blocks]),
		      GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_entry(msc->win_list.prev,
						     struct msc_window, entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	for (i = 0; i < nr_blocks; i++) {
		win->block[i].bdesc = dma_alloc_coherent(msc_dev(msc), size,
							 &win->block[i].addr,
							 GFP_KERNEL);

		if (!win->block[i].bdesc)
			goto err_nomem;

#ifdef CONFIG_X86
		/* Set the page as uncached */
		set_memory_uc((unsigned long)win->block[i].bdesc, 1);
#endif
	}

	win->msc = msc;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = win->block[0].bdesc;
		msc->base_addr = win->block[0].addr;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	for (i--; i >= 0; i--) {
#ifdef CONFIG_X86
		/* Reset the page to write-back before releasing */
		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
		dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
				  win->block[i].addr);
	}
	kfree(win);

	return ret;
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
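 *
 * On x86, each block is switched back to write-back caching before it is
 * returned to dma_free_coherent(), undoing the set_memory_uc() done at
 * allocation time.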
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	int i;

	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	for (i = 0; i < win->nr_blocks; i++) {
		struct page *page = virt_to_page(win->block[i].bdesc);

		page->mapping = NULL;
#ifdef CONFIG_X86
		/* Reset the page to write-back before releasing */
		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
		dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
				  win->block[i].bdesc, win->block[i].addr);
	}

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_entry(msc->win_list.next,
					      struct msc_window, entry);
		} else {
			next_win = list_entry(win->entry.next,
					      struct msc_window, entry);
		}

		for (blk = 0; blk < win->nr_blocks; blk++) {
			struct msc_block_desc *bdesc = win->block[blk].bdesc;

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT;

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_blocks - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk =
					win->block[0].addr >> PAGE_SHIFT;
			} else {
				bdesc->next_blk =
					win->block[blk + 1].addr >> PAGE_SHIFT;
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = PAGE_SIZE / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
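 *
 * In practice this runs only after msc::user_count has been flipped to -1
 * (no buffer) by msc_buffer_unlocked_free_unless_used(), so no new readers
 * or mappings can show up while the buffer is being torn down.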
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of elements in @nr_pages
 *
 * Allocate a storage buffer for MSC. Depending on msc::mode, this is done
 * either via msc_buffer_contig_alloc() for SINGLE operation mode or via
 * msc_buffer_win_alloc() for multiblock operation. The latter allocates one
 * window per invocation, so in multiblock mode this can be called multiple
 * times for the same MSC to allocate multiple windows.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -ENOTSUPP;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
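 *
 * In multiblock mode the buffer is logically contiguous in page offsets:
 * each window covers [win::pgoff..win::pgoff + win::nr_blocks), as set up
 * by msc_buffer_win_alloc().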
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;
	return virt_to_page(win->block[pgoff].bdesc);
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}


/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (!iter)
		return -ENOMEM;

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->enabled) {
		ret = -EBUSY;
		goto put_count;
	}

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -ENOTSUPP;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _counts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
};

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);

	mutex_init(&msc->iter_mutex);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
		if (!strncmp(msc_mode[i], buf, len))
			goto found;

	return -EINVAL;

found:
	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (!ret)
		msc->mode = i;
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
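		/*
		 * Each comma-separated token is one window size in pages;
		 * e.g. writing "64,64" in multi mode requests two 64-page
		 * windows (via a sysfs path along the lines of
		 * /sys/bus/intel_th/devices/0-msc0/nr_pages, the exact name
		 * being platform-dependent).
		 */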
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		len -= end - p;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;

	err = intel_th_msc_init(msc);
	if (err)
		return err;

	err = sysfs_create_group(&dev->kobj, &msc_output_group);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	sysfs_remove_group(&thdev->dev.kobj, &msc_output_group);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");