/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>

#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)
/**
 * struct msc_block - multiblock mode block descriptor
 * @bdesc:	pointer to hardware descriptor (beginning of the block)
 * @addr:	DMA address of the block
 */
struct msc_block {
	struct msc_block_desc	*bdesc;
	dma_addr_t		addr;
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @nr_blocks:	number of blocks (pages) in this window
 * @msc:	back-pointer to the MSC device
 * @block:	array of block descriptors
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	unsigned int		nr_blocks;
	struct msc		*msc;
	struct msc_block	block[0];
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:		passes remaining over the block where data wrapped
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	int			start_block;
	int			block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @thdev:		intel_th_device pointer
 * @win_list:		list of windows in multiblock mode
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open file descriptor iterators
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	struct intel_th_device	*thdev;

	struct list_head	win_list;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}
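
/*
 * Illustration of the checks above: bdesc->valid_dw counts valid dwords
 * in the block *including* the descriptor header, so (a sketch, assuming
 * the msu.h definition of msc_data_sz()):
 *
 *	data_bytes = bdesc->valid_dw * 4 - MSC_BDESC;
 *
 * and a block whose valid_dw covers only the header carries no payload.
 */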

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;
	u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
	unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->block[0].addr == win_addr)
			found++;

		/* skip the empty ones */
		if (msc_block_is_empty(win->block[0].bdesc))
			continue;

		if (found)
			return win;
	}

	return list_entry(msc->win_list.next, struct msc_window, entry);
}

/**
 * msc_win_oldest_block() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	index of the block with the oldest data
 */
static unsigned int msc_win_oldest_block(struct msc_window *win)
{
	unsigned int blk;
	struct msc_block_desc *bdesc = win->block[0].bdesc;

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return 0;

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for (blk = 0; blk < win->nr_blocks; blk++) {
		bdesc = win->block[blk].bdesc;

		if (msc_block_last_written(bdesc))
			return blk;
	}

	return 0;
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_entry(win->msc->win_list.next, struct msc_window,
				  entry);

	return list_entry(win->entry.next, struct msc_window, entry);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return iter->win->block[iter->block].bdesc;
}

static void msc_iter_init(struct msc_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	iter->start_block = -1;
	iter->block = -1;
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	msc_iter_init(iter);
	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block != -1)
		return;

	iter->start_block = msc_win_oldest_block(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = -1;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = -1;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (++iter->block == iter->win->nr_blocks)
		iter->block = 0;

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
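		/*
		 * Worked example: with data_bytes == D, the first pass
		 * copies the DATA_IN_PAGE - D oldest bytes starting at
		 * offset D into the data area, and the second pass copies
		 * the D newest bytes starting at MSC_BDESC, exactly like
		 * a block that never wrapped.
		 */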
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for (blk = 0; blk < win->nr_blocks; blk++) {
			struct msc_block_desc *bdesc = win->block[blk].bdesc;

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -ENOTSUPP;

	if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_clear_hw_header(msc);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	unsigned long count;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	intel_th_trace_disable(msc->thdev);

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");

	if (msc->mode == MSC_MODE_SINGLE) {
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
	msc->enabled = 0;

	iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *page;

	if (!size)
		return 0;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);
	msc->nr_pages = size >> PAGE_SHIFT;
	msc->base = page_address(page);
	msc->base_addr = page_to_phys(page);

	return 0;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

/**
 * msc_buffer_win_alloc() - allocate a window for multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	unsigned long size = PAGE_SIZE;
	int i, ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(offsetof(struct msc_window, block[nr_blocks]),
		      GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_entry(msc->win_list.prev,
						     struct msc_window, entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	for (i = 0; i < nr_blocks; i++) {
		win->block[i].bdesc = dma_alloc_coherent(msc_dev(msc), size,
							 &win->block[i].addr,
							 GFP_KERNEL);

		/* check the allocation before touching page attributes */
		if (!win->block[i].bdesc)
			goto err_nomem;

#ifdef CONFIG_X86
		/* Set the page as uncached */
		set_memory_uc((unsigned long)win->block[i].bdesc, 1);
#endif
	}

	win->msc = msc;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = win->block[0].bdesc;
		msc->base_addr = win->block[0].addr;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	for (i--; i >= 0; i--) {
#ifdef CONFIG_X86
		/* Reset the page to write-back before releasing */
		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
		dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
				  win->block[i].addr);
	}
	kfree(win);

	return ret;
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	int i;

	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	for (i = 0; i < win->nr_blocks; i++) {
		struct page *page = virt_to_page(win->block[i].bdesc);

		page->mapping = NULL;
#ifdef CONFIG_X86
		/* Reset the page to write-back before releasing */
		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
		dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
				  win->block[i].bdesc, win->block[i].addr);
	}

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_entry(msc->win_list.next,
					      struct msc_window, entry);
		} else {
			next_win = list_entry(win->entry.next,
					      struct msc_window, entry);
		}

		for (blk = 0; blk < win->nr_blocks; blk++) {
			struct msc_block_desc *bdesc = win->block[blk].bdesc;

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT;

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_blocks - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk =
					win->block[0].addr >> PAGE_SHIFT;
			} else {
				bdesc->next_blk =
					win->block[blk + 1].addr >> PAGE_SHIFT;
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = PAGE_SIZE / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}
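
/*
 * After msc_buffer_relink(), the descriptors form rings; e.g. for two
 * windows of two blocks each (a sketch of the resulting layout):
 *
 *   win0: blk0.next_blk -> blk1, blk1.next_blk -> blk0 (LASTBLK set);
 *         every block's next_win points at win1's first block
 *   win1: same shape, but every block also carries MSC_SW_TAG_LASTWIN
 *         and next_win wraps back to win0's first block
 *
 * block_sz is expressed in 64-byte chunks, hence PAGE_SIZE / 64 above.
 */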

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of windows (entries in @nr_pages)
 *
 * Allocate a storage buffer for MSC; depending on msc::mode, this is done
 * either via msc_buffer_contig_alloc() for SINGLE operation mode or via
 * msc_buffer_multi_alloc() for multiblock operation, which allocates one
 * window per entry in @nr_pages.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -ENOTSUPP;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}
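
/*
 * A summary of the msc::user_count lifecycle, as implemented above and
 * below (not new behavior, just the transitions in one place):
 *
 *   -1 -> 0   msc_buffer_alloc(): atomic_cmpxchg(&user_count, -1, 0)
 *    0 -> n   activate/read/mmap: atomic_inc_unless_negative()
 *    n -> 0   the matching atomic_dec() on each path
 *    0 -> -1  msc_buffer_unlocked_free_unless_used():
 *             atomic_cmpxchg(&user_count, 0, -1), then free
 */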

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;
	return virt_to_page(win->block[pgoff].bdesc);
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 *
 * Return:	number of bytes that could not be copied, like copy_to_user().
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

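	/*
	 * In single mode the write pointer wraps at the buffer size: after
	 * a wrap, the oldest data lives in [single_sz..size) and the newest
	 * in [0..single_sz). E.g. with a 16-page buffer and single_sz at
	 * 4 pages' worth, a read at off == 0 starts at byte single_sz and
	 * wraps around to the beginning of the buffer.
	 */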
	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -ENOTSUPP;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static int msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}
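
/*
 * Minimal userspace sketch of consuming the buffer via mmap() (the device
 * node path is an assumption; it depends on how the platform numbers its
 * intel_th devices):
 *
 *	int fd = open("/dev/intel_th0/msc0", O_RDONLY);
 *	void *buf = mmap(NULL, nr_pages * page_size, PROT_READ,
 *			 MAP_SHARED, fd, 0);
 *
 * The mapping must start at pgoff 0 and cover all of nr_pages, or the
 * mmap() handler above returns -EINVAL.
 */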

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
}

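/*
 * A usage sketch for the "mode" attribute (written via sysfs under the
 * MSC output device):
 *
 *	echo multi > mode
 *
 * which only succeeds if the buffer currently has no active users.
 */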
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	/* require an exact match, so that e.g. "mu" doesn't match "multi" */
	for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
		if (len == strlen(msc_mode[i]) &&
		    !strncmp(msc_mode[i], buf, len))
			goto found;

	return -EINVAL;

found:
	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (!ret)
		msc->mode = i;
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%lu\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%u%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

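/*
 * A usage sketch for the "nr_pages" attribute: in multiblock mode,
 *
 *	echo 64,32,32 > nr_pages
 *
 * allocates three windows of 64, 32 and 32 pages respectively; in single
 * mode, exactly one value is accepted.
 */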
static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret)
			goto free_win;

		/* zero-sized windows are not allowed */
		if (!val) {
			ret = -EINVAL;
			goto free_win;
		}

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;

	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");