// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/**
 * struct msc_block - multiblock mode block descriptor
 * @bdesc:	pointer to hardware descriptor (beginning of the block)
 * @addr:	DMA address of the block
 */
struct msc_block {
	struct msc_block_desc	*bdesc;
	dma_addr_t		addr;
};

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @nr_blocks:	number of blocks (pages) in this window
 * @msc:	pointer to the MSC device this window belongs to
 * @block:	array of block descriptors
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	unsigned int		nr_blocks;
	struct msc		*msc;
	struct msc_block	block[];
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:	number of remaining passes over the wrapped (last written) block
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	int			start_block;
	int			block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @thdev:		intel_th_device pointer
 * @win_list:		list of windows in multiblock mode
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open file descriptor iterators
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	struct intel_th_device	*thdev;

	struct list_head	win_list;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;
	u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
	unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->block[0].addr == win_addr)
			found++;

		/* skip the empty ones */
		if (msc_block_is_empty(win->block[0].bdesc))
			continue;

		if (found)
			return win;
	}

	return list_entry(msc->win_list.next, struct msc_window, entry);
}

/**
 * msc_win_oldest_block() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	index of the block with the oldest data
 */
static unsigned int msc_win_oldest_block(struct msc_window *win)
{
	unsigned int blk;
	struct msc_block_desc *bdesc = win->block[0].bdesc;

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return 0;

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for (blk = 0; blk < win->nr_blocks; blk++) {
		bdesc = win->block[blk].bdesc;

		if (msc_block_last_written(bdesc))
			return blk;
	}

	return 0;
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_entry(win->msc->win_list.next, struct msc_window,
				  entry);

	return list_entry(win->entry.next, struct msc_window, entry);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return iter->win->block[iter->block].bdesc;
}

static void msc_iter_init(struct msc_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	iter->start_block = -1;
	iter->block = -1;
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	msc_iter_init(iter);
	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block != -1)
		return;

	iter->start_block = msc_win_oldest_block(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = -1;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = -1;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (++iter->block == iter->win->nr_blocks)
		iter->block = 0;

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes. @fn is expected to behave like
 * copy_to_user(): it returns the number of bytes it could not process.
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}
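
/*
 * Example iterator callback (an illustrative sketch, not used by the
 * driver): like copy_to_user(), @fn returns the number of bytes it could
 * not consume, so a callback that copies chunks into a sufficiently
 * large kernel buffer could look like this:
 *
 *	static unsigned long msc_copy_to_kbuf(void *data, void *src,
 *					      size_t len)
 *	{
 *		memcpy(data, src, len);
 *		return 0;
 *	}
 */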

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for (blk = 0; blk < win->nr_blocks; blk++) {
			struct msc_block_desc *bdesc = win->block[blk].bdesc;

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -ENOTSUPP;

	if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_clear_hw_header(msc);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	unsigned long count;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	intel_th_trace_disable(msc->thdev);

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");

	if (msc->mode == MSC_MODE_SINGLE) {
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
	msc->enabled = 0;

	iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *page;

	if (!size)
		return 0;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);
	msc->nr_pages = size >> PAGE_SHIFT;
	msc->base = page_address(page);
	msc->base_addr = page_to_phys(page);

	return 0;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

/**
 * msc_buffer_win_alloc() - allocate a window for multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of blocks (pages) in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	unsigned long size = PAGE_SIZE;
	int i, ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(offsetof(struct msc_window, block[nr_blocks]),
		      GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_entry(msc->win_list.prev,
						     struct msc_window, entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	for (i = 0; i < nr_blocks; i++) {
		win->block[i].bdesc =
			dma_alloc_coherent(msc_dev(msc)->parent->parent, size,
					   &win->block[i].addr, GFP_KERNEL);

		if (!win->block[i].bdesc)
			goto err_nomem;

#ifdef CONFIG_X86
		/* Set the page as uncached */
		set_memory_uc((unsigned long)win->block[i].bdesc, 1);
#endif
	}

	win->msc = msc;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = win->block[0].bdesc;
		msc->base_addr = win->block[0].addr;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	for (i--; i >= 0; i--) {
#ifdef CONFIG_X86
		/* Reset the page to write-back before releasing */
		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
		/* free with the same device the allocation was made with */
		dma_free_coherent(msc_dev(msc)->parent->parent, size,
				  win->block[i].bdesc, win->block[i].addr);
	}
	kfree(win);

	return ret;
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	int i;

	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	for (i = 0; i < win->nr_blocks; i++) {
		struct page *page = virt_to_page(win->block[i].bdesc);

		page->mapping = NULL;
#ifdef CONFIG_X86
		/* Reset the page to write-back before releasing */
		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
#endif
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  win->block[i].bdesc, win->block[i].addr);
	}

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_entry(msc->win_list.next,
					      struct msc_window, entry);
		} else {
			next_win = list_entry(win->entry.next,
					      struct msc_window, entry);
		}

		for (blk = 0; blk < win->nr_blocks; blk++) {
			struct msc_block_desc *bdesc = win->block[blk].bdesc;

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT;

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_blocks - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk =
					win->block[0].addr >> PAGE_SHIFT;
			} else {
				bdesc->next_blk =
					win->block[blk + 1].addr >> PAGE_SHIFT;
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = PAGE_SIZE / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}
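
/*
 * For example (an illustrative layout, not taken from hardware docs):
 * with two windows of two blocks each, msc_buffer_relink() produces
 *
 *	win0/blk0 -> win0/blk1 -> win0/blk0 (LASTBLK), next_win -> win1
 *	win1/blk0 -> win1/blk1 -> win1/blk0 (LASTBLK, LASTWIN), next_win -> win0
 *
 * i.e. the block list of each window and the window list itself are
 * circular, with the sw_tag bits marking where they wrap around.
 */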

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of windows in @nr_pages
 *
 * Allocate a storage buffer for MSC. Depending on msc::mode, this is done
 * either via msc_buffer_contig_alloc() for SINGLE operation mode or via
 * msc_buffer_multi_alloc(), which sets up one window per element of
 * @nr_pages.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -ENOTSUPP;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}
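
/*
 * To summarize, msc::user_count moves through these states (an
 * illustrative recap of the code above, not an additional invariant):
 *
 *	-1 --msc_buffer_alloc()--> 0 --readers/mmap/activate--> >0
 *	 0 --msc_buffer_unlocked_free_unless_used()--> -1
 *
 * so a buffer can only be freed when it exists and has no active users.
 */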

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free the MSC buffer unless it is in use or there is no
 * allocated buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;
	return virt_to_page(win->block[pgoff].bdesc);
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 *
 * Return:	number of bytes that could not be copied, copy_to_user() style
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}
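
/*
 * Worked example for the wrapped case above (illustrative): with a
 * 4-page (16384 byte) buffer that has wrapped and stopped writing at
 * single_sz == 0x1000, a read at off == 0 starts at 0x1000, where the
 * oldest data begins, copies up to the end of the buffer, then wraps
 * around to [0x0..0x1000), which holds the newest data.
 */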

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -ENOTSUPP;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static int msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}
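
/*
 * Userspace sketch of mapping the trace buffer (illustrative; the device
 * node name below is an example and system dependent, and the mapping
 * must start at offset 0 and cover all of the buffer's nr_pages):
 *
 *	int fd = open("/dev/intel_th0/msc0", O_RDONLY);
 *	void *buf = mmap(NULL, nr_pages * page_size, PROT_READ,
 *			 MAP_SHARED, fd, 0);
 */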

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
		if (!strncmp(msc_mode[i], buf, len))
			goto found;

	return -EINVAL;

found:
	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (!ret)
		msc->mode = i;
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);
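
/*
 * Typical configuration from userspace (illustrative; the exact sysfs
 * path is platform dependent):
 *
 *	# echo multi > /sys/bus/intel_th/devices/0-msc0/mode
 *	# echo 64,32 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 *
 * In "multi" mode each comma-separated value allocates one window of
 * that many pages; "single" mode accepts exactly one value.
 */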

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;

	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");