// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 * handling code
 *
 * All state transitions happen automatically, except for LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};
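
/*
 * In terms of this driver's entry points (a summary of existing behavior,
 * not new semantics): msc_configure() moves the current window from READY
 * to INUSE when tracing starts; the interrupt handler moves a full window
 * INUSE -> LOCKED and the next one READY -> INUSE; the buffer owner later
 * calls intel_th_msc_window_unlock() to move it LOCKED -> READY again.
 */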

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @lockout:	lockout state, see comment above
 * @lo_lock:	lockout state serialization
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	MSC device that this window belongs to
 * @_sgt:	builtin array of block descriptors, used unless an MSU
 *		buffer provides its own
 * @sgt:	array of block descriptors in use (@_sgt or the MSU buffer's)
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		current block in the window
 * @block_off:		offset into current block
 * @wrap_count:		number of remaining visits to the wrapped block
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU configuration block base address
 * @thdev:		intel_th_device pointer
 * @mbuf:		MSU buffer, if assigned
 * @mbuf_priv:		MSU buffer's private data, if @mbuf
 * @work:		a work for deferred trace deactivation (scheduled
 *			from the interrupt handler)
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @orig_addr:		MSC0BAR contents saved at configure time
 * @orig_sz:		MSC0SIZE contents saved at configure time
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open file descriptor iterators
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		use interrupt signalling for window switches
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:	link to msu_buffer_list
 * @mbuf:	MSU buffer object
 * @owner:	module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
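
/*
 * A minimal registration sketch for an external buffer sink module
 * (hypothetical "my_sink" names, for illustration only; see struct
 * msu_buffer in include/linux/intel_th.h for the full set of callbacks):
 *
 *	static const struct msu_buffer my_sink_mbuf = {
 *		.name		= "my_sink",
 *		.assign		= my_sink_assign,
 *		.unassign	= my_sink_unassign,
 *		.ready		= my_sink_ready,
 *	};
 *
 *	static int __init my_sink_init(void)
 *	{
 *		return intel_th_msu_buffer_register(&my_sink_mbuf, THIS_MODULE);
 *	}
 *
 * The sink is then selected by writing "my_sink" to the mode attribute
 * (see mode_store() below).
 */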

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return (size_t)win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:	MSC device
 * @sgt:	SG table of the window
 * @nonempty:	skip over empty windows
 *
 * Return:	MSC window structure pointer or NULL if the window
 *		could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	scatterlist of the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback; like copy_to_user(), it returns the
 *		number of bytes it could not consume
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}
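
/*
 * A minimal callback sketch (hypothetical, for illustration only;
 * msc_win_to_user() below is the real in-tree user). Like copy_to_user(),
 * the callback returns the number of bytes it could NOT consume, so that
 * partial progress is accounted for in iter->offset:
 *
 *	static unsigned long my_count_bytes(void *data, void *src, size_t len)
 *	{
 *		*(size_t *)data += len;
 *		return 0;
 *	}
 */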

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;

		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
	}

	return ret;
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -EINVAL;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz   = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret < 0)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					  PAGE_SIZE, &sg_dma_address(sg_ptr),
					  GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		/* Set the page as uncached */
		set_memory_uc((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}

static void msc_buffer_set_wb(struct msc_window *win)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	msc_buffer_set_uc(win, ret);

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		struct page *page = sg_page(sg);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	msc_buffer_set_wb(win);

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex held */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of windows (elements of @nr_pages)
 *
 * Allocate a storage buffer for MSC; depending on msc::mode, this is done
 * either via msc_buffer_contig_alloc() for SINGLE operation mode or via
 * msc_buffer_multi_alloc() for multiblock operation, which allocates one
 * window per element of @nr_pages.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -EINVAL;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};
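
/*
 * Userspace access sketch (hypothetical paths; the exact device node
 * depends on the Trace Hub instance, see Documentation/trace/intel_th.rst):
 * the character device can only be opened while the MSC is inactive, and
 * an mmap() must cover the whole buffer starting at offset 0, where
 * buf_size below is the total buffer size in bytes:
 *
 *	int fd = open("/dev/intel_th0/msc0", O_RDONLY);
 *	void *buf = mmap(NULL, buf_size, PROT_READ, MAP_SHARED, fd, 0);
 */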

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static int msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	if (list_empty(&msc->win_list))
		return -EINVAL;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);

	intel_th_trace_switch(msc->thdev);

	return 0;
}

/**
 * intel_th_msc_window_unlock() - put the window back in rotation
 * @dev:	MSC device to which this relates
 * @sgt:	buffer's sg_table for the window, does nothing if NULL
 */
void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;

	if (!sgt)
		return;

	win = msc_find_window(msc, sgt, false);
	if (!win)
		return;

	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
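
/*
 * A buffer sink would typically call this from whatever context consumes
 * the window's data, once it is done with it (a hypothetical sketch;
 * "my_sink" names are not part of this driver):
 *
 *	static void my_sink_consume(struct my_sink *sink)
 *	{
 *		process_data(sink->sgt);
 *		intel_th_msc_window_unlock(sink->dev, sink->sgt);
 *	}
 *
 * where sink->dev is the device passed to the sink's ->assign() callback
 * and sink->sgt is the sg_table passed to its ->ready() callback.
 */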

static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		schedule_work(&msc->work);
		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switch can only happen in the "multi" mode.
	 * If an external buffer is engaged, it has full control
	 * over window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);
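
/*
 * Typical sysfs configuration flow (a sketch; "0-msc0" assumes Trace Hub
 * instance 0, see Documentation/trace/intel_th.rst for details):
 *
 *	# echo multi > /sys/bus/intel_th/devices/0-msc0/mode
 *	# echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 *	# echo 1 > /sys/bus/intel_th/devices/0-msc0/win_switch
 *
 * The first two select multiblock mode and allocate two 64-page windows;
 * the last one forces a window switch once tracing is running.
 */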

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");