// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	pointer to the MSC that this window belongs to
 * @_sgt:	array of block descriptors
 * @sgt:	pointer to the array of block descriptors (points to @_sgt)
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:		number of remaining visits to the wrapped block, which
 *			holds both the oldest and the newest data of its window
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	int			start_block;
	int			block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU global register window base address
 * @thdev:		intel_th_device pointer
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open file descriptor iterators
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		IRQ resource is available, handle interrupts
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct msc_block_desc *
msc_win_block(struct msc_window *win, unsigned int block)
{
	return sg_virt(&win->sgt->sgl[block]);
}

static inline size_t
msc_win_actual_bsz(struct msc_window *win, unsigned int block)
{
	return win->sgt->sgl[block].length;
}

static inline dma_addr_t
msc_win_baddr(struct msc_window *win, unsigned int block)
{
	return sg_dma_address(&win->sgt->sgl[block]);
}

static inline unsigned long
msc_win_bpfn(struct msc_window *win, unsigned int block)
{
	return msc_win_baddr(win, block) >> PAGE_SHIFT;
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 *
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win, *next;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	next = msc_next_window(msc->cur_win);

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win == next)
			found++;

		/* skip the empty ones */
		if (msc_block_is_empty(msc_win_block(win, 0)))
			continue;

		if (found)
			return win;
	}

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_block() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	index of the block with the oldest data
 */
static unsigned int msc_win_oldest_block(struct msc_window *win)
{
	unsigned int blk;
	struct msc_block_desc *bdesc = msc_win_block(win, 0);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return 0;

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for (blk = 0; blk < win->nr_segs; blk++) {
		bdesc = msc_win_block(win, blk);

		if (msc_block_last_written(bdesc))
			return blk;
	}

	return 0;
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return msc_win_block(iter->win, iter->block);
}

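/*
 * A start_block of -1 means the iterator hasn't been started on a window
 * yet; msc_iter_block_start() then latches the window's oldest block.
 */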
static void msc_iter_init(struct msc_iter *iter)
{
	memset(iter, 0, sizeof(*iter));
	iter->start_block = -1;
	iter->block = -1;
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	msc_iter_init(iter);
	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block != -1)
		return;

	iter->start_block = msc_win_oldest_block(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;
}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = -1;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = -1;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (++iter->block == iter->win->nr_segs)
		iter->block = 0;

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

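		/*
		 * Only the hardware-written part of the descriptor (from
		 * hw_tag onwards) is cleared; the software-programmed
		 * linkage set up by msc_buffer_relink() is left intact.
		 */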
		for (blk = 0; blk < win->nr_segs; blk++) {
			struct msc_block_desc *bdesc = msc_win_block(win, blk);

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

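	/* reading MSUSTS and writing it back acknowledges any pending status */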
	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -ENOTSUPP;

	if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_clear_hw_header(msc);

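	/* the BAR register is programmed in pages, hence the shift */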
	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

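		/*
		 * MSC0MWP holds the current write pointer; its offset from
		 * the buffer base is where the next byte would land
		 */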
		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
	msc->enabled = 0;

	iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

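	/* split the high-order page so each page can be refcounted on its own */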
	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (!ret) {
		/* dma_map_sg() returns 0 on failure */
		ret = -ENOMEM;
		goto err_free_pages;
	}

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

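/*
 * Allocate a DMA-coherent page for each segment of a window and fill in
 * its scatterlist; returns the number of segments or -ENOMEM.
 */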
static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					  PAGE_SIZE, &sg_dma_address(sg_ptr),
					  GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for (i--; i >= 0; i--)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  msc_win_block(win, i),
				  msc_win_baddr(win, i));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
	int i;

	for (i = 0; i < nr_segs; i++)
		/* Set the page as uncached */
		set_memory_uc((unsigned long)msc_win_block(win, i), 1);
}

static void msc_buffer_set_wb(struct msc_window *win)
{
	int i;

	for (i = 0; i < win->nr_segs; i++)
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)msc_win_block(win, i), 1);
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */

/**
 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	/*
	 * This limitation holds as long as we need random access to the
	 * blocks. When that changes, this can go away.
	 */
	if (nr_blocks > SG_MAX_SINGLE_ALLOC)
		return -EINVAL;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	ret = __msc_buffer_win_alloc(win, nr_blocks);
	if (ret < 0)
		goto err_nomem;

	msc_buffer_set_uc(win, ret);

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_block(win, 0);
		msc->base_addr = msc_win_baddr(win, 0);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	int i;

	for (i = 0; i < win->nr_segs; i++) {
		struct page *page = sg_page(&win->sgt->sgl[i]);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  msc_win_block(win, i), msc_win_baddr(win, i));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	msc_buffer_set_wb(win);

	__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::buf_mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for (blk = 0; blk < win->nr_segs; blk++) {
			struct msc_block_desc *bdesc = msc_win_block(win, blk);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_bpfn(next_win, 0);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_bpfn(win, 0);
			} else {
				bdesc->next_blk = msc_win_bpfn(win, blk + 1);
			}

			bdesc->sw_tag = sw_tag;
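			/* hardware expects the block size in 64-byte units */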
			bdesc->block_sz = msc_win_actual_bsz(win, blk) / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	array of window sizes, in pages
 * @nr_wins:	number of windows in @nr_pages
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_multi_alloc() for multiblock operation, which allocates one
 * window per entry in the @nr_pages array.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -ENOTSUPP;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for (blk = 0; blk < win->nr_segs; blk++) {
		struct page *page = sg_page(&win->sgt->sgl[blk]);
		size_t pgsz = PFN_DOWN(msc_win_actual_bsz(win, blk));

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 *
 * Return:	number of bytes that could not be copied
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

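	/*
	 * If the buffer has wrapped, the oldest data starts at the write
	 * pointer (single_sz): copy the tail [single_sz..size) first,
	 * then wrap around to the head [0..single_sz).
	 */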
	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -ENOTSUPP;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* clear the pages' mappings that were set up in msc_mmap_fault() */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

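	/* poll the status register until the pipeline empty (PLE) bit is set */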
	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

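/*
 * Advance cur_win to the next window in the list and signal the switch
 * to the hardware; called with msc::buf_mutex held.
 */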
static void msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_block(msc->cur_win, 0);
	msc->base_addr = msc_win_baddr(msc->cur_win, 0);

	intel_th_trace_switch(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;

	if (!(msusts & mask)) {
		if (msc->enabled)
			return IRQ_HANDLED;
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	/* require a full match so that a prefix like "s" doesn't count */
	for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
		if (len == strlen(msc_mode[i]) && !strncmp(msc_mode[i], buf, len))
			goto found;

	return -EINVAL;

found:
	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (!ret)
		msc->mode = i;
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%lu\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%u%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret)
			goto free_win;

		/* zero-sized windows are not allowed */
		if (!val) {
			ret = -EINVAL;
			goto free_win;
		}

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);

static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	mutex_lock(&msc->buf_mutex);
	if (msc->mode != MSC_MODE_MULTI)
		ret = -ENOTSUPP;
	else
		msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	err = intel_th_msu_init(msc);
	if (err)
		return err;

	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);
	intel_th_msu_deinit(msc);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.irq		= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");