// SPDX-License-Identifier: GPL-2.0
/*
 * System Trace Module (STM) infrastructure
 * Copyright (c) 2014, Intel Corporation.
 *
 * STM class implements generic infrastructure for System Trace Module devices
 * as defined in the MIPI STPv2 specification.
 */

#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/compat.h>
#include <linux/kdev_t.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/stm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include "stm.h"

#include <uapi/linux/stm.h>

static unsigned int stm_core_up;

/*
 * The SRCU here makes sure that the STM device doesn't disappear from under
 * a stm_source_write() caller, which may want to have as little overhead as
 * possible.
 */
static struct srcu_struct stm_source_srcu;

static ssize_t masters_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct stm_device *stm = to_stm_device(dev);
        int ret;

        ret = sprintf(buf, "%u %u\n", stm->data->sw_start, stm->data->sw_end);

        return ret;
}

static DEVICE_ATTR_RO(masters);

static ssize_t channels_show(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct stm_device *stm = to_stm_device(dev);
        int ret;

        ret = sprintf(buf, "%u\n", stm->data->sw_nchannels);

        return ret;
}

static DEVICE_ATTR_RO(channels);

static ssize_t hw_override_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct stm_device *stm = to_stm_device(dev);
        int ret;

        ret = sprintf(buf, "%u\n", stm->data->hw_override);

        return ret;
}

static DEVICE_ATTR_RO(hw_override);

static struct attribute *stm_attrs[] = {
        &dev_attr_masters.attr,
        &dev_attr_channels.attr,
        &dev_attr_hw_override.attr,
        NULL,
};

ATTRIBUTE_GROUPS(stm);

static struct class stm_class = {
        .name           = "stm",
        .dev_groups     = stm_groups,
};
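
/*
 * Illustrative sketch (not part of the driver): with the attributes above,
 * an stm device registered as, say, "dummy_stm.0" (hypothetical name) shows
 * up as /sys/class/stm/dummy_stm.0/ with read-only files whose contents
 * follow the sprintf() formats in the *_show() callbacks:
 *
 *      masters     - "<sw_start> <sw_end>\n", e.g. "16 127\n"
 *      channels    - "<sw_nchannels>\n",      e.g. "128\n"
 *      hw_override - "<hw_override>\n",       e.g. "0\n"
 *
 * The example values are made up; the real numbers come from the stm_data
 * that the underlying driver passes to stm_register_device().
 */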

/**
 * stm_find_device() - find stm device by name
 * @buf: character buffer containing the name
 *
 * This is called when either a policy gets assigned to an stm device or an
 * stm_source device gets linked to an stm device.
 *
 * This grabs the device's reference (get_device()) and its module reference,
 * both of which the calling path needs to make sure to drop with
 * stm_put_device().
 *
 * Return: stm device pointer or NULL if lookup failed.
 */
struct stm_device *stm_find_device(const char *buf)
{
        struct stm_device *stm;
        struct device *dev;

        if (!stm_core_up)
                return NULL;

        dev = class_find_device_by_name(&stm_class, buf);
        if (!dev)
                return NULL;

        stm = to_stm_device(dev);
        if (!try_module_get(stm->owner)) {
                /* matches class_find_device() above */
                put_device(dev);
                return NULL;
        }

        return stm;
}

/**
 * stm_put_device() - drop references on the stm device
 * @stm: stm device, previously acquired by stm_find_device()
 *
 * This drops the module reference and device reference taken by
 * stm_find_device() or stm_char_open().
 */
void stm_put_device(struct stm_device *stm)
{
        module_put(stm->owner);
        put_device(&stm->dev);
}

/*
 * Internally we only care about software-writable masters here, that is the
 * ones in the range [stm_data->sw_start..stm_data->sw_end]; however, we need
 * the original master numbers to be visible externally, since they are the
 * ones that will appear in the STP stream. Thus, the internal bookkeeping
 * uses $master - stm_data->sw_start to reference master descriptors and
 * such: for example, with sw_start == 16, master 18 is stored in
 * stm::masters[2].
 */

#define __stm_master(_s, _m)                            \
        ((_s)->masters[(_m) - (_s)->data->sw_start])

static inline struct stp_master *
stm_master(struct stm_device *stm, unsigned int idx)
{
        if (idx < stm->data->sw_start || idx > stm->data->sw_end)
                return NULL;

        return __stm_master(stm, idx);
}

static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
{
        struct stp_master *master;

        master = kzalloc(struct_size(master, chan_map,
                                     BITS_TO_LONGS(stm->data->sw_nchannels)),
                         GFP_ATOMIC);
        if (!master)
                return -ENOMEM;

        master->nr_free = stm->data->sw_nchannels;
        __stm_master(stm, idx) = master;

        return 0;
}

static void stp_master_free(struct stm_device *stm, unsigned int idx)
{
        struct stp_master *master = stm_master(stm, idx);

        if (!master)
                return;

        __stm_master(stm, idx) = NULL;
        kfree(master);
}

static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
{
        struct stp_master *master = stm_master(stm, output->master);

        lockdep_assert_held(&stm->mc_lock);
        lockdep_assert_held(&output->lock);

        if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
                return;

        bitmap_allocate_region(&master->chan_map[0], output->channel,
                               ilog2(output->nr_chans));

        master->nr_free -= output->nr_chans;
}

static void
stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
{
        struct stp_master *master = stm_master(stm, output->master);

        lockdep_assert_held(&stm->mc_lock);
        lockdep_assert_held(&output->lock);

        bitmap_release_region(&master->chan_map[0], output->channel,
                              ilog2(output->nr_chans));

        master->nr_free += output->nr_chans;
        output->nr_chans = 0;
}

/*
 * This is like bitmap_find_free_region(), except it can ignore @start bits
 * at the beginning. Candidate positions are kept @width-aligned: for
 * example, with @width == 4, a free run starting at bit 6 is skipped and
 * the search continues from the next 4-aligned position.
 */
static int find_free_channels(unsigned long *bitmap, unsigned int start,
                              unsigned int end, unsigned int width)
{
        unsigned int pos;
        int i;

        for (pos = start; pos < end + 1; pos = ALIGN(pos, width)) {
                pos = find_next_zero_bit(bitmap, end + 1, pos);
                if (pos + width > end + 1)
                        break;

                if (pos & (width - 1))
                        continue;

                for (i = 1; i < width && !test_bit(pos + i, bitmap); i++)
                        ;
                if (i == width)
                        return pos;

                /* step over [pos..pos+i) to continue search */
                pos += i;
        }

        return -1;
}

static int
stm_find_master_chan(struct stm_device *stm, unsigned int width,
                     unsigned int *mstart, unsigned int mend,
                     unsigned int *cstart, unsigned int cend)
{
        struct stp_master *master;
        unsigned int midx;
        int pos, err;

        for (midx = *mstart; midx <= mend; midx++) {
                if (!stm_master(stm, midx)) {
                        err = stp_master_alloc(stm, midx);
                        if (err)
                                return err;
                }

                master = stm_master(stm, midx);

                if (!master->nr_free)
                        continue;

                pos = find_free_channels(master->chan_map, *cstart, cend,
                                         width);
                if (pos < 0)
                        continue;

                *mstart = midx;
                *cstart = pos;
                return 0;
        }

        return -ENOSPC;
}

static int stm_output_assign(struct stm_device *stm, unsigned int width,
                             struct stp_policy_node *policy_node,
                             struct stm_output *output)
{
        unsigned int midx, cidx, mend, cend;
        int ret = -EINVAL;

        if (width > stm->data->sw_nchannels)
                return -EINVAL;

        /* We no longer accept policy_node == NULL here */
        if (WARN_ON_ONCE(!policy_node))
                return -EINVAL;

        /*
         * Also, the caller holds a reference to policy_node, so it won't
         * disappear on us.
         */
        stp_policy_node_get_ranges(policy_node, &midx, &mend, &cidx, &cend);

        spin_lock(&stm->mc_lock);
        spin_lock(&output->lock);
        /* output is already assigned -- shouldn't happen */
        if (WARN_ON_ONCE(output->nr_chans))
                goto unlock;

        ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
        if (ret < 0)
                goto unlock;

        output->master = midx;
        output->channel = cidx;
        output->nr_chans = width;
        if (stm->pdrv->output_open) {
                void *priv = stp_policy_node_priv(policy_node);

                if (WARN_ON_ONCE(!priv))
                        goto unlock;

                /* configfs subsys mutex is held by the caller */
                ret = stm->pdrv->output_open(priv, output);
                if (ret)
                        goto unlock;
        }

        stm_output_claim(stm, output);
        dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width);

        ret = 0;
unlock:
        if (ret)
                output->nr_chans = 0;

        spin_unlock(&output->lock);
        spin_unlock(&stm->mc_lock);

        return ret;
}

static void stm_output_free(struct stm_device *stm, struct stm_output *output)
{
        spin_lock(&stm->mc_lock);
        spin_lock(&output->lock);
        if (output->nr_chans)
                stm_output_disclaim(stm, output);
        if (stm->pdrv && stm->pdrv->output_close)
                stm->pdrv->output_close(output);
        spin_unlock(&output->lock);
        spin_unlock(&stm->mc_lock);
}

static void stm_output_init(struct stm_output *output)
{
        spin_lock_init(&output->lock);
}

static int major_match(struct device *dev, const void *data)
{
        unsigned int major = *(unsigned int *)data;

        return MAJOR(dev->devt) == major;
}

/*
 * Framing protocol management
 * Modules can implement STM protocol drivers and (un-)register them
 * with the STM class framework.
 */
static struct list_head stm_pdrv_head;
static struct mutex stm_pdrv_mutex;

struct stm_pdrv_entry {
        struct list_head entry;
        const struct stm_protocol_driver *pdrv;
        const struct config_item_type *node_type;
};

static const struct stm_pdrv_entry *
__stm_lookup_protocol(const char *name)
{
        struct stm_pdrv_entry *pe;

        /*
         * If no name is given (NULL or ""), fall back to "p_basic".
         */
        if (!name || !*name)
                name = "p_basic";

        list_for_each_entry(pe, &stm_pdrv_head, entry) {
                if (!strcmp(name, pe->pdrv->name))
                        return pe;
        }

        return NULL;
}

int stm_register_protocol(const struct stm_protocol_driver *pdrv)
{
        struct stm_pdrv_entry *pe = NULL;
        int ret = -ENOMEM;

        mutex_lock(&stm_pdrv_mutex);

        if (__stm_lookup_protocol(pdrv->name)) {
                ret = -EEXIST;
                goto unlock;
        }

        pe = kzalloc(sizeof(*pe), GFP_KERNEL);
        if (!pe)
                goto unlock;

        if (pdrv->policy_attr) {
                pe->node_type = get_policy_node_type(pdrv->policy_attr);
                if (!pe->node_type)
                        goto unlock;
        }

        list_add_tail(&pe->entry, &stm_pdrv_head);
        pe->pdrv = pdrv;

        ret = 0;
unlock:
        mutex_unlock(&stm_pdrv_mutex);

        if (ret)
                kfree(pe);

        return ret;
}
EXPORT_SYMBOL_GPL(stm_register_protocol);
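
/*
 * Illustrative sketch (assumptions flagged): a minimal framing protocol
 * driver only needs a name, an owner and a write() callback; the full
 * structure definition lives in the local stm.h. The callback prototype
 * below is inferred from the stm_write() call site further down, and any
 * fields beyond .owner/.name/.write are omitted on purpose.
 *
 *      static ssize_t my_proto_write(struct stm_data *data,
 *                                    struct stm_output *output,
 *                                    unsigned int chan,
 *                                    const char *buf, size_t count)
 *      {
 *              return stm_data_write(data, output->master,
 *                                    output->channel + chan, true,
 *                                    buf, count);
 *      }
 *
 *      static const struct stm_protocol_driver my_proto = {
 *              .owner  = THIS_MODULE,
 *              .name   = "p_example",
 *              .write  = my_proto_write,
 *      };
 *
 *      // module init/exit would pair stm_register_protocol(&my_proto)
 *      // with stm_unregister_protocol(&my_proto)
 */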

void stm_unregister_protocol(const struct stm_protocol_driver *pdrv)
{
        struct stm_pdrv_entry *pe, *iter;

        mutex_lock(&stm_pdrv_mutex);

        list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
                if (pe->pdrv == pdrv) {
                        list_del(&pe->entry);

                        if (pe->node_type) {
                                kfree(pe->node_type->ct_attrs);
                                kfree(pe->node_type);
                        }
                        kfree(pe);
                        break;
                }
        }

        mutex_unlock(&stm_pdrv_mutex);
}
EXPORT_SYMBOL_GPL(stm_unregister_protocol);

static bool stm_get_protocol(const struct stm_protocol_driver *pdrv)
{
        return try_module_get(pdrv->owner);
}

void stm_put_protocol(const struct stm_protocol_driver *pdrv)
{
        module_put(pdrv->owner);
}

int stm_lookup_protocol(const char *name,
                        const struct stm_protocol_driver **pdrv,
                        const struct config_item_type **node_type)
{
        const struct stm_pdrv_entry *pe;

        mutex_lock(&stm_pdrv_mutex);

        pe = __stm_lookup_protocol(name);
        if (pe && pe->pdrv && stm_get_protocol(pe->pdrv)) {
                *pdrv = pe->pdrv;
                *node_type = pe->node_type;
        }

        mutex_unlock(&stm_pdrv_mutex);

        return pe ? 0 : -ENOENT;
}

static int stm_char_open(struct inode *inode, struct file *file)
{
        struct stm_file *stmf;
        struct device *dev;
        unsigned int major = imajor(inode);
        int err = -ENOMEM;

        dev = class_find_device(&stm_class, NULL, &major, major_match);
        if (!dev)
                return -ENODEV;

        stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
        if (!stmf)
                goto err_put_device;

        err = -ENODEV;
        stm_output_init(&stmf->output);
        stmf->stm = to_stm_device(dev);

        if (!try_module_get(stmf->stm->owner))
                goto err_free;

        file->private_data = stmf;

        return nonseekable_open(inode, file);

err_free:
        kfree(stmf);
err_put_device:
        /* matches class_find_device() above */
        put_device(dev);

        return err;
}

static int stm_char_release(struct inode *inode, struct file *file)
{
        struct stm_file *stmf = file->private_data;
        struct stm_device *stm = stmf->stm;

        if (stm->data->unlink)
                stm->data->unlink(stm->data, stmf->output.master,
                                  stmf->output.channel);

        stm_output_free(stm, &stmf->output);

        /*
         * matches the stm_char_open()'s
         * class_find_device() + try_module_get()
         */
        stm_put_device(stm);
        kfree(stmf);

        return 0;
}

static int
stm_assign_first_policy(struct stm_device *stm, struct stm_output *output,
                        char **ids, unsigned int width)
{
        struct stp_policy_node *pn;
        int err, n;

        /*
         * On success, stp_policy_node_lookup() will return holding the
         * configfs subsystem mutex, which is then released in
         * stp_policy_node_put(). This allows the pdrv->output_open() in
         * stm_output_assign() to serialize against the attribute accessors.
         */
        for (n = 0, pn = NULL; ids[n] && !pn; n++)
                pn = stp_policy_node_lookup(stm, ids[n]);

        if (!pn)
                return -EINVAL;

        err = stm_output_assign(stm, width, pn, output);

        stp_policy_node_put(pn);

        return err;
}

/**
 * stm_data_write() - send the given payload as data packets
 * @data: stm driver's data
 * @m: STP master
 * @c: STP channel
 * @ts_first: timestamp the first packet
 * @buf: data payload buffer
 * @count: data payload size
 */
ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m,
                               unsigned int c, bool ts_first, const void *buf,
                               size_t count)
{
        unsigned int flags = ts_first ? STP_PACKET_TIMESTAMPED : 0;
        ssize_t sz;
        size_t pos;

        for (pos = 0, sz = 0; pos < count; pos += sz) {
                sz = min_t(unsigned int, count - pos, 8);
                sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz,
                                  &((u8 *)buf)[pos]);
                if (sz <= 0)
                        break;

                if (ts_first) {
                        flags = 0;
                        ts_first = false;
                }
        }

        return sz < 0 ? sz : pos;
}
EXPORT_SYMBOL_GPL(stm_data_write);
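
/*
 * Worked example (values are hypothetical): a 12-byte payload written with
 * ts_first == true results in two calls to data->packet(): an 8-byte
 * STP_PACKET_DATA with STP_PACKET_TIMESTAMPED set, followed by a 4-byte
 * STP_PACKET_DATA with no flags. If the driver's packet() callback returns
 * a negative error partway through, that error is returned; otherwise the
 * number of bytes consumed so far is returned.
 */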

static ssize_t notrace
stm_write(struct stm_device *stm, struct stm_output *output,
          unsigned int chan, const char *buf, size_t count)
{
        int err;

        /* stm->pdrv is serialized against policy_mutex */
        if (!stm->pdrv)
                return -ENODEV;

        err = stm->pdrv->write(stm->data, output, chan, buf, count);
        if (err < 0)
                return err;

        return err;
}

static ssize_t stm_char_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
{
        struct stm_file *stmf = file->private_data;
        struct stm_device *stm = stmf->stm;
        char *kbuf;
        int err;

        if (count + 1 > PAGE_SIZE)
                count = PAGE_SIZE - 1;

        /*
         * If no m/c have been assigned to this writer up to this
         * point, try to use the task name and "default" policy entries.
         */
        if (!stmf->output.nr_chans) {
                char comm[sizeof(current->comm)];
                char *ids[] = { comm, "default", NULL };

                get_task_comm(comm, current);

                err = stm_assign_first_policy(stmf->stm, &stmf->output, ids, 1);
                /*
                 * EBUSY means that somebody else just assigned this
                 * output, which is just fine for write()
                 */
                if (err)
                        return err;
        }

        kbuf = kmalloc(count + 1, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        err = copy_from_user(kbuf, buf, count);
        if (err) {
                kfree(kbuf);
                return -EFAULT;
        }

        pm_runtime_get_sync(&stm->dev);

        count = stm_write(stm, &stmf->output, 0, kbuf, count);

        pm_runtime_mark_last_busy(&stm->dev);
        pm_runtime_put_autosuspend(&stm->dev);
        kfree(kbuf);

        return count;
}

static void stm_mmap_open(struct vm_area_struct *vma)
{
        struct stm_file *stmf = vma->vm_file->private_data;
        struct stm_device *stm = stmf->stm;

        pm_runtime_get(&stm->dev);
}

static void stm_mmap_close(struct vm_area_struct *vma)
{
        struct stm_file *stmf = vma->vm_file->private_data;
        struct stm_device *stm = stmf->stm;

        pm_runtime_mark_last_busy(&stm->dev);
        pm_runtime_put_autosuspend(&stm->dev);
}

static const struct vm_operations_struct stm_mmap_vmops = {
        .open   = stm_mmap_open,
        .close  = stm_mmap_close,
};

static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct stm_file *stmf = file->private_data;
        struct stm_device *stm = stmf->stm;
        unsigned long size, phys;

        if (!stm->data->mmio_addr)
                return -EOPNOTSUPP;

        if (vma->vm_pgoff)
                return -EINVAL;

        size = vma->vm_end - vma->vm_start;

        if (stmf->output.nr_chans * stm->data->sw_mmiosz != size)
                return -EINVAL;

        phys = stm->data->mmio_addr(stm->data, stmf->output.master,
                                    stmf->output.channel,
                                    stmf->output.nr_chans);

        if (!phys)
                return -EINVAL;

        pm_runtime_get_sync(&stm->dev);

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &stm_mmap_vmops;
        vm_iomap_memory(vma, phys, size);

        return 0;
}

static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
        struct stm_device *stm = stmf->stm;
        struct stp_policy_id *id;
        char *ids[] = { NULL, NULL };
        int ret = -EINVAL, wlimit = 1;
        u32 size;

        if (stmf->output.nr_chans)
                return -EBUSY;

        if (copy_from_user(&size, arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(*id) || size >= PATH_MAX + sizeof(*id))
                return -EINVAL;

        /*
         * size + 1 to make sure the .id string at the bottom is terminated,
         * which is also why memdup_user() is not useful here
         */
        id = kzalloc(size + 1, GFP_KERNEL);
        if (!id)
                return -ENOMEM;

        if (copy_from_user(id, arg, size)) {
                ret = -EFAULT;
                goto err_free;
        }

        if (id->__reserved_0 || id->__reserved_1)
                goto err_free;

        if (stm->data->sw_mmiosz)
                wlimit = PAGE_SIZE / stm->data->sw_mmiosz;

        if (id->width < 1 || id->width > wlimit)
                goto err_free;

        ids[0] = id->id;
        ret = stm_assign_first_policy(stmf->stm, &stmf->output, ids,
                                      id->width);
        if (ret)
                goto err_free;

        if (stm->data->link)
                ret = stm->data->link(stm->data, stmf->output.master,
                                      stmf->output.channel);

        if (ret)
                stm_output_free(stmf->stm, &stmf->output);

err_free:
        kfree(id);

        return ret;
}

static int stm_char_policy_get_ioctl(struct stm_file *stmf, void __user *arg)
{
        struct stp_policy_id id = {
                .size = sizeof(id),
                .master = stmf->output.master,
                .channel = stmf->output.channel,
                .width = stmf->output.nr_chans,
                .__reserved_0 = 0,
                .__reserved_1 = 0,
        };

        return copy_to_user(arg, &id, id.size) ? -EFAULT : 0;
}

static long
stm_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct stm_file *stmf = file->private_data;
        struct stm_data *stm_data = stmf->stm->data;
        int err = -ENOTTY;
        u64 options;

        switch (cmd) {
        case STP_POLICY_ID_SET:
                err = stm_char_policy_set_ioctl(stmf, (void __user *)arg);
                if (err)
                        return err;

                return stm_char_policy_get_ioctl(stmf, (void __user *)arg);

        case STP_POLICY_ID_GET:
                return stm_char_policy_get_ioctl(stmf, (void __user *)arg);

        case STP_SET_OPTIONS:
                if (copy_from_user(&options, (u64 __user *)arg, sizeof(u64)))
                        return -EFAULT;

                if (stm_data->set_options)
                        err = stm_data->set_options(stm_data,
                                                    stmf->output.master,
                                                    stmf->output.channel,
                                                    stmf->output.nr_chans,
                                                    options);

                break;
        default:
                break;
        }

        return err;
}

static const struct file_operations stm_fops = {
        .open           = stm_char_open,
        .release        = stm_char_release,
        .write          = stm_char_write,
        .mmap           = stm_char_mmap,
        .unlocked_ioctl = stm_char_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .llseek         = no_llseek,
};
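
/*
 * Illustrative user-space sketch (device node path and the "my_node" policy
 * node name are hypothetical): a writer opens the character device created
 * for the registered stm device, optionally claims a master/channel block
 * via STP_POLICY_ID_SET, and then simply write()s trace payloads. Skipping
 * the ioctl is also fine: the first write() falls back to the task name and
 * "default" policy entries (see stm_char_write() above). Only members of
 * struct stp_policy_id that are referenced in this file are used here.
 *
 *      size_t sz = sizeof(struct stp_policy_id) + strlen("my_node") + 1;
 *      struct stp_policy_id *id = calloc(1, sz);
 *      int fd = open("/dev/my_stm_device", O_RDWR);
 *
 *      id->size = sz;
 *      id->width = 1;
 *      strcpy(id->id, "my_node");
 *      ioctl(fd, STP_POLICY_ID_SET, id);  // reports master/channel back
 *      write(fd, "hello", 5);
 */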

static void stm_device_release(struct device *dev)
{
        struct stm_device *stm = to_stm_device(dev);

        vfree(stm);
}

int stm_register_device(struct device *parent, struct stm_data *stm_data,
                        struct module *owner)
{
        struct stm_device *stm;
        unsigned int nmasters;
        int err = -ENOMEM;

        if (!stm_core_up)
                return -EPROBE_DEFER;

        if (!stm_data->packet || !stm_data->sw_nchannels)
                return -EINVAL;

        nmasters = stm_data->sw_end - stm_data->sw_start + 1;
        stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
        if (!stm)
                return -ENOMEM;

        stm->major = register_chrdev(0, stm_data->name, &stm_fops);
        if (stm->major < 0) {
                err = stm->major;
                vfree(stm);
                return err;
        }

        device_initialize(&stm->dev);
        stm->dev.devt = MKDEV(stm->major, 0);
        stm->dev.class = &stm_class;
        stm->dev.parent = parent;
        stm->dev.release = stm_device_release;

        mutex_init(&stm->link_mutex);
        spin_lock_init(&stm->link_lock);
        INIT_LIST_HEAD(&stm->link_list);

        /* initialize the object before it is accessible via sysfs */
        spin_lock_init(&stm->mc_lock);
        mutex_init(&stm->policy_mutex);
        stm->sw_nmasters = nmasters;
        stm->owner = owner;
        stm->data = stm_data;
        stm_data->stm = stm;

        err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
        if (err)
                goto err_device;

        err = device_add(&stm->dev);
        if (err)
                goto err_device;

        /*
         * Use delayed autosuspend to avoid bouncing back and forth
         * on recurring character device writes, with the initial
         * delay time of 2 seconds.
         */
        pm_runtime_no_callbacks(&stm->dev);
        pm_runtime_use_autosuspend(&stm->dev);
        pm_runtime_set_autosuspend_delay(&stm->dev, 2000);
        pm_runtime_set_suspended(&stm->dev);
        pm_runtime_enable(&stm->dev);

        return 0;

err_device:
        unregister_chrdev(stm->major, stm_data->name);

        /* calls stm_device_release() */
        put_device(&stm->dev);

        return err;
}
EXPORT_SYMBOL_GPL(stm_register_device);
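
/*
 * Illustrative sketch (assumptions flagged): an STM hardware driver fills in
 * a struct stm_data and hands it to stm_register_device(). Only .packet and
 * .sw_nchannels are mandatory (see the checks above); the callback prototype
 * below is inferred from the data->packet() call in stm_data_write(), the
 * master/channel numbers are made up, and the exact structure layout lives
 * in <linux/stm.h>.
 *
 *      static ssize_t my_stm_packet(struct stm_data *data, unsigned int master,
 *                                   unsigned int channel, unsigned int packet,
 *                                   unsigned int flags, unsigned int size,
 *                                   const unsigned char *payload)
 *      {
 *              // push one STP packet to the hardware, return bytes consumed
 *              return size;
 *      }
 *
 *      static struct stm_data my_stm = {
 *              .name           = "my_stm",
 *              .sw_start       = 16,
 *              .sw_end         = 127,
 *              .sw_nchannels   = 128,
 *              .packet         = my_stm_packet,
 *      };
 *
 *      // probe:  stm_register_device(&pdev->dev, &my_stm, THIS_MODULE);
 *      // remove: stm_unregister_device(&my_stm);
 */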

static int __stm_source_link_drop(struct stm_source_device *src,
                                  struct stm_device *stm);

void stm_unregister_device(struct stm_data *stm_data)
{
        struct stm_device *stm = stm_data->stm;
        struct stm_source_device *src, *iter;
        int i, ret;

        pm_runtime_dont_use_autosuspend(&stm->dev);
        pm_runtime_disable(&stm->dev);

        mutex_lock(&stm->link_mutex);
        list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
                ret = __stm_source_link_drop(src, stm);
                /*
                 * The src <-> stm link must not change under the same
                 * stm::link_mutex, so complain loudly if it has; in that
                 * case ret != 0 also means this src is not connected to
                 * this stm, so it should otherwise be safe to proceed
                 * with the tear-down of stm.
                 */
                WARN_ON_ONCE(ret);
        }
        mutex_unlock(&stm->link_mutex);

        synchronize_srcu(&stm_source_srcu);

        unregister_chrdev(stm->major, stm_data->name);

        mutex_lock(&stm->policy_mutex);
        if (stm->policy)
                stp_policy_unbind(stm->policy);
        mutex_unlock(&stm->policy_mutex);

        for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
                stp_master_free(stm, i);

        device_unregister(&stm->dev);
        stm_data->stm = NULL;
}
EXPORT_SYMBOL_GPL(stm_unregister_device);

/*
 * stm::link_list access serialization uses a spinlock and a mutex; holding
 * either of them guarantees that the list is stable; modification requires
 * holding both of them.
 *
 * Lock ordering is as follows:
 *   stm::link_mutex
 *     stm::link_lock
 *       src::link_lock
 */

/**
 * stm_source_link_add() - connect an stm_source device to an stm device
 * @src: stm_source device
 * @stm: stm device
 *
 * This function establishes a link from stm_source to an stm device so that
 * the former can send out trace data to the latter.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int stm_source_link_add(struct stm_source_device *src,
                               struct stm_device *stm)
{
        char *ids[] = { NULL, "default", NULL };
        int err = -ENOMEM;

        mutex_lock(&stm->link_mutex);
        spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);

        /* src->link is dereferenced under stm_source_srcu but not the list */
        rcu_assign_pointer(src->link, stm);
        list_add_tail(&src->link_entry, &stm->link_list);

        spin_unlock(&src->link_lock);
        spin_unlock(&stm->link_lock);
        mutex_unlock(&stm->link_mutex);

        ids[0] = kstrdup(src->data->name, GFP_KERNEL);
        if (!ids[0])
                goto fail_detach;

        err = stm_assign_first_policy(stm, &src->output, ids,
                                      src->data->nr_chans);
        kfree(ids[0]);

        if (err)
                goto fail_detach;

        /* this is to notify the STM device that a new link has been made */
        if (stm->data->link)
                err = stm->data->link(stm->data, src->output.master,
                                      src->output.channel);

        if (err)
                goto fail_free_output;

        /* this is to let the source carry out all necessary preparations */
        if (src->data->link)
                src->data->link(src->data);

        return 0;

fail_free_output:
        stm_output_free(stm, &src->output);

fail_detach:
        mutex_lock(&stm->link_mutex);
        spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);

        rcu_assign_pointer(src->link, NULL);
        list_del_init(&src->link_entry);

        spin_unlock(&src->link_lock);
        spin_unlock(&stm->link_lock);
        mutex_unlock(&stm->link_mutex);

        return err;
}

/**
 * __stm_source_link_drop() - detach stm_source from an stm device
 * @src: stm_source device
 * @stm: stm device
 *
 * If @stm is @src::link, disconnect them from one another and put the
 * reference on the @stm device.
 *
 * Caller must hold stm::link_mutex.
 */
static int __stm_source_link_drop(struct stm_source_device *src,
                                  struct stm_device *stm)
{
        struct stm_device *link;
        int ret = 0;

        lockdep_assert_held(&stm->link_mutex);

        /* for stm::link_list modification, we hold both mutex and spinlock */
        spin_lock(&stm->link_lock);
        spin_lock(&src->link_lock);
        link = srcu_dereference_check(src->link, &stm_source_srcu, 1);

        /*
         * The linked device may have changed since we last looked, because
         * we weren't holding the src::link_lock back then; if this is the
         * case, tell the caller to retry.
         */
        if (link != stm) {
                ret = -EAGAIN;
                goto unlock;
        }

        stm_output_free(link, &src->output);
        list_del_init(&src->link_entry);
        pm_runtime_mark_last_busy(&link->dev);
        pm_runtime_put_autosuspend(&link->dev);
        /* matches stm_find_device() from stm_source_link_store() */
        stm_put_device(link);
        rcu_assign_pointer(src->link, NULL);

unlock:
        spin_unlock(&src->link_lock);
        spin_unlock(&stm->link_lock);

        /*
         * Call the unlink callbacks for both source and stm, when we know
         * that we have actually performed the unlinking.
         */
        if (!ret) {
                if (src->data->unlink)
                        src->data->unlink(src->data);

                if (stm->data->unlink)
                        stm->data->unlink(stm->data, src->output.master,
                                          src->output.channel);
        }

        return ret;
}

/**
 * stm_source_link_drop() - detach stm_source from its stm device
 * @src: stm_source device
 *
 * Unlinking means disconnecting from the source's STM device; after this,
 * writes will fail until it is linked to a new STM device.
 *
 * This will happen on "stm_source_link" sysfs attribute write to undo
 * the existing link (if any), or on linked STM device's de-registration.
 */
static void stm_source_link_drop(struct stm_source_device *src)
{
        struct stm_device *stm;
        int idx, ret;

retry:
        idx = srcu_read_lock(&stm_source_srcu);
        /*
         * The stm device will be valid for the duration of this
         * read section, but the link may change before we grab
         * the src::link_lock in __stm_source_link_drop().
         */
        stm = srcu_dereference(src->link, &stm_source_srcu);

        ret = 0;
        if (stm) {
                mutex_lock(&stm->link_mutex);
                ret = __stm_source_link_drop(src, stm);
                mutex_unlock(&stm->link_mutex);
        }

        srcu_read_unlock(&stm_source_srcu, idx);

        /* if it did change, retry */
        if (ret == -EAGAIN)
                goto retry;
}

static ssize_t stm_source_link_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct stm_source_device *src = to_stm_source_device(dev);
        struct stm_device *stm;
        int idx, ret;

        idx = srcu_read_lock(&stm_source_srcu);
        stm = srcu_dereference(src->link, &stm_source_srcu);
        ret = sprintf(buf, "%s\n",
                      stm ? dev_name(&stm->dev) : "<none>");
        srcu_read_unlock(&stm_source_srcu, idx);

        return ret;
}

static ssize_t stm_source_link_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct stm_source_device *src = to_stm_source_device(dev);
        struct stm_device *link;
        int err;

        stm_source_link_drop(src);

        link = stm_find_device(buf);
        if (!link)
                return -EINVAL;

        pm_runtime_get(&link->dev);

        err = stm_source_link_add(src, link);
        if (err) {
                pm_runtime_put_autosuspend(&link->dev);
                /* matches the stm_find_device() above */
                stm_put_device(link);
        }

        return err ? : count;
}

static DEVICE_ATTR_RW(stm_source_link);

static struct attribute *stm_source_attrs[] = {
        &dev_attr_stm_source_link.attr,
        NULL,
};

ATTRIBUTE_GROUPS(stm_source);

static struct class stm_source_class = {
        .name           = "stm_source",
        .dev_groups     = stm_source_groups,
};

static void stm_source_device_release(struct device *dev)
{
        struct stm_source_device *src = to_stm_source_device(dev);

        kfree(src);
}

/**
 * stm_source_register_device() - register an stm_source device
 * @parent: parent device
 * @data: device description structure
 *
 * This will create a device of stm_source class that can write
 * data to an stm device once linked.
 *
 * Return: 0 on success, -errno otherwise.
 */
int stm_source_register_device(struct device *parent,
                               struct stm_source_data *data)
{
        struct stm_source_device *src;
        int err;

        if (!stm_core_up)
                return -EPROBE_DEFER;

        src = kzalloc(sizeof(*src), GFP_KERNEL);
        if (!src)
                return -ENOMEM;

        device_initialize(&src->dev);
        src->dev.class = &stm_source_class;
        src->dev.parent = parent;
        src->dev.release = stm_source_device_release;

        err = kobject_set_name(&src->dev.kobj, "%s", data->name);
        if (err)
                goto err;

        pm_runtime_no_callbacks(&src->dev);
        pm_runtime_forbid(&src->dev);

        err = device_add(&src->dev);
        if (err)
                goto err;

        stm_output_init(&src->output);
        spin_lock_init(&src->link_lock);
        INIT_LIST_HEAD(&src->link_entry);
        src->data = data;
        data->src = src;

        return 0;

err:
        put_device(&src->dev);

        return err;
}
EXPORT_SYMBOL_GPL(stm_source_register_device);
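
/*
 * Illustrative sketch (assumptions flagged): a kernel-side trace producer
 * only needs a name and a channel count in its struct stm_source_data; the
 * full structure definition lives in <linux/stm.h>, and everything beyond
 * .name/.nr_chans is omitted here on purpose.
 *
 *      static struct stm_source_data my_src = {
 *              .name           = "my_source",
 *              .nr_chans       = 1,
 *      };
 *
 *      // probe:  stm_source_register_device(parent, &my_src);
 *      // remove: stm_source_unregister_device(&my_src);
 *
 * Once user space points /sys/class/stm_source/my_source/stm_source_link at
 * an stm device, the producer can emit data with:
 *
 *      stm_source_write(&my_src, 0, buf, len);
 */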

/**
 * stm_source_unregister_device() - unregister an stm_source device
 * @data: device description that was used to register the device
 *
 * This will remove a previously created stm_source device from the system.
 */
void stm_source_unregister_device(struct stm_source_data *data)
{
        struct stm_source_device *src = data->src;

        stm_source_link_drop(src);

        device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);

int notrace stm_source_write(struct stm_source_data *data,
                             unsigned int chan,
                             const char *buf, size_t count)
{
        struct stm_source_device *src = data->src;
        struct stm_device *stm;
        int idx;

        if (!src->output.nr_chans)
                return -ENODEV;

        if (chan >= src->output.nr_chans)
                return -EINVAL;

        idx = srcu_read_lock(&stm_source_srcu);

        stm = srcu_dereference(src->link, &stm_source_srcu);
        if (stm)
                count = stm_write(stm, &src->output, chan, buf, count);
        else
                count = -ENODEV;

        srcu_read_unlock(&stm_source_srcu, idx);

        return count;
}
EXPORT_SYMBOL_GPL(stm_source_write);

static int __init stm_core_init(void)
{
        int err;

        err = class_register(&stm_class);
        if (err)
                return err;

        err = class_register(&stm_source_class);
        if (err)
                goto err_stm;

        err = stp_configfs_init();
        if (err)
                goto err_src;

        init_srcu_struct(&stm_source_srcu);
        INIT_LIST_HEAD(&stm_pdrv_head);
        mutex_init(&stm_pdrv_mutex);

        /*
         * So as to not confuse existing users with a requirement
         * to load yet another module, do it here.
         */
        if (IS_ENABLED(CONFIG_STM_PROTO_BASIC))
                (void)request_module_nowait("stm_p_basic");
        stm_core_up++;

        return 0;

err_src:
        class_unregister(&stm_source_class);
err_stm:
        class_unregister(&stm_class);

        return err;
}

module_init(stm_core_init);

static void __exit stm_core_exit(void)
{
        cleanup_srcu_struct(&stm_source_srcu);
        class_unregister(&stm_source_class);
        class_unregister(&stm_class);
        stp_configfs_exit();
}

module_exit(stm_core_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("System Trace Module device class");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");