// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
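
/*
 * Illustrative use of for_each_subchannel() (a sketch, not taken from
 * this file): the callback is invoked for every possible subchannel ID
 * across all enabled subchannel sets; returning non-zero stops the
 * iteration, and that value is passed back to the caller.
 *
 *	static int count_ids(struct subchannel_id schid, void *data)
 *	{
 *		(*(unsigned long *) data)++;
 *		return 0;
 *	}
 *
 *	unsigned long n = 0;
 *	for_each_subchannel(count_ids, &n);
 */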

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
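
/*
 * A hedged usage note: the staged iterator above is what the slow-path
 * evaluation below relies on. A caller passes one callback for
 * subchannels already registered with the driver core and another for
 * subchannel IDs that are not (yet) known, e.g. (see
 * css_slow_path_func()):
 *
 *	for_each_subchannel_staged(slow_eval_known_fn,
 *				   slow_eval_unknown_fn, NULL);
 */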

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err_lock;
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_lock;

	return sch;

err_lock:
	kfree(sch->lock);
err:
	kfree(sch);
	return ERR_PTR(ret);
}
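
/*
 * Note on the object lifetime (added for clarity): css_alloc_subchannel()
 * returns a subchannel whose embedded device has already been through
 * device_initialize(). From this point on, error paths must drop the
 * reference with put_device() - which ends up in css_subchannel_release()
 * - rather than freeing the structure directly; see css_probe_device().
 */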

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = driver_set_override(dev, &sch->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);
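
/*
 * Example (illustrative, values are made up): reading 'chpids' for a
 * subchannel with two installed paths might yield
 *
 *	$ cat /sys/bus/css/devices/0.0.0000/chpids
 *	fa fb 00 00 00 00 00 00
 */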

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);
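
/*
 * Example (illustrative): 'dev_busid' reads as "0.0.4711" when a valid
 * device number is assigned to the subchannel, and as "none" otherwise.
 */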

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
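
/*
 * Illustrative caller pattern for css_sched_sch_todo() (a sketch; the
 * subchannel lock must be held, as css_sch_todo() below demonstrates
 * when it reschedules an evaluation):
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 *	spin_unlock_irq(sch->lock);
 */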

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time for platforms with lots
		 * of known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/*
		 * Allow scheduling here since the containing loop might
		 * take a while.
		 */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
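
/*
 * A sketch of the evaluation handshake (added for clarity): callers that
 * need the result of a full re-evaluation pair the scheduling call with
 * css_complete_work(), which sleeps until css_eval_scheduled drops back
 * to zero - see rescan_store() below:
 *
 *	css_schedule_eval_all();
 *	css_complete_work();
 */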

static int __unset_validpath(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	/*
	 * Keep only those subchannels that do not have an operational
	 * device attached. Whether a path is usable is derived from the
	 * PAM and POM values in the PMCW; the OPM additionally masks out
	 * any path that is currently varied off and must not be
	 * considered.
	 */
	if (sch->st == SUBCHANNEL_TYPE_IO &&
	    (sch->opm & pmcw->pam & pmcw->pom))
		idset_sch_del(set, sch->schid);

	return 0;
}

static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
		idset_sch_del(set, sch->schid);

	return 0;
}

void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Find unregistered subchannels. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_NO_PATH:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	&dev_attr_rescan.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}
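
/*
 * Illustrative usage of the gen_pool helpers above (a sketch, not taken
 * from this file): create a pool backed by two pages, carve a small
 * buffer out of it and tear everything down again:
 *
 *	struct gen_pool *pool;
 *	void *buf;
 *
 *	pool = cio_gp_dma_create(cio_get_dma_css_dev(), 2);
 *	buf = cio_gp_dma_zalloc(pool, cio_get_dma_css_dev(), 64);
 *	...
 *	cio_gp_dma_free(pool, buf, 64);
 *	cio_gp_dma_destroy(pool, cio_get_dma_css_dev());
 */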

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but there is no better way */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
					chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannel instances are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}


/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};
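
/*
 * Usage note (illustrative): writing anything to /proc/cio_settle blocks
 * the writer until all pending channel report words are processed and
 * subchannel evaluation has settled, e.g.:
 *
 *	$ echo > /proc/cio_settle
 */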

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}
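
/*
 * Illustrative (hedged) example of the driver_override mechanism checked
 * above: userspace can restrict a subchannel to a specific driver via
 * sysfs and then trigger a re-probe, e.g.
 *
 *	$ echo io_subchannel > /sys/bus/css/devices/0.0.0000/driver_override
 *	$ echo 0.0.0000 > /sys/bus/css/drivers_probe
 */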

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static void css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->remove)
		sch->driver->remove(sch);
	sch->driver = NULL;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
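
/*
 * A registration sketch (illustrative; field names as declared for
 * struct css_driver in css.h, and all "foo" names are hypothetical):
 *
 *	static struct css_device_id foo_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static struct css_driver foo_driver = {
 *		.drv = {
 *			.name  = "foo",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = foo_ids,
 *		.probe = foo_probe,
 *	};
 *
 *	css_driver_register(&foo_driver);
 */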

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);