// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for s390 chsc subchannels
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 */

#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/kernel_stat.h>

#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc_sch.h"
#include "ioasm.h"

static debug_info_t *chsc_debug_msg_id;
static debug_info_t *chsc_debug_log_id;

static struct chsc_request *on_close_request;
static struct chsc_async_area *on_close_chsc_area;
static DEFINE_MUTEX(on_close_mutex);

#define CHSC_MSG(imp, args...) do {					\
		debug_sprintf_event(chsc_debug_msg_id, imp, ##args);	\
	} while (0)

#define CHSC_LOG(imp, txt) do {					\
		debug_text_event(chsc_debug_log_id, imp, txt);	\
	} while (0)

static void CHSC_LOG_HEX(int level, void *data, int length)
{
	debug_event(chsc_debug_log_id, level, data, length);
}

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("driver for s390 chsc subchannels");
MODULE_LICENSE("GPL");

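/*
 * Interrupt handler for chsc subchannels: copy the irb into the pending
 * request (if any), update the schib and complete the requester, dropping
 * the device reference taken when the request was started.
 */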
static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	struct chsc_request *request = private->request;
	struct irb *irb = this_cpu_ptr(&cio_irb);

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	inc_irq_stat(IRQIO_CSC);

	/* Copy irb to provided request and set done. */
	if (!request) {
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	private->request = NULL;
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);
}

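/*
 * Probe: allocate the per-subchannel private data and enable the
 * subchannel for I/O on the chsc ISC.
 */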
static int chsc_subchannel_probe(struct subchannel *sch)
{
	struct chsc_private *private;
	int ret;

	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
		 sch->schid.ssid, sch->schid.sch_no);
	sch->isc = CHSC_SCH_ISC;
	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	dev_set_drvdata(&sch->dev, private);
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
			 sch->schid.ssid, sch->schid.sch_no, ret);
		dev_set_drvdata(&sch->dev, NULL);
		kfree(private);
	}
	return ret;
}

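/*
 * Remove: disable the subchannel and complete any request still pending
 * on it so that waiters are not left hanging.
 */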
static void chsc_subchannel_remove(struct subchannel *sch)
{
	struct chsc_private *private;

	cio_disable_subchannel(sch);
	private = dev_get_drvdata(&sch->dev);
	dev_set_drvdata(&sch->dev, NULL);
	if (private->request) {
		complete(&private->request->completion);
		put_device(&sch->dev);
	}
	kfree(private);
}

static void chsc_subchannel_shutdown(struct subchannel *sch)
{
	cio_disable_subchannel(sch);
}

static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);

static struct css_driver chsc_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "chsc_subchannel",
	},
	.subchannel_type = chsc_subchannel_ids,
	.irq = chsc_subchannel_irq,
	.probe = chsc_subchannel_probe,
	.remove = chsc_subchannel_remove,
	.shutdown = chsc_subchannel_shutdown,
};

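/*
 * Set up the two s390 debug feature areas used by this driver: a sprintf
 * view for messages and a hex/ascii view for raw command and irb dumps.
 */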
static int __init chsc_init_dbfs(void)
{
	chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
	if (!chsc_debug_msg_id)
		goto out;
	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
	debug_set_level(chsc_debug_msg_id, 2);
	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
	if (!chsc_debug_log_id)
		goto out;
	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
	debug_set_level(chsc_debug_log_id, 2);
	return 0;
out:
	debug_unregister(chsc_debug_msg_id);
	return -ENOMEM;
}

static void chsc_remove_dbfs(void)
{
	debug_unregister(chsc_debug_log_id);
	debug_unregister(chsc_debug_msg_id);
}

static int __init chsc_init_sch_driver(void)
{
	return css_driver_register(&chsc_subchannel_driver);
}

static void chsc_cleanup_sch_driver(void)
{
	css_driver_unregister(&chsc_subchannel_driver);
}

static DEFINE_SPINLOCK(chsc_lock);

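/*
 * Iterate over the chsc subchannels bound to this driver and return the
 * next one that is enabled and currently idle (no function pending in its
 * scsw). The returned subchannel carries a device reference.
 */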
static int chsc_subchannel_match_next_free(struct device *dev, const void *data)
{
	struct subchannel *sch = to_subchannel(dev);

	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
}

static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
{
	struct device *dev;

	dev = driver_find_device(&chsc_subchannel_driver.drv,
				 sch ? &sch->dev : NULL, NULL,
				 chsc_subchannel_match_next_free);
	return dev ? to_subchannel(dev) : NULL;
}

/**
 * chsc_async() - try to start a chsc request asynchronously
 * @chsc_area: request to be started
 * @request: request structure to associate
 *
 * Tries to start a chsc request on one of the existing chsc subchannels.
 * Returns:
 *  %0 if the request was performed synchronously
 *  %-EINPROGRESS if the request was successfully started
 *  %-EBUSY if all chsc subchannels are busy
 *  %-ENODEV if no chsc subchannels are available
 * Context:
 *  interrupts disabled, chsc_lock held
 */
static int chsc_async(struct chsc_async_area *chsc_area,
		      struct chsc_request *request)
{
	int cc;
	struct chsc_private *private;
	struct subchannel *sch = NULL;
	int ret = -ENODEV;
	char dbf[10];

	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
	while ((sch = chsc_get_next_subchannel(sch))) {
		spin_lock(sch->lock);
		private = dev_get_drvdata(&sch->dev);
		if (private->request) {
			spin_unlock(sch->lock);
			ret = -EBUSY;
			continue;
		}
		chsc_area->header.sid = sch->schid;
		CHSC_LOG(2, "schid");
		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
		cc = chsc(chsc_area);
		snprintf(dbf, sizeof(dbf), "cc:%d", cc);
		CHSC_LOG(2, dbf);
		switch (cc) {
		case 0:
			ret = 0;
			break;
		case 1:
			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
			ret = -EINPROGRESS;
			private->request = request;
			break;
		case 2:
			ret = -EBUSY;
			break;
		default:
			ret = -ENODEV;
		}
		spin_unlock(sch->lock);
		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
			 sch->schid.ssid, sch->schid.sch_no, cc);
		if (ret == -EINPROGRESS)
			return -EINPROGRESS;
		put_device(&sch->dev);
		if (ret == 0)
			return 0;
	}
	return ret;
}
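
/*
 * Typical calling pattern for chsc_async() (this is what chsc_ioctl_start()
 * and chsc_release() below do):
 *
 *	spin_lock_irq(&chsc_lock);
 *	ret = chsc_async(chsc_area, request);
 *	spin_unlock_irq(&chsc_lock);
 *	if (ret == -EINPROGRESS) {
 *		wait_for_completion(&request->completion);
 *		ret = chsc_examine_irb(request);
 *	}
 */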

static void chsc_log_command(void *chsc_area)
{
	char dbf[10];

	snprintf(dbf, sizeof(dbf), "CHSC:%x", ((uint16_t *)chsc_area)[1]);
	CHSC_LOG(0, dbf);
	CHSC_LOG_HEX(0, chsc_area, 32);
}

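/*
 * Derive a return code from the irb stored by the interrupt handler.
 * Missing status is reported as -EIO; a backed-up chain check is mapped
 * to -EIO/-EPERM/-EAGAIN depending on the accompanying channel status.
 */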
static int chsc_examine_irb(struct chsc_request *request)
{
	int backed_up;

	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
		return -EIO;
	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
	if (scsw_cstat(&request->irb.scsw) == 0)
		return 0;
	if (!backed_up)
		return 0;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
		return -EIO;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
		return -EPERM;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
		return -EAGAIN;
	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
		return -EAGAIN;
	return -EIO;
}

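/*
 * CHSC_START ioctl: copy the command from user space into a DMA page,
 * start it asynchronously and, if it was accepted, wait for the interrupt
 * before copying the result back.
 */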
static int chsc_ioctl_start(void __user *user_area)
{
	struct chsc_request *request;
	struct chsc_async_area *chsc_area;
	int ret;
	char dbf[10];

	if (!css_general_characteristics.dynio)
		/* It makes no sense to try. */
		return -EOPNOTSUPP;
	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!chsc_area)
		return -ENOMEM;
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request) {
		ret = -ENOMEM;
		goto out_free;
	}
	init_completion(&request->completion);
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(chsc_area, request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&request->completion);
		ret = chsc_examine_irb(request);
	}
	/* copy area back to user */
	if (!ret)
		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
			ret = -EFAULT;
out_free:
	snprintf(dbf, sizeof(dbf), "ret:%d", ret);
	CHSC_LOG(0, dbf);
	kfree(request);
	free_page((unsigned long)chsc_area);
	return ret;
}

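/*
 * CHSC_ON_CLOSE_SET ioctl: stash a command that will be issued when the
 * device is closed (see chsc_release()). Only one such command can be
 * registered at a time.
 */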
static int chsc_ioctl_on_close_set(void __user *user_area)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (on_close_chsc_area) {
		ret = -EBUSY;
		goto out_unlock;
	}
	on_close_request = kzalloc(sizeof(*on_close_request), GFP_KERNEL);
	if (!on_close_request) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
	if (!on_close_chsc_area) {
		ret = -ENOMEM;
		goto out_free_request;
	}
	if (copy_from_user(on_close_chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free_chsc;
	}
	ret = 0;
	goto out_unlock;

out_free_chsc:
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
out_free_request:
	kfree(on_close_request);
	on_close_request = NULL;
out_unlock:
	mutex_unlock(&on_close_mutex);
	snprintf(dbf, sizeof(dbf), "ocsret:%d", ret);
	CHSC_LOG(0, dbf);
	return ret;
}

static int chsc_ioctl_on_close_remove(void)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (!on_close_chsc_area) {
		ret = -ENOENT;
		goto out_unlock;
	}
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
	kfree(on_close_request);
	on_close_request = NULL;
	ret = 0;
out_unlock:
	mutex_unlock(&on_close_mutex);
	snprintf(dbf, sizeof(dbf), "ocrret:%d", ret);
	CHSC_LOG(0, dbf);
	return ret;
}

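/*
 * CHSC_START_SYNC ioctl: copy a command from user space and issue it
 * synchronously with the chsc instruction; command codes with the 0x4000
 * bit set are rejected.
 */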
static int chsc_ioctl_start_sync(void __user *user_area)
{
	struct chsc_sync_area *chsc_area;
	int ret, ccode;

	chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!chsc_area)
		return -ENOMEM;
	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
		ret = -EFAULT;
		goto out_free;
	}
	if (chsc_area->header.code & 0x4000) {
		ret = -EINVAL;
		goto out_free;
	}
	chsc_log_command(chsc_area);
	ccode = chsc(chsc_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	free_page((unsigned long)chsc_area);
	return ret;
}

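/*
 * CHSC_INFO_CHANNEL_PATH ioctl: issue chsc command 0x0028 to retrieve
 * channel-path data for the chpid supplied by the caller.
 */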
static int chsc_ioctl_info_channel_path(void __user *user_cd)
{
	struct chsc_chp_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpcd_area;

	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpcd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scpcd_area->request.length = 0x0010;
	scpcd_area->request.code = 0x0028;
	scpcd_area->m = cd->m;
	scpcd_area->fmt1 = cd->fmt;
	scpcd_area->cssid = cd->chpid.cssid;
	scpcd_area->first_chpid = cd->chpid.id;
	scpcd_area->last_chpid = cd->chpid.id;

	ccode = chsc(scpcd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scpcd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scpcd: response code=%x\n",
			 scpcd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scpcd_area);
	return ret;
}

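/* CHSC_INFO_CU ioctl: chsc command 0x0026, data for one control unit. */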
static int chsc_ioctl_info_cu(void __user *user_cd)
{
	struct chsc_cu_cd *cd;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 8;
		u32 first_cun : 8;
		u32 : 24;
		u32 last_cun : 8;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scucd_area;

	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scucd_area)
		return -ENOMEM;
	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
		ret = -EFAULT;
		goto out_free;
	}
	scucd_area->request.length = 0x0010;
	scucd_area->request.code = 0x0026;
	scucd_area->m = cd->m;
	scucd_area->fmt1 = cd->fmt;
	scucd_area->cssid = cd->cssid;
	scucd_area->first_cun = cd->cun;
	scucd_area->last_cun = cd->cun;

	ccode = chsc(scucd_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (scucd_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "scucd: response code=%x\n",
			 scucd_area->response.code);
		goto out_free;
	}
	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
	if (copy_to_user(user_cd, cd, sizeof(*cd)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cd);
	free_page((unsigned long)scucd_area);
	return ret;
}

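/* CHSC_INFO_SCH_CU ioctl: chsc command 0x0006, data for one subchannel. */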
static int chsc_ioctl_info_sch_cu(void __user *user_cud)
{
	struct chsc_sch_cud *cud;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 5;
		u32 fmt1 : 4;
		u32 : 2;
		u32 ssid : 2;
		u32 first_sch : 16;
		u32 : 8;
		u32 cssid : 8;
		u32 last_sch : 16;
		u32 : 32;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sscud_area;

	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sscud_area)
		return -ENOMEM;
	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
	if (!cud) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
		ret = -EFAULT;
		goto out_free;
	}
	sscud_area->request.length = 0x0010;
	sscud_area->request.code = 0x0006;
	sscud_area->m = cud->schid.m;
	sscud_area->fmt1 = cud->fmt;
	sscud_area->ssid = cud->schid.ssid;
	sscud_area->first_sch = cud->schid.sch_no;
	sscud_area->cssid = cud->schid.cssid;
	sscud_area->last_sch = cud->schid.sch_no;

	ccode = chsc(sscud_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sscud_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sscud: response code=%x\n",
			 sscud_area->response.code);
		goto out_free;
	}
	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
	if (copy_to_user(user_cud, cud, sizeof(*cud)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(cud);
	free_page((unsigned long)sscud_area);
	return ret;
}

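/* CHSC_INFO_CI ioctl: chsc command 0x0012, configuration information. */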
static int chsc_ioctl_conf_info(void __user *user_ci)
{
	struct chsc_conf_info *ci;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 : 1;
		u32 fmt1 : 4;
		u32 cssid : 8;
		u32 : 6;
		u32 ssid : 2;
		u32 : 8;
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *sci_area;

	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sci_area)
		return -ENOMEM;
	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
	if (!ci) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
		ret = -EFAULT;
		goto out_free;
	}
	sci_area->request.length = 0x0010;
	sci_area->request.code = 0x0012;
	sci_area->m = ci->id.m;
	sci_area->fmt1 = ci->fmt;
	sci_area->cssid = ci->id.cssid;
	sci_area->ssid = ci->id.ssid;

	ccode = chsc(sci_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sci_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sci: response code=%x\n",
			 sci_area->response.code);
		goto out_free;
	}
	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
	if (copy_to_user(user_ci, ci, sizeof(*ci)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ci);
	free_page((unsigned long)sci_area);
	return ret;
}

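/*
 * CHSC_INFO_CCL ioctl: chsc command 0x0030, component list. The list
 * parameter is interpreted either as a chpid or as a cssid range,
 * depending on the requested component type.
 */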
static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
{
	struct chsc_comp_list *ccl;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 ctype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u64 : 64;
		u32 list_parm[2];
		u64 : 64;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sccl_area;
	struct {
		u32 m : 1;
		u32 : 31;
		u32 cssid : 8;
		u32 : 16;
		u32 chpid : 8;
	} __attribute__ ((packed)) *chpid_parm;
	struct {
		u32 f_cssid : 8;
		u32 l_cssid : 8;
		u32 : 16;
		u32 res;
	} __attribute__ ((packed)) *cssids_parm;

	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccl_area)
		return -ENOMEM;
	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
	if (!ccl) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
		ret = -EFAULT;
		goto out_free;
	}
	sccl_area->request.length = 0x0020;
	sccl_area->request.code = 0x0030;
	sccl_area->fmt = ccl->req.fmt;
	sccl_area->ctype = ccl->req.ctype;
	switch (sccl_area->ctype) {
	case CCL_CU_ON_CHP:
	case CCL_IOP_CHP:
		chpid_parm = (void *)&sccl_area->list_parm;
		chpid_parm->m = ccl->req.chpid.m;
		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
		chpid_parm->chpid = ccl->req.chpid.chp.id;
		break;
	case CCL_CSS_IMG:
	case CCL_CSS_IMG_CONF_CHAR:
		cssids_parm = (void *)&sccl_area->list_parm;
		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
		break;
	}
	ccode = chsc(sccl_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sccl_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sccl: response code=%x\n",
			 sccl_area->response.code);
		goto out_free;
	}
	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(ccl);
	free_page((unsigned long)sccl_area);
	return ret;
}

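/*
 * CHSC_INFO_CPD ioctl: channel-path description, obtained through
 * chsc_determine_channel_path_desc().
 */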
static int chsc_ioctl_chpd(void __user *user_chpd)
{
	struct chsc_scpd *scpd_area;
	struct chsc_cpd_info *chpd;
	int ret;

	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area || !chpd) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
		ret = -EFAULT;
		goto out_free;
	}
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
					       chpd->rfmt, chpd->c, chpd->m,
					       scpd_area);
	if (ret)
		goto out_free;
	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
		ret = -EFAULT;
out_free:
	kfree(chpd);
	free_page((unsigned long)scpd_area);
	return ret;
}

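/*
 * CHSC_INFO_DCAL ioctl: chsc command 0x0034, driven by the access type
 * and list parameter supplied by the caller.
 */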
static int chsc_ioctl_dcal(void __user *user_dcal)
{
	struct chsc_dcal *dcal;
	int ret, ccode;
	struct {
		struct chsc_header request;
		u32 atype : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
		u32 res0[2];
		u32 list_parm[2];
		u32 res1[2];
		struct chsc_header response;
		u8 data[PAGE_SIZE - 36];
	} __attribute__ ((packed)) *sdcal_area;

	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sdcal_area)
		return -ENOMEM;
	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
	if (!dcal) {
		ret = -ENOMEM;
		goto out_free;
	}
	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
		ret = -EFAULT;
		goto out_free;
	}
	sdcal_area->request.length = 0x0020;
	sdcal_area->request.code = 0x0034;
	sdcal_area->atype = dcal->req.atype;
	sdcal_area->fmt = dcal->req.fmt;
	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
	       sizeof(sdcal_area->list_parm));

	ccode = chsc(sdcal_area);
	if (ccode != 0) {
		ret = -EIO;
		goto out_free;
	}
	if (sdcal_area->response.code != 0x0001) {
		ret = -EIO;
		CHSC_MSG(0, "sdcal: response code=%x\n",
			 sdcal_area->response.code);
		goto out_free;
	}
	memcpy(&dcal->sdcal, &sdcal_area->response,
	       sdcal_area->response.length);
	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
		ret = -EFAULT;
	else
		ret = 0;
out_free:
	kfree(dcal);
	free_page((unsigned long)sdcal_area);
	return ret;
}

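/*
 * ioctl dispatcher for the chsc misc device; also used as compat_ioctl,
 * with the argument pointer converted via compat_ptr() for compat callers.
 */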
static long chsc_ioctl(struct file *filp, unsigned int cmd,
		       unsigned long arg)
{
	void __user *argp;

	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (void __user *)arg;
	switch (cmd) {
	case CHSC_START:
		return chsc_ioctl_start(argp);
	case CHSC_START_SYNC:
		return chsc_ioctl_start_sync(argp);
	case CHSC_INFO_CHANNEL_PATH:
		return chsc_ioctl_info_channel_path(argp);
	case CHSC_INFO_CU:
		return chsc_ioctl_info_cu(argp);
	case CHSC_INFO_SCH_CU:
		return chsc_ioctl_info_sch_cu(argp);
	case CHSC_INFO_CI:
		return chsc_ioctl_conf_info(argp);
	case CHSC_INFO_CCL:
		return chsc_ioctl_conf_comp_list(argp);
	case CHSC_INFO_CPD:
		return chsc_ioctl_chpd(argp);
	case CHSC_INFO_DCAL:
		return chsc_ioctl_dcal(argp);
	case CHSC_ON_CLOSE_SET:
		return chsc_ioctl_on_close_set(argp);
	case CHSC_ON_CLOSE_REMOVE:
		return chsc_ioctl_on_close_remove();
	default: /* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}

static atomic_t chsc_ready_for_use = ATOMIC_INIT(1);

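/*
 * Allow only one open file descriptor at a time, using chsc_ready_for_use
 * as a simple open counter.
 */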
static int chsc_open(struct inode *inode, struct file *file)
{
	if (!atomic_dec_and_test(&chsc_ready_for_use)) {
		atomic_inc(&chsc_ready_for_use);
		return -EBUSY;
	}
	return nonseekable_open(inode, file);
}

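/*
 * On release, issue the command registered via CHSC_ON_CLOSE_SET (if any)
 * and free the associated resources before allowing the next open.
 */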
static int chsc_release(struct inode *inode, struct file *filp)
{
	char dbf[13];
	int ret;

	mutex_lock(&on_close_mutex);
	if (!on_close_chsc_area)
		goto out_unlock;
	init_completion(&on_close_request->completion);
	CHSC_LOG(0, "on_close");
	chsc_log_command(on_close_chsc_area);
	spin_lock_irq(&chsc_lock);
	ret = chsc_async(on_close_chsc_area, on_close_request);
	spin_unlock_irq(&chsc_lock);
	if (ret == -EINPROGRESS) {
		wait_for_completion(&on_close_request->completion);
		ret = chsc_examine_irb(on_close_request);
	}
	snprintf(dbf, sizeof(dbf), "relret:%d", ret);
	CHSC_LOG(0, dbf);
	free_page((unsigned long)on_close_chsc_area);
	on_close_chsc_area = NULL;
	kfree(on_close_request);
	on_close_request = NULL;
out_unlock:
	mutex_unlock(&on_close_mutex);
	atomic_inc(&chsc_ready_for_use);
	return 0;
}

static const struct file_operations chsc_fops = {
	.owner = THIS_MODULE,
	.open = chsc_open,
	.release = chsc_release,
	.unlocked_ioctl = chsc_ioctl,
	.compat_ioctl = chsc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice chsc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "chsc",
	.fops = &chsc_fops,
};

static int __init chsc_misc_init(void)
{
	return misc_register(&chsc_misc_device);
}

static void chsc_misc_cleanup(void)
{
	misc_deregister(&chsc_misc_device);
}

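/*
 * Module init: set up debug areas, register the chsc ISC, the subchannel
 * driver and the misc device, unwinding in reverse order on failure.
 */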
static int __init chsc_sch_init(void)
{
	int ret;

	ret = chsc_init_dbfs();
	if (ret)
		return ret;
	isc_register(CHSC_SCH_ISC);
	ret = chsc_init_sch_driver();
	if (ret)
		goto out_dbf;
	ret = chsc_misc_init();
	if (ret)
		goto out_driver;
	return ret;
out_driver:
	chsc_cleanup_sch_driver();
out_dbf:
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
	return ret;
}

static void __exit chsc_sch_exit(void)
{
	chsc_misc_cleanup();
	chsc_cleanup_sch_driver();
	isc_unregister(CHSC_SCH_ISC);
	chsc_remove_dbfs();
}

module_init(chsc_sch_init);
module_exit(chsc_sch_exit);