xref: /openbmc/linux/drivers/macintosh/smu.c (revision 96de0e252cedffad61b3cb5e05662c591898e69a)
1 /*
2  * PowerMac G5 SMU driver
3  *
4  * Copyright 2004 J. Mayer <l_indien@magic.fr>
5  * Copyright 2005 Benjamin Herrenschmidt, IBM Corp.
6  *
7  * Released under the term of the GNU GPL v2.
8  */
9 
10 /*
11  * TODO:
12  *  - maybe add timeout to commands ?
13  *  - blocking version of time functions
14  *  - polling version of i2c commands (including timer that works with
15  *    interrupts off)
16  *  - maybe avoid some data copies with i2c by directly using the smu cmd
17  *    buffer and a lower level internal interface
18  *  - understand SMU -> CPU events and implement reception of them via
19  *    the userland interface
20  */
21 
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/device.h>
25 #include <linux/dmapool.h>
26 #include <linux/bootmem.h>
27 #include <linux/vmalloc.h>
28 #include <linux/highmem.h>
29 #include <linux/jiffies.h>
30 #include <linux/interrupt.h>
31 #include <linux/rtc.h>
32 #include <linux/completion.h>
33 #include <linux/miscdevice.h>
34 #include <linux/delay.h>
35 #include <linux/sysdev.h>
36 #include <linux/poll.h>
37 #include <linux/mutex.h>
38 
39 #include <asm/byteorder.h>
40 #include <asm/io.h>
41 #include <asm/prom.h>
42 #include <asm/machdep.h>
43 #include <asm/pmac_feature.h>
44 #include <asm/smu.h>
45 #include <asm/sections.h>
46 #include <asm/abs_addr.h>
47 #include <asm/uaccess.h>
48 #include <asm/of_device.h>
49 #include <asm/of_platform.h>
50 
51 #define VERSION "0.7"
52 #define AUTHOR  "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
53 
54 #undef DEBUG_SMU
55 
56 #ifdef DEBUG_SMU
57 #define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
58 #else
59 #define DPRINTK(fmt, args...) do { } while (0)
60 #endif
61 
62 /*
63  * This is the command buffer passed to the SMU hardware
64  */
65 #define SMU_MAX_DATA	254
66 
67 struct smu_cmd_buf {
68 	u8 cmd;
69 	u8 length;
70 	u8 data[SMU_MAX_DATA];
71 };
72 
73 struct smu_device {
74 	spinlock_t		lock;
75 	struct device_node	*of_node;
76 	struct of_device	*of_dev;
77 	int			doorbell;	/* doorbell gpio */
78 	u32 __iomem		*db_buf;	/* doorbell buffer */
79 	struct device_node	*db_node;
80 	unsigned int		db_irq;
81 	int			msg;
82 	struct device_node	*msg_node;
83 	unsigned int		msg_irq;
84 	struct smu_cmd_buf	*cmd_buf;	/* command buffer virtual */
85 	u32			cmd_buf_abs;	/* command buffer absolute */
86 	struct list_head	cmd_list;
87 	struct smu_cmd		*cmd_cur;	/* pending command */
88 	struct list_head	cmd_i2c_list;
89 	struct smu_i2c_cmd	*cmd_i2c_cur;	/* pending i2c command */
90 	struct timer_list	i2c_timer;
91 };
92 
93 /*
94  * I don't think there will ever be more than one SMU, so
95  * for now, just hard code that
96  */
97 static struct smu_device	*smu;
98 static DEFINE_MUTEX(smu_part_access);
99 static int smu_irq_inited;
100 
101 static void smu_i2c_retry(unsigned long data);
102 
103 /*
104  * SMU driver low level stuff
105  */
106 
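/*
 * Dequeue the first pending command, copy it into the shared SMU command
 * buffer, flush it out of the cache and ring the doorbell GPIO.
 * Callers hold smu->lock while calling this.
 */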
107 static void smu_start_cmd(void)
108 {
109 	unsigned long faddr, fend;
110 	struct smu_cmd *cmd;
111 
112 	if (list_empty(&smu->cmd_list))
113 		return;
114 
115 	/* Fetch first command in queue */
116 	cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link);
117 	smu->cmd_cur = cmd;
118 	list_del(&cmd->link);
119 
120 	DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
121 		cmd->data_len);
122 	DPRINTK("SMU: data buffer: %02x %02x %02x %02x %02x %02x %02x %02x\n",
123 		((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
124 		((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3],
125 		((u8 *)cmd->data_buf)[4], ((u8 *)cmd->data_buf)[5],
126 		((u8 *)cmd->data_buf)[6], ((u8 *)cmd->data_buf)[7]);
127 
128 	/* Fill the SMU command buffer */
129 	smu->cmd_buf->cmd = cmd->cmd;
130 	smu->cmd_buf->length = cmd->data_len;
131 	memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
132 
133 	/* Flush command and data to RAM */
134 	faddr = (unsigned long)smu->cmd_buf;
135 	fend = faddr + smu->cmd_buf->length + 2;
136 	flush_inval_dcache_range(faddr, fend);
137 
138 	/* This isn't exactly a DMA mapping here, I suspect
139 	 * the SMU is actually communicating with us via i2c to the
140 	 * northbridge or the CPU to access RAM.
141 	 */
142 	writel(smu->cmd_buf_abs, smu->db_buf);
143 
144 	/* Ring the SMU doorbell */
145 	pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4);
146 }
147 
148 
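/*
 * Doorbell interrupt: fires when the SMU acks a command. The low three
 * bits of the doorbell GPIO apparently read back as 7 on completion;
 * anything else is treated as a spurious edge and ignored.
 */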
149 static irqreturn_t smu_db_intr(int irq, void *arg)
150 {
151 	unsigned long flags;
152 	struct smu_cmd *cmd;
153 	void (*done)(struct smu_cmd *cmd, void *misc) = NULL;
154 	void *misc = NULL;
155 	u8 gpio;
156 	int rc = 0;
157 
158 	/* SMU completed the command, well, we hope, let's make sure
159 	 * of it
160 	 */
161 	spin_lock_irqsave(&smu->lock, flags);
162 
163 	gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
164 	if ((gpio & 7) != 7) {
165 		spin_unlock_irqrestore(&smu->lock, flags);
166 		return IRQ_HANDLED;
167 	}
168 
169 	cmd = smu->cmd_cur;
170 	smu->cmd_cur = NULL;
171 	if (cmd == NULL)
172 		goto bail;
173 
174 	if (rc == 0) {
175 		unsigned long faddr;
176 		int reply_len;
177 		u8 ack;
178 
179 		/* CPU might have brought back the cache line, so we need
180 		 * to flush again before peeking at the SMU response. We
181 		 * flush the entire buffer for now as we haven't read the
182 		 * reply length (it's only 2 cache lines anyway)
183 		 */
184 		faddr = (unsigned long)smu->cmd_buf;
185 		flush_inval_dcache_range(faddr, faddr + 256);
186 
187 		/* Now check ack */
188 		ack = (~cmd->cmd) & 0xff;
189 		if (ack != smu->cmd_buf->cmd) {
190 			DPRINTK("SMU: incorrect ack, want %x got %x\n",
191 				ack, smu->cmd_buf->cmd);
192 			rc = -EIO;
193 		}
194 		reply_len = rc == 0 ? smu->cmd_buf->length : 0;
195 		DPRINTK("SMU: reply len: %d\n", reply_len);
196 		if (reply_len > cmd->reply_len) {
197 			printk(KERN_WARNING "SMU: reply buffer too small, "
198 			       "got %d bytes for a %d byte buffer\n",
199 			       reply_len, cmd->reply_len);
200 			reply_len = cmd->reply_len;
201 		}
202 		cmd->reply_len = reply_len;
203 		if (cmd->reply_buf && reply_len)
204 			memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len);
205 	}
206 
207 	/* Now complete the command. Write the status last, as we lose
208 	 * ownership of the command structure as soon as it is no longer 1
209 	 */
210 	done = cmd->done;
211 	misc = cmd->misc;
212 	mb();
213 	cmd->status = rc;
214  bail:
215 	/* Start next command if any */
216 	smu_start_cmd();
217 	spin_unlock_irqrestore(&smu->lock, flags);
218 
219 	/* Call command completion handler if any */
220 	if (done)
221 		done(cmd, misc);
222 
223 	/* It's an edge interrupt, nothing to do */
224 	return IRQ_HANDLED;
225 }
226 
227 
228 static irqreturn_t smu_msg_intr(int irq, void *arg)
229 {
230 	/* I don't quite know what to do with this one, we never seem to
231 	 * receive it, so I suspect we have to arm it somehow in the SMU
232 	 * to start getting events that way.
233 	 */
234 
235 	printk(KERN_INFO "SMU: message interrupt !\n");
236 
237 	/* It's an edge interrupt, nothing to do */
238 	return IRQ_HANDLED;
239 }
240 
241 
242 /*
243  * Queued command management.
244  *
245  */
246 
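/*
 * Queue a command for asynchronous execution. The caller's smu_cmd must
 * remain valid until its done() callback runs; cmd->status stays 1 while
 * the command is pending and becomes 0 or a negative errno on completion.
 */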
247 int smu_queue_cmd(struct smu_cmd *cmd)
248 {
249 	unsigned long flags;
250 
251 	if (smu == NULL)
252 		return -ENODEV;
253 	if (cmd->data_len > SMU_MAX_DATA ||
254 	    cmd->reply_len > SMU_MAX_DATA)
255 		return -EINVAL;
256 
257 	cmd->status = 1;
258 	spin_lock_irqsave(&smu->lock, flags);
259 	list_add_tail(&cmd->link, &smu->cmd_list);
260 	if (smu->cmd_cur == NULL)
261 		smu_start_cmd();
262 	spin_unlock_irqrestore(&smu->lock, flags);
263 
264 	/* Workaround for early calls when irq isn't available */
265 	if (!smu_irq_inited || smu->db_irq == NO_IRQ)
266 		smu_spinwait_cmd(cmd);
267 
268 	return 0;
269 }
270 EXPORT_SYMBOL(smu_queue_cmd);
271 
272 
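/*
 * Build and queue a "simple" command whose data bytes are passed as
 * varargs and whose reply lands back in scmd->buffer. Illustrative use
 * (this mirrors smu_get_rtc_time() below):
 *
 *	struct smu_simple_cmd cmd;
 *	smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
 *			 SMU_CMD_RTC_GET_DATETIME);
 *	smu_spinwait_simple(&cmd);
 */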
273 int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
274 		     unsigned int data_len,
275 		     void (*done)(struct smu_cmd *cmd, void *misc),
276 		     void *misc, ...)
277 {
278 	struct smu_cmd *cmd = &scmd->cmd;
279 	va_list list;
280 	int i;
281 
282 	if (data_len > sizeof(scmd->buffer))
283 		return -EINVAL;
284 
285 	memset(scmd, 0, sizeof(*scmd));
286 	cmd->cmd = command;
287 	cmd->data_len = data_len;
288 	cmd->data_buf = scmd->buffer;
289 	cmd->reply_len = sizeof(scmd->buffer);
290 	cmd->reply_buf = scmd->buffer;
291 	cmd->done = done;
292 	cmd->misc = misc;
293 
294 	va_start(list, misc);
295 	for (i = 0; i < data_len; ++i)
296 		scmd->buffer[i] = (u8)va_arg(list, int);
297 	va_end(list);
298 
299 	return smu_queue_cmd(cmd);
300 }
301 EXPORT_SYMBOL(smu_queue_simple);
302 
303 
304 void smu_poll(void)
305 {
306 	u8 gpio;
307 
308 	if (smu == NULL)
309 		return;
310 
311 	gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
312 	if ((gpio & 7) == 7)
313 		smu_db_intr(smu->db_irq, smu);
314 }
315 EXPORT_SYMBOL(smu_poll);
316 
317 
318 void smu_done_complete(struct smu_cmd *cmd, void *misc)
319 {
320 	struct completion *comp = misc;
321 
322 	complete(comp);
323 }
324 EXPORT_SYMBOL(smu_done_complete);
325 
326 
327 void smu_spinwait_cmd(struct smu_cmd *cmd)
328 {
329 	while(cmd->status == 1)
330 		smu_poll();
331 }
332 EXPORT_SYMBOL(smu_spinwait_cmd);
333 
334 
335 /* RTC low level commands */
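/* Trivial BCD helpers, e.g. bcd2hex(0x42) == 42 and hex2bcd(42) == 0x42 */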
336 static inline int bcd2hex (int n)
337 {
338 	return (((n & 0xf0) >> 4) * 10) + (n & 0xf);
339 }
340 
341 
342 static inline int hex2bcd (int n)
343 {
344 	return ((n / 10) << 4) + (n % 10);
345 }
346 
347 
348 static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
349 					struct rtc_time *time)
350 {
351 	cmd_buf->cmd = 0x8e;
352 	cmd_buf->length = 8;
353 	cmd_buf->data[0] = 0x80;
354 	cmd_buf->data[1] = hex2bcd(time->tm_sec);
355 	cmd_buf->data[2] = hex2bcd(time->tm_min);
356 	cmd_buf->data[3] = hex2bcd(time->tm_hour);
357 	cmd_buf->data[4] = time->tm_wday;
358 	cmd_buf->data[5] = hex2bcd(time->tm_mday);
359 	cmd_buf->data[6] = hex2bcd(time->tm_mon + 1);
360 	cmd_buf->data[7] = hex2bcd(time->tm_year - 100);
361 }
362 
363 
364 int smu_get_rtc_time(struct rtc_time *time, int spinwait)
365 {
366 	struct smu_simple_cmd cmd;
367 	int rc;
368 
369 	if (smu == NULL)
370 		return -ENODEV;
371 
372 	memset(time, 0, sizeof(struct rtc_time));
373 	rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
374 			      SMU_CMD_RTC_GET_DATETIME);
375 	if (rc)
376 		return rc;
377 	smu_spinwait_simple(&cmd);
378 
379 	time->tm_sec = bcd2hex(cmd.buffer[0]);
380 	time->tm_min = bcd2hex(cmd.buffer[1]);
381 	time->tm_hour = bcd2hex(cmd.buffer[2]);
382 	time->tm_wday = bcd2hex(cmd.buffer[3]);
383 	time->tm_mday = bcd2hex(cmd.buffer[4]);
384 	time->tm_mon = bcd2hex(cmd.buffer[5]) - 1;
385 	time->tm_year = bcd2hex(cmd.buffer[6]) + 100;
386 
387 	return 0;
388 }
389 
390 
391 int smu_set_rtc_time(struct rtc_time *time, int spinwait)
392 {
393 	struct smu_simple_cmd cmd;
394 	int rc;
395 
396 	if (smu == NULL)
397 		return -ENODEV;
398 
399 	rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL,
400 			      SMU_CMD_RTC_SET_DATETIME,
401 			      hex2bcd(time->tm_sec),
402 			      hex2bcd(time->tm_min),
403 			      hex2bcd(time->tm_hour),
404 			      time->tm_wday,
405 			      hex2bcd(time->tm_mday),
406 			      hex2bcd(time->tm_mon + 1),
407 			      hex2bcd(time->tm_year - 100));
408 	if (rc)
409 		return rc;
410 	smu_spinwait_simple(&cmd);
411 
412 	return 0;
413 }
414 
415 
416 void smu_shutdown(void)
417 {
418 	struct smu_simple_cmd cmd;
419 
420 	if (smu == NULL)
421 		return;
422 
423 	if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL,
424 			     'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0))
425 		return;
426 	smu_spinwait_simple(&cmd);
427 	for (;;)
428 		;
429 }
430 
431 
432 void smu_restart(void)
433 {
434 	struct smu_simple_cmd cmd;
435 
436 	if (smu == NULL)
437 		return;
438 
439 	if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL,
440 			     'R', 'E', 'S', 'T', 'A', 'R', 'T', 0))
441 		return;
442 	smu_spinwait_simple(&cmd);
443 	for (;;)
444 		;
445 }
446 
447 
448 int smu_present(void)
449 {
450 	return smu != NULL;
451 }
452 EXPORT_SYMBOL(smu_present);
453 
454 
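/*
 * Early initialization, presumably called from the platform setup code
 * well before initcalls run (note the bootmem allocation): find the SMU
 * node, hook up the pre-allocated command buffer and map the doorbell
 * buffer register.
 */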
455 int __init smu_init (void)
456 {
457 	struct device_node *np;
458 	const u32 *data;
459 
460 	np = of_find_node_by_type(NULL, "smu");
461 	if (np == NULL)
462 		return -ENODEV;
463 
464 	printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR);
465 
466 	if (smu_cmdbuf_abs == 0) {
467 		printk(KERN_ERR "SMU: Command buffer not allocated !\n");
468 		return -EINVAL;
469 	}
470 
471 	smu = alloc_bootmem(sizeof(struct smu_device));
472 	if (smu == NULL)
473 		return -ENOMEM;
474 	memset(smu, 0, sizeof(*smu));
475 
476 	spin_lock_init(&smu->lock);
477 	INIT_LIST_HEAD(&smu->cmd_list);
478 	INIT_LIST_HEAD(&smu->cmd_i2c_list);
479 	smu->of_node = np;
480 	smu->db_irq = NO_IRQ;
481 	smu->msg_irq = NO_IRQ;
482 
483 	/* smu_cmdbuf_abs is in the low 2G of RAM, so it can safely be
484 	 * converted to a 32-bit value
485 	 */
486 	smu->cmd_buf_abs = (u32)smu_cmdbuf_abs;
487 	smu->cmd_buf = (struct smu_cmd_buf *)abs_to_virt(smu_cmdbuf_abs);
488 
489 	smu->db_node = of_find_node_by_name(NULL, "smu-doorbell");
490 	if (smu->db_node == NULL) {
491 		printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n");
492 		goto fail;
493 	}
494 	data = of_get_property(smu->db_node, "reg", NULL);
495 	if (data == NULL) {
496 		of_node_put(smu->db_node);
497 		smu->db_node = NULL;
498 		printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
499 		goto fail;
500 	}
501 
502 	/* The current setup has one doorbell GPIO that does both doorbell
503 	 * and ack. GPIOs are at 0x50; it would be better to get that offset
504 	 * from the device-tree though.
505 	 */
506 	smu->doorbell = *data;
507 	if (smu->doorbell < 0x50)
508 		smu->doorbell += 0x50;
509 
510 	/* Now look for the smu-interrupt GPIO */
511 	do {
512 		smu->msg_node = of_find_node_by_name(NULL, "smu-interrupt");
513 		if (smu->msg_node == NULL)
514 			break;
515 		data = of_get_property(smu->msg_node, "reg", NULL);
516 		if (data == NULL) {
517 			of_node_put(smu->msg_node);
518 			smu->msg_node = NULL;
519 			break;
520 		}
521 		smu->msg = *data;
522 		if (smu->msg < 0x50)
523 			smu->msg += 0x50;
524 	} while(0);
525 
526 	/* The doorbell buffer address is currently hard-coded, I didn't find
527 	 * a proper device-tree entry giving it. It would probably be better
528 	 * to use an offset from the K2 base, but let's do it that way for now.
529 	 */
530 	smu->db_buf = ioremap(0x8000860c, 0x1000);
531 	if (smu->db_buf == NULL) {
532 		printk(KERN_ERR "SMU: Can't map doorbell buffer pointer !\n");
533 		goto fail;
534 	}
535 
536 	sys_ctrler = SYS_CTRLER_SMU;
537 	return 0;
538 
539  fail:
540 	smu = NULL;
541 	return -ENXIO;
542 
543 }
544 
545 
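/*
 * Second stage of initialization: map and request the doorbell and
 * message interrupts that could not be set up during the early
 * smu_init() above, then allow smu_queue_cmd() to rely on them.
 */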
546 static int smu_late_init(void)
547 {
548 	if (!smu)
549 		return 0;
550 
551 	init_timer(&smu->i2c_timer);
552 	smu->i2c_timer.function = smu_i2c_retry;
553 	smu->i2c_timer.data = (unsigned long)smu;
554 
555 	if (smu->db_node) {
556 		smu->db_irq = irq_of_parse_and_map(smu->db_node, 0);
557 		if (smu->db_irq == NO_IRQ)
558 			printk(KERN_ERR "smu: failed to map irq for node %s\n",
559 			       smu->db_node->full_name);
560 	}
561 	if (smu->msg_node) {
562 		smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0);
563 		if (smu->msg_irq == NO_IRQ)
564 			printk(KERN_ERR "smu: failed to map irq for node %s\n",
565 			       smu->msg_node->full_name);
566 	}
567 
568 	/*
569 	 * Try to request the interrupts
570 	 */
571 
572 	if (smu->db_irq != NO_IRQ) {
573 		if (request_irq(smu->db_irq, smu_db_intr,
574 				IRQF_SHARED, "SMU doorbell", smu) < 0) {
575 			printk(KERN_WARNING "SMU: can't "
576 			       "request interrupt %d\n",
577 			       smu->db_irq);
578 			smu->db_irq = NO_IRQ;
579 		}
580 	}
581 
582 	if (smu->msg_irq != NO_IRQ) {
583 		if (request_irq(smu->msg_irq, smu_msg_intr,
584 				IRQF_SHARED, "SMU message", smu) < 0) {
585 			printk(KERN_WARNING "SMU: can't "
586 			       "request interrupt %d\n",
587 			       smu->msg_irq);
588 			smu->msg_irq = NO_IRQ;
589 		}
590 	}
591 
592 	smu_irq_inited = 1;
593 	return 0;
594 }
595 /* This has to run before arch_initcall as the low-level i2c code relies on
596  * the above having been done before we reach the arch_initcalls
597  */
598 core_initcall(smu_late_init);
599 
600 /*
601  * sysfs visibility
602  */
603 
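/* Deferred work: create platform devices for the "smu-sensors" children */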
604 static void smu_expose_childs(struct work_struct *unused)
605 {
606 	struct device_node *np;
607 
608 	for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;)
609 		if (of_device_is_compatible(np, "smu-sensors"))
610 			of_platform_device_create(np, "smu-sensors",
611 						  &smu->of_dev->dev);
612 }
613 
614 static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
615 
616 static int smu_platform_probe(struct of_device* dev,
617 			      const struct of_device_id *match)
618 {
619 	if (!smu)
620 		return -ENODEV;
621 	smu->of_dev = dev;
622 
623 	/*
624 	 * Ok, we are matched, now expose all i2c busses. We have to defer
625 	 * that unfortunately or it would deadlock inside the device model
626 	 */
627 	schedule_work(&smu_expose_childs_work);
628 
629 	return 0;
630 }
631 
632 static struct of_device_id smu_platform_match[] =
633 {
634 	{
635 		.type		= "smu",
636 	},
637 	{},
638 };
639 
640 static struct of_platform_driver smu_of_platform_driver =
641 {
642 	.name 		= "smu",
643 	.match_table	= smu_platform_match,
644 	.probe		= smu_platform_probe,
645 };
646 
647 static int __init smu_init_sysfs(void)
648 {
649 	/*
650 	 * Due to sysfs bogosity, a sysdev is not a real device, so
651 	 * we should in fact create both if we want sysdev semantics
652 	 * for power management.
653 	 * For now, we don't power manage machines with an SMU chip,
654 	 * I'm a bit too far from figuring out how that works with those
655 	 * new chipsets, but that will come back and bite us
656 	 */
657 	of_register_platform_driver(&smu_of_platform_driver);
658 	return 0;
659 }
660 
661 device_initcall(smu_init_sysfs);
662 
663 struct of_device *smu_get_ofdev(void)
664 {
665 	if (!smu)
666 		return NULL;
667 	return smu->of_dev;
668 }
669 
670 EXPORT_SYMBOL_GPL(smu_get_ofdev);
671 
672 /*
673  * i2c interface
674  */
675 
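/*
 * An SMU i2c transaction apparently runs in two stages: stage 0 sends
 * SMU_CMD_I2C_COMMAND with the transfer description, then stage 1
 * repeatedly polls a one-byte status until the SMU reports completion
 * (pdata[0] below 0x80 for reads, 0 for writes), retrying up to 20
 * times with a 5ms back-off between attempts.
 */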
676 static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
677 {
678 	void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done;
679 	void *misc = cmd->misc;
680 	unsigned long flags;
681 
682 	/* Check for read case */
683 	if (!fail && cmd->read) {
684 		if (cmd->pdata[0] < 1)
685 			fail = 1;
686 		else
687 			memcpy(cmd->info.data, &cmd->pdata[1],
688 			       cmd->info.datalen);
689 	}
690 
691 	DPRINTK("SMU: completing, success: %d\n", !fail);
692 
693 	/* Update the status and clear the pending i2c command with the
694 	 * lock held so nobody comes in while we dequeue the next pending
695 	 * i2c command, if any
696 	 */
697 	spin_lock_irqsave(&smu->lock, flags);
698 	smu->cmd_i2c_cur = NULL;
699 	wmb();
700 	cmd->status = fail ? -EIO : 0;
701 
702 	/* Is there another i2c command waiting ? */
703 	if (!list_empty(&smu->cmd_i2c_list)) {
704 		struct smu_i2c_cmd *newcmd;
705 
706 		/* Fetch it, new current, remove from list */
707 		newcmd = list_entry(smu->cmd_i2c_list.next,
708 				    struct smu_i2c_cmd, link);
709 		smu->cmd_i2c_cur = newcmd;
710 		list_del(&newcmd->link);
711 
712 		/* Queue the new command with the low level smu */
713 		list_add_tail(&newcmd->scmd.link, &smu->cmd_list);
714 		if (smu->cmd_cur == NULL)
715 			smu_start_cmd();
716 	}
717 	spin_unlock_irqrestore(&smu->lock, flags);
718 
719 	/* Call command completion handler if any */
720 	if (done)
721 		done(cmd, misc);
722 
723 }
724 
725 
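/* Timer callback (and direct fallback when irqs aren't up yet): requeue
 * the current i2c command after the 5ms back-off.
 */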
726 static void smu_i2c_retry(unsigned long data)
727 {
728 	struct smu_i2c_cmd	*cmd = smu->cmd_i2c_cur;
729 
730 	DPRINTK("SMU: i2c failure, requeuing...\n");
731 
732 	/* requeue command simply by resetting reply_len */
733 	cmd->pdata[0] = 0xff;
734 	cmd->scmd.reply_len = sizeof(cmd->pdata);
735 	smu_queue_cmd(&cmd->scmd);
736 }
737 
738 
739 static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
740 {
741 	struct smu_i2c_cmd	*cmd = misc;
742 	int			fail = 0;
743 
744 	DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n",
745 		cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len);
746 
747 	/* Check for possible status */
748 	if (scmd->status < 0)
749 		fail = 1;
750 	else if (cmd->read) {
751 		if (cmd->stage == 0)
752 			fail = cmd->pdata[0] != 0;
753 		else
754 			fail = cmd->pdata[0] >= 0x80;
755 	} else {
756 		fail = cmd->pdata[0] != 0;
757 	}
758 
759 	/* Handle failures by requeuing the command after a 5ms interval
760 	 */
761 	if (fail && --cmd->retries > 0) {
762 		DPRINTK("SMU: i2c failure, starting timer...\n");
763 		BUG_ON(cmd != smu->cmd_i2c_cur);
764 		if (!smu_irq_inited) {
765 			mdelay(5);
766 			smu_i2c_retry(0);
767 			return;
768 		}
769 		mod_timer(&smu->i2c_timer, jiffies + msecs_to_jiffies(5));
770 		return;
771 	}
772 
773 	/* If failure or stage 1, command is complete */
774 	if (fail || cmd->stage != 0) {
775 		smu_i2c_complete_command(cmd, fail);
776 		return;
777 	}
778 
779 	DPRINTK("SMU: going to stage 1\n");
780 
781 	/* Ok, initial command complete, now poll status */
782 	scmd->reply_buf = cmd->pdata;
783 	scmd->reply_len = sizeof(cmd->pdata);
784 	scmd->data_buf = cmd->pdata;
785 	scmd->data_len = 1;
786 	cmd->pdata[0] = 0;
787 	cmd->stage = 1;
788 	cmd->retries = 20;
789 	smu_queue_cmd(scmd);
790 }
791 
792 
793 int smu_queue_i2c(struct smu_i2c_cmd *cmd)
794 {
795 	unsigned long flags;
796 
797 	if (smu == NULL)
798 		return -ENODEV;
799 
800 	/* Fill most fields of scmd */
801 	cmd->scmd.cmd = SMU_CMD_I2C_COMMAND;
802 	cmd->scmd.done = smu_i2c_low_completion;
803 	cmd->scmd.misc = cmd;
804 	cmd->scmd.reply_buf = cmd->pdata;
805 	cmd->scmd.reply_len = sizeof(cmd->pdata);
806 	cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
807 	cmd->scmd.status = 1;
808 	cmd->stage = 0;
809 	cmd->pdata[0] = 0xff;
810 	cmd->retries = 20;
811 	cmd->status = 1;
812 
813 	/* Check transfer type, sanitize some "info" fields
814 	 * based on transfer type and do more checking
815 	 */
816 	cmd->info.caddr = cmd->info.devaddr;
817 	cmd->read = cmd->info.devaddr & 0x01;
818 	switch(cmd->info.type) {
819 	case SMU_I2C_TRANSFER_SIMPLE:
820 		memset(&cmd->info.sublen, 0, 4);
821 		break;
822 	case SMU_I2C_TRANSFER_COMBINED:
823 		cmd->info.devaddr &= 0xfe;
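		/* fall through: combined transfers also need the sublen check */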
824 	case SMU_I2C_TRANSFER_STDSUB:
825 		if (cmd->info.sublen > 3)
826 			return -EINVAL;
827 		break;
828 	default:
829 		return -EINVAL;
830 	}
831 
832 	/* Finish setting up command based on transfer direction
833 	 */
834 	if (cmd->read) {
835 		if (cmd->info.datalen > SMU_I2C_READ_MAX)
836 			return -EINVAL;
837 		memset(cmd->info.data, 0xff, cmd->info.datalen);
838 		cmd->scmd.data_len = 9;
839 	} else {
840 		if (cmd->info.datalen > SMU_I2C_WRITE_MAX)
841 			return -EINVAL;
842 		cmd->scmd.data_len = 9 + cmd->info.datalen;
843 	}
844 
845 	DPRINTK("SMU: i2c enqueuing command\n");
846 	DPRINTK("SMU:   %s, len=%d bus=%x addr=%x sub0=%x type=%x\n",
847 		cmd->read ? "read" : "write", cmd->info.datalen,
848 		cmd->info.bus, cmd->info.caddr,
849 		cmd->info.subaddr[0], cmd->info.type);
850 
851 
852 	/* Enqueue command in i2c list, and if empty, enqueue also in
853 	 * main command list
854 	 */
855 	spin_lock_irqsave(&smu->lock, flags);
856 	if (smu->cmd_i2c_cur == NULL) {
857 		smu->cmd_i2c_cur = cmd;
858 		list_add_tail(&cmd->scmd.link, &smu->cmd_list);
859 		if (smu->cmd_cur == NULL)
860 			smu_start_cmd();
861 	} else
862 		list_add_tail(&cmd->link, &smu->cmd_i2c_list);
863 	spin_unlock_irqrestore(&smu->lock, flags);
864 
865 	return 0;
866 }
867 
868 /*
869  * Handling of "partitions"
870  */
871 
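/*
 * Read a block of SMU data in small chunks via the 0xee "misc" command.
 * The parameter block is the GET_DATABLOCK_REC sub-command, a 0x04 byte
 * (presumably the address size), the 32-bit address and the chunk length.
 */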
872 static int smu_read_datablock(u8 *dest, unsigned int addr, unsigned int len)
873 {
874 	DECLARE_COMPLETION_ONSTACK(comp);
875 	unsigned int chunk;
876 	struct smu_cmd cmd;
877 	int rc;
878 	u8 params[8];
879 
880 	/* We currently use a chunk size of 0xe. We could check the
881 	 * SMU firmware version and use bigger sizes though
882 	 */
883 	chunk = 0xe;
884 
885 	while (len) {
886 		unsigned int clen = min(len, chunk);
887 
888 		cmd.cmd = SMU_CMD_MISC_ee_COMMAND;
889 		cmd.data_len = 7;
890 		cmd.data_buf = params;
891 		cmd.reply_len = chunk;
892 		cmd.reply_buf = dest;
893 		cmd.done = smu_done_complete;
894 		cmd.misc = &comp;
895 		params[0] = SMU_CMD_MISC_ee_GET_DATABLOCK_REC;
896 		params[1] = 0x4;
897 		*((u32 *)&params[2]) = addr;
898 		params[6] = clen;
899 
900 		rc = smu_queue_cmd(&cmd);
901 		if (rc)
902 			return rc;
903 		wait_for_completion(&comp);
904 		if (cmd.status != 0)
905 			return cmd.status;
906 		if (cmd.reply_len != clen) {
907 			printk(KERN_DEBUG "SMU: short read in "
908 			       "smu_read_datablock, got: %d, want: %d\n",
909 			       cmd.reply_len, clen);
910 			return -EIO;
911 		}
912 		len -= clen;
913 		addr += clen;
914 		dest += clen;
915 	}
916 	return 0;
917 }
918 
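/*
 * Query the SMU for a sdb partition, read its content and attach it to
 * the SMU device-tree node as a "sdb-partition-XX" property so that
 * later lookups are served from the device-tree instead of the hardware.
 */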
919 static struct smu_sdbp_header *smu_create_sdb_partition(int id)
920 {
921 	DECLARE_COMPLETION_ONSTACK(comp);
922 	struct smu_simple_cmd cmd;
923 	unsigned int addr, len, tlen;
924 	struct smu_sdbp_header *hdr;
925 	struct property *prop;
926 
927 	/* First query the partition info */
928 	DPRINTK("SMU: Query partition infos ... (irq=%d)\n", smu->db_irq);
929 	smu_queue_simple(&cmd, SMU_CMD_PARTITION_COMMAND, 2,
930 			 smu_done_complete, &comp,
931 			 SMU_CMD_PARTITION_LATEST, id);
932 	wait_for_completion(&comp);
933 	DPRINTK("SMU: done, status: %d, reply_len: %d\n",
934 		cmd.cmd.status, cmd.cmd.reply_len);
935 
936 	/* Partition doesn't exist (or other error) */
937 	if (cmd.cmd.status != 0 || cmd.cmd.reply_len != 6)
938 		return NULL;
939 
940 	/* Fetch address and length from reply */
941 	addr = *((u16 *)cmd.buffer);
942 	len = cmd.buffer[3] << 2;
943 	/* Calculate the total length to allocate, including the 17 bytes
944 	 * for "sdb-partition-XX" that we append at the end of the buffer
945 	 */
946 	tlen = sizeof(struct property) + len + 18;
947 
948 	prop = kzalloc(tlen, GFP_KERNEL);
949 	if (prop == NULL)
950 		return NULL;
951 	hdr = (struct smu_sdbp_header *)(prop + 1);
952 	prop->name = ((char *)prop) + tlen - 18;
953 	sprintf(prop->name, "sdb-partition-%02x", id);
954 	prop->length = len;
955 	prop->value = hdr;
956 	prop->next = NULL;
957 
958 	/* Read the datablock */
959 	if (smu_read_datablock((u8 *)hdr, addr, len)) {
960 		printk(KERN_DEBUG "SMU: datablock read failed while reading "
961 		       "partition %02x !\n", id);
962 		goto failure;
963 	}
964 
965 	/* Got it, check a few things and create the property */
966 	if (hdr->id != id) {
967 		printk(KERN_DEBUG "SMU: Reading partition %02x and got "
968 		       "%02x !\n", id, hdr->id);
969 		goto failure;
970 	}
971 	if (prom_add_property(smu->of_node, prop)) {
972 		printk(KERN_DEBUG "SMU: Failed creating sdb-partition-%02x "
973 		       "property !\n", id);
974 		goto failure;
975 	}
976 
977 	return hdr;
978  failure:
979 	kfree(prop);
980 	return NULL;
981 }
982 
983 /* Note: Only allowed to return error code in pointers (using ERR_PTR)
984  * when interruptible is 1
985  */
986 const struct smu_sdbp_header *__smu_get_sdb_partition(int id,
987 		unsigned int *size, int interruptible)
988 {
989 	char pname[32];
990 	const struct smu_sdbp_header *part;
991 
992 	if (!smu)
993 		return NULL;
994 
995 	sprintf(pname, "sdb-partition-%02x", id);
996 
997 	DPRINTK("smu_get_sdb_partition(%02x)\n", id);
998 
999 	if (interruptible) {
1000 		int rc;
1001 		rc = mutex_lock_interruptible(&smu_part_access);
1002 		if (rc)
1003 			return ERR_PTR(rc);
1004 	} else
1005 		mutex_lock(&smu_part_access);
1006 
1007 	part = of_get_property(smu->of_node, pname, size);
1008 	if (part == NULL) {
1009 		DPRINTK("trying to extract from SMU ...\n");
1010 		part = smu_create_sdb_partition(id);
1011 		if (part != NULL && size)
1012 			*size = part->len << 2;
1013 	}
1014 	mutex_unlock(&smu_part_access);
1015 	return part;
1016 }
1017 
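/*
 * Illustrative use from a client driver, assuming a known partition id:
 *
 *	unsigned int len;
 *	const struct smu_sdbp_header *hdr = smu_get_sdb_partition(id, &len);
 *	if (hdr)
 *		... use the partition, len is its size in bytes ...
 */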
1018 const struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size)
1019 {
1020 	return __smu_get_sdb_partition(id, size, 0);
1021 }
1022 EXPORT_SYMBOL(smu_get_sdb_partition);
1023 
1024 
1025 /*
1026  * Userland driver interface
1027  */
1028 
1029 
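/*
 * Each open file descriptor carries at most one in-flight command: a
 * write() of a struct smu_user_cmd_hdr followed by the data bytes queues
 * it, and a subsequent read() returns a struct smu_user_reply_hdr
 * followed by the reply data. Event delivery is not implemented yet.
 */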
1030 static LIST_HEAD(smu_clist);
1031 static DEFINE_SPINLOCK(smu_clist_lock);
1032 
1033 enum smu_file_mode {
1034 	smu_file_commands,
1035 	smu_file_events,
1036 	smu_file_closing
1037 };
1038 
1039 struct smu_private
1040 {
1041 	struct list_head	list;
1042 	enum smu_file_mode	mode;
1043 	int			busy;
1044 	struct smu_cmd		cmd;
1045 	spinlock_t		lock;
1046 	wait_queue_head_t	wait;
1047 	u8			buffer[SMU_MAX_DATA];
1048 };
1049 
1050 
1051 static int smu_open(struct inode *inode, struct file *file)
1052 {
1053 	struct smu_private *pp;
1054 	unsigned long flags;
1055 
1056 	pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL);
1057 	if (pp == 0)
1058 		return -ENOMEM;
1059 	spin_lock_init(&pp->lock);
1060 	pp->mode = smu_file_commands;
1061 	init_waitqueue_head(&pp->wait);
1062 
1063 	spin_lock_irqsave(&smu_clist_lock, flags);
1064 	list_add(&pp->list, &smu_clist);
1065 	spin_unlock_irqrestore(&smu_clist_lock, flags);
1066 	file->private_data = pp;
1067 
1068 	return 0;
1069 }
1070 
1071 
1072 static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
1073 {
1074 	struct smu_private *pp = misc;
1075 
1076 	wake_up_all(&pp->wait);
1077 }
1078 
1079 
1080 static ssize_t smu_write(struct file *file, const char __user *buf,
1081 			 size_t count, loff_t *ppos)
1082 {
1083 	struct smu_private *pp = file->private_data;
1084 	unsigned long flags;
1085 	struct smu_user_cmd_hdr hdr;
1086 	int rc = 0;
1087 
1088 	if (pp->busy)
1089 		return -EBUSY;
1090 	else if (copy_from_user(&hdr, buf, sizeof(hdr)))
1091 		return -EFAULT;
1092 	else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) {
1093 		pp->mode = smu_file_events;
1094 		return 0;
1095 	} else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) {
1096 		const struct smu_sdbp_header *part;
1097 		part = __smu_get_sdb_partition(hdr.cmd, NULL, 1);
1098 		if (part == NULL)
1099 			return -EINVAL;
1100 		else if (IS_ERR(part))
1101 			return PTR_ERR(part);
1102 		return 0;
1103 	} else if (hdr.cmdtype != SMU_CMDTYPE_SMU)
1104 		return -EINVAL;
1105 	else if (pp->mode != smu_file_commands)
1106 		return -EBADFD;
1107 	else if (hdr.data_len > SMU_MAX_DATA)
1108 		return -EINVAL;
1109 
1110 	spin_lock_irqsave(&pp->lock, flags);
1111 	if (pp->busy) {
1112 		spin_unlock_irqrestore(&pp->lock, flags);
1113 		return -EBUSY;
1114 	}
1115 	pp->busy = 1;
1116 	pp->cmd.status = 1;
1117 	spin_unlock_irqrestore(&pp->lock, flags);
1118 
1119 	if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
1120 		pp->busy = 0;
1121 		return -EFAULT;
1122 	}
1123 
1124 	pp->cmd.cmd = hdr.cmd;
1125 	pp->cmd.data_len = hdr.data_len;
1126 	pp->cmd.reply_len = SMU_MAX_DATA;
1127 	pp->cmd.data_buf = pp->buffer;
1128 	pp->cmd.reply_buf = pp->buffer;
1129 	pp->cmd.done = smu_user_cmd_done;
1130 	pp->cmd.misc = pp;
1131 	rc = smu_queue_cmd(&pp->cmd);
1132 	if (rc < 0)
1133 		return rc;
1134 	return count;
1135 }
1136 
1137 
1138 static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
1139 				char __user *buf, size_t count)
1140 {
1141 	DECLARE_WAITQUEUE(wait, current);
1142 	struct smu_user_reply_hdr hdr;
1143 	unsigned long flags;
1144 	int size, rc = 0;
1145 
1146 	if (!pp->busy)
1147 		return 0;
1148 	if (count < sizeof(struct smu_user_reply_hdr))
1149 		return -EOVERFLOW;
1150 	spin_lock_irqsave(&pp->lock, flags);
1151 	if (pp->cmd.status == 1) {
1152 		if (file->f_flags & O_NONBLOCK) {
1153 			spin_unlock_irqrestore(&pp->lock, flags);
			return -EAGAIN;
		}
1154 		add_wait_queue(&pp->wait, &wait);
1155 		for (;;) {
1156 			set_current_state(TASK_INTERRUPTIBLE);
1157 			rc = 0;
1158 			if (pp->cmd.status != 1)
1159 				break;
1160 			rc = -ERESTARTSYS;
1161 			if (signal_pending(current))
1162 				break;
1163 			spin_unlock_irqrestore(&pp->lock, flags);
1164 			schedule();
1165 			spin_lock_irqsave(&pp->lock, flags);
1166 		}
1167 		set_current_state(TASK_RUNNING);
1168 		remove_wait_queue(&pp->wait, &wait);
1169 	}
1170 	spin_unlock_irqrestore(&pp->lock, flags);
1171 	if (rc)
1172 		return rc;
1173 	if (pp->cmd.status != 0)
1174 		pp->cmd.reply_len = 0;
1175 	size = sizeof(hdr) + pp->cmd.reply_len;
1176 	if (count < size)
1177 		size = count;
1178 	rc = size;
1179 	hdr.status = pp->cmd.status;
1180 	hdr.reply_len = pp->cmd.reply_len;
1181 	if (copy_to_user(buf, &hdr, sizeof(hdr)))
1182 		return -EFAULT;
1183 	size -= sizeof(hdr);
1184 	if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
1185 		return -EFAULT;
1186 	pp->busy = 0;
1187 
1188 	return rc;
1189 }
1190 
1191 
1192 static ssize_t smu_read_events(struct file *file, struct smu_private *pp,
1193 			       char __user *buf, size_t count)
1194 {
1195 	/* Not implemented */
1196 	msleep_interruptible(1000);
1197 	return 0;
1198 }
1199 
1200 
1201 static ssize_t smu_read(struct file *file, char __user *buf,
1202 			size_t count, loff_t *ppos)
1203 {
1204 	struct smu_private *pp = file->private_data;
1205 
1206 	if (pp->mode == smu_file_commands)
1207 		return smu_read_command(file, pp, buf, count);
1208 	if (pp->mode == smu_file_events)
1209 		return smu_read_events(file, pp, buf, count);
1210 
1211 	return -EBADFD;
1212 }
1213 
1214 static unsigned int smu_fpoll(struct file *file, poll_table *wait)
1215 {
1216 	struct smu_private *pp = file->private_data;
1217 	unsigned int mask = 0;
1218 	unsigned long flags;
1219 
1220 	if (pp == 0)
1221 		return 0;
1222 
1223 	if (pp->mode == smu_file_commands) {
1224 		poll_wait(file, &pp->wait, wait);
1225 
1226 		spin_lock_irqsave(&pp->lock, flags);
1227 		if (pp->busy && pp->cmd.status != 1)
1228 			mask |= POLLIN;
1229 		spin_unlock_irqrestore(&pp->lock, flags);
1230 	} else if (pp->mode == smu_file_events) {
1231 		/* Not yet implemented */
1232 	}
1233 	return mask;
1234 }
1235 
1236 static int smu_release(struct inode *inode, struct file *file)
1237 {
1238 	struct smu_private *pp = file->private_data;
1239 	unsigned long flags;
1240 	unsigned int busy;
1241 
1242 	if (pp == 0)
1243 		return 0;
1244 
1245 	file->private_data = NULL;
1246 
1247 	/* Mark file as closing to avoid races with new request */
1248 	spin_lock_irqsave(&pp->lock, flags);
1249 	pp->mode = smu_file_closing;
1250 	busy = pp->busy;
1251 
1252 	/* Wait for any pending request to complete */
1253 	if (busy && pp->cmd.status == 1) {
1254 		DECLARE_WAITQUEUE(wait, current);
1255 
1256 		add_wait_queue(&pp->wait, &wait);
1257 		for (;;) {
1258 			set_current_state(TASK_UNINTERRUPTIBLE);
1259 			if (pp->cmd.status != 1)
1260 				break;
1261 			spin_unlock_irqrestore(&pp->lock, flags);
1262 			schedule();
1263 			spin_lock_irqsave(&pp->lock, flags);
1264 		}
1265 		set_current_state(TASK_RUNNING);
1266 		remove_wait_queue(&pp->wait, &wait);
1267 	}
1268 	spin_unlock_irqrestore(&pp->lock, flags);
1269 
1270 	spin_lock_irqsave(&smu_clist_lock, flags);
1271 	list_del(&pp->list);
1272 	spin_unlock_irqrestore(&smu_clist_lock, flags);
1273 	kfree(pp);
1274 
1275 	return 0;
1276 }
1277 
1278 
1279 static const struct file_operations smu_device_fops = {
1280 	.llseek		= no_llseek,
1281 	.read		= smu_read,
1282 	.write		= smu_write,
1283 	.poll		= smu_fpoll,
1284 	.open		= smu_open,
1285 	.release	= smu_release,
1286 };
1287 
1288 static struct miscdevice pmu_device = {
1289 	MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
1290 };
1291 
1292 static int smu_device_init(void)
1293 {
1294 	if (!smu)
1295 		return -ENODEV;
1296 	if (misc_register(&pmu_device) < 0)
1297 		printk(KERN_ERR "smu: cannot register misc device.\n");
1298 	return 0;
1299 }
1300 device_initcall(smu_device_init);
1301