1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for the Intel SCU IPC mechanism
4  *
5  * (C) Copyright 2008-2010,2015 Intel Corporation
6  * Author: Sreedhara DS (sreedhara.ds@intel.com)
7  *
 * The SCU running on the ARC processor communicates with other entities
 * running on the IA core through an IPC mechanism, which provides messaging
 * between the IA core and the SCU. The SCU has two IPC mechanisms, IPC-1 and
 * IPC-2. IPC-1 is used between IA32 and the SCU, whereas IPC-2 is used
 * between the P-Unit and the SCU. This driver deals with IPC-1. The driver
 * provides an API for power control unit registers (e.g. MSIC) along with
 * other APIs.
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/device.h>
18 #include <linux/errno.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 
25 #include <asm/intel_scu_ipc.h>
26 
27 /* IPC defines the following message types */
28 #define IPCMSG_PCNTRL         0xff /* Power controller unit read/write */
29 
30 /* Command id associated with message IPCMSG_PCNTRL */
31 #define IPC_CMD_PCNTRL_W      0 /* Register write */
32 #define IPC_CMD_PCNTRL_R      1 /* Register read */
33 #define IPC_CMD_PCNTRL_M      2 /* Register read-modify-write */
34 
35 /*
36  * IPC register summary
37  *
38  * IPC register blocks are memory mapped at fixed address of PCI BAR 0.
39  * To read or write information to the SCU, driver writes to IPC-1 memory
40  * mapped registers. The following is the IPC mechanism
41  *
42  * 1. IA core cDMI interface claims this transaction and converts it to a
43  *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
44  *
45  * 2. South Complex cDMI block receives this message and writes it to
46  *    the IPC-1 register block, causing an interrupt to the SCU
47  *
48  * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
49  *    message handler is called within firmware.
50  */
51 
52 #define IPC_WWBUF_SIZE    20		/* IPC Write buffer Size */
53 #define IPC_RWBUF_SIZE    20		/* IPC Read buffer Size */
54 #define IPC_IOC	          0x100		/* IPC command register IOC bit */
55 
/**
 * struct intel_scu_ipc_dev - SCU IPC device instance
 * @dev: Device handle used for reference counting and error messages
 * @mem: Memory region occupied by the IPC register block
 * @owner: Module providing the IPC functionality (pinned while in use)
 * @irq: IPC interrupt number; <= 0 means polling via busy_loop() instead
 * @ipc_base: Mapped IPC register block
 * @cmd_complete: Completed by the IRQ handler when a command finishes
 */
struct intel_scu_ipc_dev {
	struct device dev;
	struct resource mem;
	struct module *owner;
	int irq;
	void __iomem *ipc_base;
	struct completion cmd_complete;
};
64 
65 #define IPC_STATUS		0x04
66 #define IPC_STATUS_IRQ		BIT(2)
67 #define IPC_STATUS_ERR		BIT(1)
68 #define IPC_STATUS_BUSY		BIT(0)
69 
70 /*
71  * IPC Write/Read Buffers:
72  * 16 byte buffer for sending and receiving data to and from SCU.
73  */
74 #define IPC_WRITE_BUFFER	0x80
75 #define IPC_READ_BUFFER		0x90
76 
77 /* Timeout in jiffies */
78 #define IPC_TIMEOUT		(10 * HZ)
79 
/* The single registered IPC provider; NULL until __intel_scu_ipc_register() */
static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */

/* Device class under which the IPC device is registered */
static struct class intel_scu_ipc_class = {
	.name = "intel_scu_ipc",
};
86 
87 /**
88  * intel_scu_ipc_dev_get() - Get SCU IPC instance
89  *
90  * The recommended new API takes SCU IPC instance as parameter and this
91  * function can be called by driver to get the instance. This also makes
92  * sure the driver providing the IPC functionality cannot be unloaded
93  * while the caller has the instance.
94  *
95  * Call intel_scu_ipc_dev_put() to release the instance.
96  *
97  * Returns %NULL if SCU IPC is not currently available.
98  */
99 struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
100 {
101 	struct intel_scu_ipc_dev *scu = NULL;
102 
103 	mutex_lock(&ipclock);
104 	if (ipcdev) {
105 		get_device(&ipcdev->dev);
106 		/*
107 		 * Prevent the IPC provider from being unloaded while it
108 		 * is being used.
109 		 */
110 		if (!try_module_get(ipcdev->owner))
111 			put_device(&ipcdev->dev);
112 		else
113 			scu = ipcdev;
114 	}
115 
116 	mutex_unlock(&ipclock);
117 	return scu;
118 }
119 EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
120 
121 /**
122  * intel_scu_ipc_dev_put() - Put SCU IPC instance
123  * @scu: SCU IPC instance
124  *
125  * This function releases the SCU IPC instance retrieved from
126  * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
127  * unloaded.
128  */
129 void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
130 {
131 	if (scu) {
132 		module_put(scu->owner);
133 		put_device(&scu->dev);
134 	}
135 }
136 EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
137 
/* devres payload: remembers which SCU IPC instance to release/unregister */
struct intel_scu_ipc_devres {
	struct intel_scu_ipc_dev *scu;
};
141 
142 static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
143 {
144 	struct intel_scu_ipc_devres *dr = res;
145 	struct intel_scu_ipc_dev *scu = dr->scu;
146 
147 	intel_scu_ipc_dev_put(scu);
148 }
149 
150 /**
151  * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
152  * @dev: Device requesting the SCU IPC device
153  *
154  * The recommended new API takes SCU IPC instance as parameter and this
155  * function can be called by driver to get the instance. This also makes
156  * sure the driver providing the IPC functionality cannot be unloaded
157  * while the caller has the instance.
158  *
159  * Returns %NULL if SCU IPC is not currently available.
160  */
161 struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
162 {
163 	struct intel_scu_ipc_devres *dr;
164 	struct intel_scu_ipc_dev *scu;
165 
166 	dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
167 	if (!dr)
168 		return NULL;
169 
170 	scu = intel_scu_ipc_dev_get();
171 	if (!scu) {
172 		devres_free(dr);
173 		return NULL;
174 	}
175 
176 	dr->scu = scu;
177 	devres_add(dev, dr);
178 
179 	return scu;
180 }
181 EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
182 
/*
 * Send ipc command
 * Command Register (Write Only):
 * A write to this register results in an interrupt to the SCU core processor
 * Format:
 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 */
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
	/* Re-arm the completion before triggering so the IRQ cannot race it */
	reinit_completion(&scu->cmd_complete);
	/* IOC bit requests an interrupt once the SCU has processed the command */
	writel(cmd | IPC_IOC, scu->ipc_base);
}
195 
/*
 * Write ipc data
 * IPC Write Buffer (Write Only):
 * 16-byte buffer for sending data associated with IPC command to
 * SCU. Size of the data is specified in the IPC_COMMAND_REG register.
 * @offset is a byte offset into the buffer and must be dword-aligned.
 */
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
	writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
206 
207 /*
208  * Status Register (Read Only):
209  * Driver will read this register to get the ready/busy status of the IPC
210  * block and error status of the IPC command that was just processed by SCU
211  * Format:
212  * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
213  */
214 static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
215 {
216 	return __raw_readl(scu->ipc_base + IPC_STATUS);
217 }
218 
/* Read one byte from the IPC read buffer at byte @offset */
static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
}
224 
/* Read one dword from the IPC read buffer at byte @offset (dword-aligned) */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}
230 
231 /* Wait till scu status is busy */
232 static inline int busy_loop(struct intel_scu_ipc_dev *scu)
233 {
234 	unsigned long end = jiffies + IPC_TIMEOUT;
235 
236 	do {
237 		u32 status;
238 
239 		status = ipc_read_status(scu);
240 		if (!(status & IPC_STATUS_BUSY))
241 			return (status & IPC_STATUS_ERR) ? -EIO : 0;
242 
243 		usleep_range(50, 100);
244 	} while (time_before(jiffies, end));
245 
246 	return -ETIMEDOUT;
247 }
248 
249 /* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
250 static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
251 {
252 	int status;
253 
254 	if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
255 		return -ETIMEDOUT;
256 
257 	status = ipc_read_status(scu);
258 	if (status & IPC_STATUS_ERR)
259 		return -EIO;
260 
261 	return 0;
262 }
263 
264 static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
265 {
266 	return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
267 }
268 
269 /* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
270 static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
271 			u32 count, u32 op, u32 id)
272 {
273 	int nc;
274 	u32 offset = 0;
275 	int err;
276 	u8 cbuf[IPC_WWBUF_SIZE];
277 	u32 *wbuf = (u32 *)&cbuf;
278 
279 	memset(cbuf, 0, sizeof(cbuf));
280 
281 	mutex_lock(&ipclock);
282 	if (!scu)
283 		scu = ipcdev;
284 	if (!scu) {
285 		mutex_unlock(&ipclock);
286 		return -ENODEV;
287 	}
288 
289 	for (nc = 0; nc < count; nc++, offset += 2) {
290 		cbuf[offset] = addr[nc];
291 		cbuf[offset + 1] = addr[nc] >> 8;
292 	}
293 
294 	if (id == IPC_CMD_PCNTRL_R) {
295 		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
296 			ipc_data_writel(scu, wbuf[nc], offset);
297 		ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
298 	} else if (id == IPC_CMD_PCNTRL_W) {
299 		for (nc = 0; nc < count; nc++, offset += 1)
300 			cbuf[offset] = data[nc];
301 		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
302 			ipc_data_writel(scu, wbuf[nc], offset);
303 		ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
304 	} else if (id == IPC_CMD_PCNTRL_M) {
305 		cbuf[offset] = data[0];
306 		cbuf[offset + 1] = data[1];
307 		ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
308 		ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
309 	}
310 
311 	err = intel_scu_ipc_check_status(scu);
312 	if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
313 		/* Workaround: values are read as 0 without memcpy_fromio */
314 		memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
315 		for (nc = 0; nc < count; nc++)
316 			data[nc] = ipc_data_readb(scu, nc);
317 	}
318 	mutex_unlock(&ipclock);
319 	return err;
320 }
321 
/**
 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
 * @scu: Optional SCU IPC instance; %NULL selects the registered default
 * @addr: Register on SCU
 * @data: Return pointer for read byte
 *
 * Read a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
	return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
338 
/**
 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
 * @scu: Optional SCU IPC instance; %NULL selects the registered default
 * @addr: Register on SCU
 * @data: Byte to write
 *
 * Write a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
	return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
355 
/**
 * intel_scu_ipc_dev_readv() - Read a set of registers
 * @scu: Optional SCU IPC instance; %NULL selects the registered default
 * @addr: Register list
 * @data: Bytes to return
 * @len: Length of array
 *
 * Read registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			    size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
376 
/**
 * intel_scu_ipc_dev_writev() - Write a set of registers
 * @scu: Optional SCU IPC instance; %NULL selects the registered default
 * @addr: Register list
 * @data: Bytes to write
 * @len: Length of array
 *
 * Write registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			     size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
397 
/**
 * intel_scu_ipc_dev_update() - Update a register
 * @scu: Optional SCU IPC instance; %NULL selects the registered default
 * @addr: Register address
 * @data: Bits to update
 * @mask: Mask of bits to update
 *
 * Read-modify-write the power control unit register at @addr. @data holds
 * the new bit values and @mask selects which bits to update: %0 = leave the
 * bit unchanged, %1 = take the bit from @data. Returns %0 on success or an
 * error code.
 *
 * This function may sleep. Locking between SCU accesses is handled
 * for the caller.
 */
int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
			     u8 mask)
{
	/* pwr_reg_rdwr() expects { value, mask } as the two data bytes */
	u8 tmp[2] = { data, mask };
	return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_update);
420 
421 /**
422  * intel_scu_ipc_dev_simple_command() - Send a simple command
423  * @scu: Optional SCU IPC instance
424  * @cmd: Command
425  * @sub: Sub type
426  *
427  * Issue a simple command to the SCU. Do not use this interface if you must
428  * then access data as any data values may be overwritten by another SCU
429  * access by the time this function returns.
430  *
431  * This function may sleep. Locking for SCU accesses is handled for the
432  * caller.
433  */
434 int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
435 				     int sub)
436 {
437 	u32 cmdval;
438 	int err;
439 
440 	mutex_lock(&ipclock);
441 	if (!scu)
442 		scu = ipcdev;
443 	if (!scu) {
444 		mutex_unlock(&ipclock);
445 		return -ENODEV;
446 	}
447 	scu = ipcdev;
448 	cmdval = sub << 12 | cmd;
449 	ipc_command(scu, cmdval);
450 	err = intel_scu_ipc_check_status(scu);
451 	mutex_unlock(&ipclock);
452 	if (err)
453 		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
454 	return err;
455 }
456 EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
457 
/**
 * intel_scu_ipc_dev_command_with_size() - Command with data
 * @scu: Optional SCU IPC instance; %NULL selects the registered default
 * @cmd: Command
 * @sub: Sub type
 * @in: Input data
 * @inlen: Input length in bytes (at most 16)
 * @size: Input size written to the IPC command register in whatever
 *	  units (dword, byte) the particular firmware requires. Normally
 *	  should be the same as @inlen.
 * @out: Output data
 * @outlen: Output length in bytes (at most 16)
 *
 * Issue a command to the SCU which involves data transfers. Do the
 * data copies under the lock but leave it for the caller to interpret.
 * Returns %0 on success or a negative error code.
 */
int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
					int sub, const void *in, size_t inlen,
					size_t size, void *out, size_t outlen)
{
	size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
	size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
	u32 cmdval, inbuf[4] = {};
	int i, err;

	/* Both directions are limited to four dwords (16 bytes) */
	if (inbuflen > 4 || outbuflen > 4)
		return -EINVAL;

	mutex_lock(&ipclock);
	if (!scu)
		scu = ipcdev;
	if (!scu) {
		mutex_unlock(&ipclock);
		return -ENODEV;
	}

	/* Stage input via a zero-padded bounce buffer, one dword at a time */
	memcpy(inbuf, in, inlen);
	for (i = 0; i < inbuflen; i++)
		ipc_data_writel(scu, inbuf[i], 4 * i);

	cmdval = (size << 16) | (sub << 12) | cmd;
	ipc_command(scu, cmdval);
	err = intel_scu_ipc_check_status(scu);

	if (!err) {
		u32 outbuf[4] = {};

		/* Copy out while still holding the lock so no other command clobbers it */
		for (i = 0; i < outbuflen; i++)
			outbuf[i] = ipc_data_readl(scu, 4 * i);

		memcpy(out, outbuf, outlen);
	}

	mutex_unlock(&ipclock);
	if (err)
		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
	return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
517 
/*
 * Interrupt handler, called when the IOC bit of IPC_COMMAND_REG was set
 * for a command. The API caller waits on cmd_complete, which this handler
 * completes once the command has been processed.
 *
 * The handler acknowledges the interrupt by writing IPC_STATUS_IRQ back
 * to the status register before waking the waiter.
 */
static irqreturn_t ioc(int irq, void *dev_id)
{
	struct intel_scu_ipc_dev *scu = dev_id;
	int status = ipc_read_status(scu);

	writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
	complete(&scu->cmd_complete);

	return IRQ_HANDLED;
}
535 
/*
 * Device release callback: frees all resources acquired in
 * __intel_scu_ipc_register() once the last reference is dropped.
 * Teardown mirrors registration order: IRQ, mapping, memory region,
 * then the structure itself.
 */
static void intel_scu_ipc_release(struct device *dev)
{
	struct intel_scu_ipc_dev *scu;

	scu = container_of(dev, struct intel_scu_ipc_dev, dev);
	if (scu->irq > 0)
		free_irq(scu->irq, scu);
	iounmap(scu->ipc_base);
	release_mem_region(scu->mem.start, resource_size(&scu->mem));
	kfree(scu);
}
547 
/**
 * __intel_scu_ipc_register() - Register SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register SCU IPC mechanism under @parent.
 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
 * failure. The caller may use the returned instance if it needs to do
 * SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__intel_scu_ipc_register(struct device *parent,
			 const struct intel_scu_ipc_data *scu_data,
			 struct module *owner)
{
	int err;
	struct intel_scu_ipc_dev *scu;
	void __iomem *ipc_base;

	mutex_lock(&ipclock);
	/* We support only one IPC */
	if (ipcdev) {
		err = -EBUSY;
		goto err_unlock;
	}

	scu = kzalloc(sizeof(*scu), GFP_KERNEL);
	if (!scu) {
		err = -ENOMEM;
		goto err_unlock;
	}

	scu->owner = owner;
	scu->dev.parent = parent;
	scu->dev.class = &intel_scu_ipc_class;
	scu->dev.release = intel_scu_ipc_release;

	if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
				"intel_scu_ipc")) {
		err = -EBUSY;
		goto err_free;
	}

	ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
	if (!ipc_base) {
		err = -ENOMEM;
		goto err_release;
	}

	scu->ipc_base = ipc_base;
	scu->mem = scu_data->mem;
	scu->irq = scu_data->irq;
	init_completion(&scu->cmd_complete);

	/* irq <= 0 means no interrupt: commands will be completed by polling */
	if (scu->irq > 0) {
		err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
		if (err)
			goto err_unmap;
	}

	/*
	 * After this point intel_scu_ipc_release() takes care of
	 * releasing the SCU IPC resources once refcount drops to zero.
	 */
	dev_set_name(&scu->dev, "intel_scu_ipc");
	err = device_register(&scu->dev);
	if (err) {
		/* put_device() triggers the release callback which frees scu */
		put_device(&scu->dev);
		goto err_unlock;
	}

	/* Assign device at last */
	ipcdev = scu;
	mutex_unlock(&ipclock);

	return scu;

err_unmap:
	iounmap(ipc_base);
err_release:
	release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
err_free:
	kfree(scu);
err_unlock:
	mutex_unlock(&ipclock);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
638 
639 /**
640  * intel_scu_ipc_unregister() - Unregister SCU IPC
641  * @scu: SCU IPC handle
642  *
643  * This unregisters the SCU IPC device and releases the acquired
644  * resources once the refcount goes to zero.
645  */
646 void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
647 {
648 	mutex_lock(&ipclock);
649 	if (!WARN_ON(!ipcdev)) {
650 		ipcdev = NULL;
651 		device_unregister(&scu->dev);
652 	}
653 	mutex_unlock(&ipclock);
654 }
655 EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
656 
657 static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
658 {
659 	struct intel_scu_ipc_devres *dr = res;
660 	struct intel_scu_ipc_dev *scu = dr->scu;
661 
662 	intel_scu_ipc_unregister(scu);
663 }
664 
/**
 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register managed SCU IPC mechanism under
 * @parent. Returns pointer to the new SCU IPC device, ERR_PTR() if the
 * registration itself failed, or %NULL if the devres allocation failed.
 * The caller may use the returned instance if it needs to do SCU IPC
 * calls itself.
 */
struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register(struct device *parent,
			      const struct intel_scu_ipc_data *scu_data,
			      struct module *owner)
{
	struct intel_scu_ipc_devres *dr;
	struct intel_scu_ipc_dev *scu;

	dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	scu = __intel_scu_ipc_register(parent, scu_data, owner);
	if (IS_ERR(scu)) {
		devres_free(dr);
		return scu;
	}

	dr->scu = scu;
	devres_add(parent, dr);

	return scu;
}
EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
700 
/* Register the device class early so it exists before any IPC provider probes */
static int __init intel_scu_ipc_init(void)
{
	return class_register(&intel_scu_ipc_class);
}
subsys_initcall(intel_scu_ipc_init);
706 
/* Tear down the device class on module unload */
static void __exit intel_scu_ipc_exit(void)
{
	class_unregister(&intel_scu_ipc_class);
}
module_exit(intel_scu_ipc_exit);
712