// SPDX-License-Identifier: GPL-2.0
/*
 * Intel On Demand (Software Defined Silicon) driver
 *
 * Copyright (c) 2022, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: "David E. Box" <david.e.box@linux.intel.com>
 */

#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include "vsec.h"

#define ACCESS_TYPE_BARID		2
#define ACCESS_TYPE_LOCAL		3

#define SDSI_MIN_SIZE_DWORDS		276
#define SDSI_SIZE_MAILBOX		1024
#define SDSI_SIZE_REGS			80
#define SDSI_SIZE_CMD			sizeof(u64)

/*
 * Write messages are currently up to the size of the mailbox
 * while read messages are up to 4 times the size of the
 * mailbox, sent in packets
 */
#define SDSI_SIZE_WRITE_MSG		SDSI_SIZE_MAILBOX
#define SDSI_SIZE_READ_MSG		(SDSI_SIZE_MAILBOX * 4)

#define SDSI_ENABLED_FEATURES_OFFSET	16
#define SDSI_FEATURE_SDSI		BIT(3)
#define SDSI_FEATURE_METERING		BIT(26)

#define SDSI_SOCKET_ID_OFFSET		64
#define SDSI_SOCKET_ID			GENMASK(3, 0)

#define SDSI_MBOX_CMD_SUCCESS		0x40
#define SDSI_MBOX_CMD_TIMEOUT		0x80

#define MBOX_TIMEOUT_US			500000
#define MBOX_TIMEOUT_ACQUIRE_US		1000
#define MBOX_POLLING_PERIOD_US		100
#define MBOX_ACQUIRE_NUM_RETRIES	5
#define MBOX_ACQUIRE_RETRY_DELAY_MS	500
#define MBOX_MAX_PACKETS		4

#define MBOX_OWNER_NONE			0x00
#define MBOX_OWNER_INBAND		0x01

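/* Bit fields of the 64-bit mailbox control register */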
#define CTRL_RUN_BUSY			BIT(0)
#define CTRL_READ_WRITE			BIT(1)
#define CTRL_SOM			BIT(2)
#define CTRL_EOM			BIT(3)
#define CTRL_OWNER			GENMASK(5, 4)
#define CTRL_COMPLETE			BIT(6)
#define CTRL_READY			BIT(7)
#define CTRL_STATUS			GENMASK(15, 8)
#define CTRL_PACKET_SIZE		GENMASK(31, 16)
#define CTRL_MSG_SIZE			GENMASK(63, 48)

#define DISC_TABLE_SIZE			12
#define DT_ACCESS_TYPE			GENMASK(3, 0)
#define DT_SIZE				GENMASK(27, 12)
#define DT_TBIR				GENMASK(2, 0)
#define DT_OFFSET(v)			((v) & GENMASK(31, 3))

#define SDSI_GUID_V1			0x006DD191
#define GUID_V1_CNTRL_SIZE		8
#define GUID_V1_REGS_SIZE		72
#define SDSI_GUID_V2			0xF210D9EF
#define GUID_V2_CNTRL_SIZE		16
#define GUID_V2_REGS_SIZE		80

enum sdsi_command {
	SDSI_CMD_PROVISION_AKC		= 0x0004,
	SDSI_CMD_PROVISION_CAP		= 0x0008,
	SDSI_CMD_READ_STATE		= 0x0010,
	SDSI_CMD_READ_METER		= 0x0014,
};

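/*
 * Describes one mailbox transaction: the qword payload to send and, for
 * read commands, the buffer that receives the returned data.
 */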
struct sdsi_mbox_info {
	u64	*payload;
	void	*buffer;
	int	size;
};

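/*
 * On Demand discovery table: access type and region size, the interface
 * GUID, and the BAR index/offset at which the SDSi registers are found.
 */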
struct disc_table {
	u32	access_info;
	u32	guid;
	u32	offset;
};

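/*
 * Per-device state. The mapped SDSi region starts with the control
 * registers, followed by the 1 KB mailbox data area and then the
 * attribute (registers) region whose size depends on the GUID.
 */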
struct sdsi_priv {
	struct mutex		mb_lock;	/* Mailbox access lock */
	struct device		*dev;
	void __iomem		*control_addr;
	void __iomem		*mbox_addr;
	void __iomem		*regs_addr;
	int			control_size;
	int			mailbox_size;
	int			registers_size;
	u32			guid;
	u32			features;
};

/* SDSi mailbox operations must be performed using 64-bit mov instructions */
static __always_inline void
sdsi_memcpy64_toio(u64 __iomem *to, const u64 *from, size_t count_bytes)
{
	size_t count = count_bytes / sizeof(*to);
	int i;

	for (i = 0; i < count; i++)
		writeq(from[i], &to[i]);
}

static __always_inline void
sdsi_memcpy64_fromio(u64 *to, const u64 __iomem *from, size_t count_bytes)
{
	size_t count = count_bytes / sizeof(*to);
	int i;

	for (i = 0; i < count; i++)
		to[i] = readq(&from[i]);
}

static inline void sdsi_complete_transaction(struct sdsi_priv *priv)
{
	u64 control = FIELD_PREP(CTRL_COMPLETE, 1);

	lockdep_assert_held(&priv->mb_lock);
	writeq(control, priv->control_addr);
}

static int sdsi_status_to_errno(u32 status)
{
	switch (status) {
	case SDSI_MBOX_CMD_SUCCESS:
		return 0;
	case SDSI_MBOX_CMD_TIMEOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

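/*
 * Read a response from the mailbox. The command is issued with SOM/EOM set
 * and then, after each READY indication, one mailbox-sized packet is copied
 * out and the transaction is completed, until the hardware reports EOM or
 * MBOX_MAX_PACKETS packets have been read.
 */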
static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info,
			      size_t *data_size)
{
	struct device *dev = priv->dev;
	u32 total, loop, eom, status, message_size;
	u64 control;
	int ret;

	lockdep_assert_held(&priv->mb_lock);

	/* Format and send the read command */
	control = FIELD_PREP(CTRL_EOM, 1) |
		  FIELD_PREP(CTRL_SOM, 1) |
		  FIELD_PREP(CTRL_RUN_BUSY, 1) |
		  FIELD_PREP(CTRL_PACKET_SIZE, info->size);
	writeq(control, priv->control_addr);

	/* For reads, data sizes that are larger than the mailbox size are read in packets. */
	total = 0;
	loop = 0;
	do {
		void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
		u32 packet_size;

		/* Poll on ready bit */
		ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
					 MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
		if (ret)
			break;

		eom = FIELD_GET(CTRL_EOM, control);
		status = FIELD_GET(CTRL_STATUS, control);
		packet_size = FIELD_GET(CTRL_PACKET_SIZE, control);
		message_size = FIELD_GET(CTRL_MSG_SIZE, control);

		ret = sdsi_status_to_errno(status);
		if (ret)
			break;

		/* Only the last packet can be less than the mailbox size. */
		if (!eom && packet_size != SDSI_SIZE_MAILBOX) {
			dev_err(dev, "Invalid packet size\n");
			ret = -EPROTO;
			break;
		}

		if (packet_size > SDSI_SIZE_MAILBOX) {
			dev_err(dev, "Packet size too large\n");
			ret = -EPROTO;
			break;
		}

		sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));

		total += packet_size;

		sdsi_complete_transaction(priv);
	} while (!eom && ++loop < MBOX_MAX_PACKETS);

	if (ret) {
		sdsi_complete_transaction(priv);
		return ret;
	}

	if (!eom) {
		dev_err(dev, "Exceeded read attempts\n");
		return -EPROTO;
	}

	/* Message size check is only valid for multi-packet transfers */
	if (loop && total != message_size)
		dev_warn(dev, "Read count %u differs from expected count %u\n",
			 total, message_size);

	*data_size = total;

	return 0;
}

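/*
 * Send a write command: copy the remainder of the payload into the mailbox,
 * set RUN_BUSY/READ_WRITE, then poll for READY and convert the returned
 * status to an errno.
 */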
static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
{
	u64 control;
	u32 status;
	int ret;

	lockdep_assert_held(&priv->mb_lock);

	/* Write the rest of the payload (the first qword was written when acquiring the mailbox) */
	sdsi_memcpy64_toio(priv->mbox_addr + SDSI_SIZE_CMD, info->payload + 1,
			   info->size - SDSI_SIZE_CMD);

	/* Format and send the write command */
	control = FIELD_PREP(CTRL_EOM, 1) |
		  FIELD_PREP(CTRL_SOM, 1) |
		  FIELD_PREP(CTRL_RUN_BUSY, 1) |
		  FIELD_PREP(CTRL_READ_WRITE, 1) |
		  FIELD_PREP(CTRL_PACKET_SIZE, info->size);
	writeq(control, priv->control_addr);

	/* Poll on ready bit */
	ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
				 MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);

	if (ret)
		goto release_mbox;

	status = FIELD_GET(CTRL_STATUS, control);
	ret = sdsi_status_to_errno(status);

release_mbox:
	sdsi_complete_transaction(priv);

	return ret;
}

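/*
 * Acquire mailbox ownership. Writing the first payload qword while the
 * mailbox is unowned claims it for in-band use; ownership is then confirmed
 * by polling the OWNER field, with a bounded number of delayed retries.
 */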
static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
{
	u64 control;
	u32 owner;
	int ret, retries = 0;

	lockdep_assert_held(&priv->mb_lock);

	/* Check mailbox is available */
	control = readq(priv->control_addr);
	owner = FIELD_GET(CTRL_OWNER, control);
	if (owner != MBOX_OWNER_NONE)
		return -EBUSY;

	/*
	 * If there has been no recent transaction and no one owns the mailbox,
	 * we should acquire it in under 1ms. However, if we've accessed it
	 * recently it may take up to 2.1 seconds to acquire it again.
	 */
	do {
		/* Write first qword of payload */
		writeq(info->payload[0], priv->mbox_addr);

		/* Check for ownership */
		ret = readq_poll_timeout(priv->control_addr, control,
			FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
			MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);

		if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
		    retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
			msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
			continue;
		}

		/* Either we got it or someone else did. */
		break;
	} while (true);

	return ret;
}

static int sdsi_mbox_write(struct sdsi_priv *priv, struct sdsi_mbox_info *info)
{
	int ret;

	lockdep_assert_held(&priv->mb_lock);

	ret = sdsi_mbox_acquire(priv, info);
	if (ret)
		return ret;

	return sdsi_mbox_cmd_write(priv, info);
}

static int sdsi_mbox_read(struct sdsi_priv *priv, struct sdsi_mbox_info *info, size_t *data_size)
{
	int ret;

	lockdep_assert_held(&priv->mb_lock);

	ret = sdsi_mbox_acquire(priv, info);
	if (ret)
		return ret;

	return sdsi_mbox_cmd_read(priv, info, data_size);
}

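/*
 * Build and send a provisioning payload: the user data is copied into a
 * qword-aligned buffer and the command is placed in the final qword, so
 * the data may not exceed the write message size minus one command qword.
 */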
static ssize_t sdsi_provision(struct sdsi_priv *priv, char *buf, size_t count,
			      enum sdsi_command command)
{
	struct sdsi_mbox_info info;
	int ret;

	if (count > (SDSI_SIZE_WRITE_MSG - SDSI_SIZE_CMD))
		return -EOVERFLOW;

	/* Qword aligned message + command qword */
	info.size = round_up(count, SDSI_SIZE_CMD) + SDSI_SIZE_CMD;

	info.payload = kzalloc(info.size, GFP_KERNEL);
	if (!info.payload)
		return -ENOMEM;

	/* Copy message to payload buffer */
	memcpy(info.payload, buf, count);

	/* Command is last qword of payload buffer */
	info.payload[(info.size - SDSI_SIZE_CMD) / SDSI_SIZE_CMD] = command;

	ret = mutex_lock_interruptible(&priv->mb_lock);
	if (ret)
		goto free_payload;
	ret = sdsi_mbox_write(priv, &info);
	mutex_unlock(&priv->mb_lock);

free_payload:
	kfree(info.payload);

	if (ret)
		return ret;

	return count;
}

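/*
 * Write-only sysfs handlers for provisioning the AKC and CAP payloads.
 * Writes must start at offset 0 and are forwarded to firmware as a single
 * mailbox write carrying the corresponding provision command.
 */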
static ssize_t provision_akc_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf, loff_t off,
				   size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct sdsi_priv *priv = dev_get_drvdata(dev);

	if (off)
		return -ESPIPE;

	return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_AKC);
}
static BIN_ATTR_WO(provision_akc, SDSI_SIZE_WRITE_MSG);

static ssize_t provision_cap_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf, loff_t off,
				   size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct sdsi_priv *priv = dev_get_drvdata(dev);

	if (off)
		return -ESPIPE;

	return sdsi_provision(priv, buf, count, SDSI_CMD_PROVISION_CAP);
}
static BIN_ATTR_WO(provision_cap, SDSI_SIZE_WRITE_MSG);

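/*
 * Read a state or meter certificate through the mailbox. The whole
 * certificate is returned by a single read at offset 0; reads at a
 * non-zero offset return 0.
 */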
static ssize_t
certificate_read(u64 command, struct sdsi_priv *priv, char *buf, loff_t off,
		 size_t count)
{
	struct sdsi_mbox_info info;
	size_t size;
	int ret;

	if (off)
		return 0;

	/* Buffer for return data */
	info.buffer = kmalloc(SDSI_SIZE_READ_MSG, GFP_KERNEL);
	if (!info.buffer)
		return -ENOMEM;

	info.payload = &command;
	info.size = sizeof(command);

	ret = mutex_lock_interruptible(&priv->mb_lock);
	if (ret)
		goto free_buffer;
	ret = sdsi_mbox_read(priv, &info, &size);
	mutex_unlock(&priv->mb_lock);
	if (ret < 0)
		goto free_buffer;

	if (size > count)
		size = count;

	memcpy(buf, info.buffer, size);

free_buffer:
	kfree(info.buffer);

	if (ret)
		return ret;

	return size;
}

static ssize_t
state_certificate_read(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *attr, char *buf, loff_t off,
		       size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct sdsi_priv *priv = dev_get_drvdata(dev);

	return certificate_read(SDSI_CMD_READ_STATE, priv, buf, off, count);
}
static BIN_ATTR_ADMIN_RO(state_certificate, SDSI_SIZE_READ_MSG);

static ssize_t
meter_certificate_read(struct file *filp, struct kobject *kobj,
		       struct bin_attribute *attr, char *buf, loff_t off,
		       size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct sdsi_priv *priv = dev_get_drvdata(dev);

	return certificate_read(SDSI_CMD_READ_METER, priv, buf, off, count);
}
static BIN_ATTR_ADMIN_RO(meter_certificate, SDSI_SIZE_READ_MSG);

static ssize_t registers_read(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr, char *buf, loff_t off,
			      size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct sdsi_priv *priv = dev_get_drvdata(dev);
	void __iomem *addr = priv->regs_addr;
	int size = priv->registers_size;

	/*
	 * The sysfs caller has already range-checked the request against the
	 * static file size, but that may be larger than the actual size, which
	 * depends on the GUID. Check again here against the actual size before
	 * reading.
	 */
	if (off >= size)
		return 0;

	if (off + count > size)
		count = size - off;

	memcpy_fromio(buf, addr + off, count);

	return count;
}
static BIN_ATTR_ADMIN_RO(registers, SDSI_SIZE_REGS);

static struct bin_attribute *sdsi_bin_attrs[] = {
	&bin_attr_registers,
	&bin_attr_state_certificate,
	&bin_attr_meter_certificate,
	&bin_attr_provision_akc,
	&bin_attr_provision_cap,
	NULL
};

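/*
 * Attribute visibility: the registers file is always exposed, the other
 * files require On Demand to be enabled, and the meter certificate also
 * requires metering support.
 */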
static umode_t
sdsi_battr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct sdsi_priv *priv = dev_get_drvdata(dev);

	/* Registers file is always readable if the device is present */
	if (attr == &bin_attr_registers)
		return attr->attr.mode;

	/* All other attributes are not visible if BIOS has not enabled On Demand */
	if (!(priv->features & SDSI_FEATURE_SDSI))
		return 0;

	if (attr == &bin_attr_meter_certificate)
		return (priv->features & SDSI_FEATURE_METERING) ?
				attr->attr.mode : 0;

	return attr->attr.mode;
}

static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct sdsi_priv *priv = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%x\n", priv->guid);
}
static DEVICE_ATTR_RO(guid);

static struct attribute *sdsi_attrs[] = {
	&dev_attr_guid.attr,
	NULL
};

static const struct attribute_group sdsi_group = {
	.attrs = sdsi_attrs,
	.bin_attrs = sdsi_bin_attrs,
	.is_bin_visible = sdsi_battr_is_visible,
};
__ATTRIBUTE_GROUPS(sdsi);

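/* The control and register region sizes depend on the interface GUID */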
static int sdsi_get_layout(struct sdsi_priv *priv, struct disc_table *table)
{
	switch (table->guid) {
	case SDSI_GUID_V1:
		priv->control_size = GUID_V1_CNTRL_SIZE;
		priv->registers_size = GUID_V1_REGS_SIZE;
		break;
	case SDSI_GUID_V2:
		priv->control_size = GUID_V2_CNTRL_SIZE;
		priv->registers_size = GUID_V2_REGS_SIZE;
		break;
	default:
		dev_err(priv->dev, "Unrecognized GUID 0x%x\n", table->guid);
		return -EINVAL;
	}
	return 0;
}

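/*
 * Locate and map the SDSi MMIO region described by the discovery table.
 * The base is either relative to the end of the discovery region (LOCAL
 * access) or taken from a parent PCI BAR (BARID access). The region holds
 * the control registers, the mailbox and the attribute registers, and the
 * enabled-features qword is cached from the latter.
 */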
static int sdsi_map_mbox_registers(struct sdsi_priv *priv, struct pci_dev *parent,
				   struct disc_table *disc_table, struct resource *disc_res)
{
	u32 access_type = FIELD_GET(DT_ACCESS_TYPE, disc_table->access_info);
	u32 size = FIELD_GET(DT_SIZE, disc_table->access_info);
	u32 tbir = FIELD_GET(DT_TBIR, disc_table->offset);
	u32 offset = DT_OFFSET(disc_table->offset);
	struct resource res = {};

	/* Starting location of SDSi MMIO region based on access type */
	switch (access_type) {
	case ACCESS_TYPE_LOCAL:
		if (tbir) {
			dev_err(priv->dev, "Unsupported BAR index %u for access type %u\n",
				tbir, access_type);
			return -EINVAL;
		}

		/*
		 * For access_type LOCAL, the base address is as follows:
		 * base address = end of discovery region + base offset + 1
		 */
		res.start = disc_res->end + offset + 1;
		break;

	case ACCESS_TYPE_BARID:
		res.start = pci_resource_start(parent, tbir) + offset;
		break;

	default:
		dev_err(priv->dev, "Unrecognized access_type %u\n", access_type);
		return -EINVAL;
	}

	res.end = res.start + size * sizeof(u32) - 1;
	res.flags = IORESOURCE_MEM;

	priv->control_addr = devm_ioremap_resource(priv->dev, &res);
	if (IS_ERR(priv->control_addr))
		return PTR_ERR(priv->control_addr);

	priv->mbox_addr = priv->control_addr + priv->control_size;
	priv->regs_addr = priv->mbox_addr + SDSI_SIZE_MAILBOX;

	priv->features = readq(priv->regs_addr + SDSI_ENABLED_FEATURES_OFFSET);

	return 0;
}

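/*
 * Probe: map the discovery table from the VSEC resource, determine the
 * register layout from its GUID, then map the mailbox registers.
 */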
static int sdsi_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
	struct intel_vsec_device *intel_cap_dev = auxdev_to_ivdev(auxdev);
	struct disc_table disc_table;
	struct resource *disc_res;
	void __iomem *disc_addr;
	struct sdsi_priv *priv;
	int ret;

	priv = devm_kzalloc(&auxdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = &auxdev->dev;
	mutex_init(&priv->mb_lock);
	auxiliary_set_drvdata(auxdev, priv);

	/* Get the SDSi discovery table */
	disc_res = &intel_cap_dev->resource[0];
	disc_addr = devm_ioremap_resource(&auxdev->dev, disc_res);
	if (IS_ERR(disc_addr))
		return PTR_ERR(disc_addr);

	memcpy_fromio(&disc_table, disc_addr, DISC_TABLE_SIZE);

	priv->guid = disc_table.guid;

	/* Get guid based layout info */
	ret = sdsi_get_layout(priv, &disc_table);
	if (ret)
		return ret;

	/* Map the SDSi mailbox registers */
	ret = sdsi_map_mbox_registers(priv, intel_cap_dev->pcidev, &disc_table, disc_res);
	if (ret)
		return ret;

	return 0;
}

static const struct auxiliary_device_id sdsi_aux_id_table[] = {
	{ .name = "intel_vsec.sdsi" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, sdsi_aux_id_table);

static struct auxiliary_driver sdsi_aux_driver = {
	.driver = {
		.dev_groups = sdsi_groups,
	},
	.id_table	= sdsi_aux_id_table,
	.probe		= sdsi_probe,
	/* No remove. All resources are handled under devm */
};
module_auxiliary_driver(sdsi_aux_driver);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel On Demand (SDSi) driver");
MODULE_LICENSE("GPL");
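/*
 * The attributes above appear under the auxiliary device's sysfs directory;
 * on a typical system this is expected to be something like
 * /sys/bus/auxiliary/devices/intel_vsec.sdsi.<N>/, exposing guid, registers,
 * state_certificate, meter_certificate, provision_akc and provision_cap.
 * The exact device name and index depend on platform enumeration.
 */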