/* Source: /openbmc/linux/sound/pci/asihpi/hpioctl.c (revision ee7da21a) */
// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
    AudioScience HPI driver
    Common Linux HPI ioctl and module probe/remove functions

    Copyright (C) 1997-2014  AudioScience Inc. <support@audioscience.com>


*******************************************************************************/
#define SOURCEFILE_NAME "hpioctl.c"

#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpidebug.h"
#include "hpimsgx.h"
#include "hpioctl.h"
#include "hpicmn.h"

#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/nospec.h>

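/* DSP firmware images the various adapter families may load; listing them
 * with MODULE_FIRMWARE() records the names in the module metadata so tools
 * such as modinfo and initramfs generators can discover them.
 */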
#ifdef MODULE_FIRMWARE
MODULE_FIRMWARE("asihpi/dsp5000.bin");
MODULE_FIRMWARE("asihpi/dsp6200.bin");
MODULE_FIRMWARE("asihpi/dsp6205.bin");
MODULE_FIRMWARE("asihpi/dsp6400.bin");
MODULE_FIRMWARE("asihpi/dsp6600.bin");
MODULE_FIRMWARE("asihpi/dsp8700.bin");
MODULE_FIRMWARE("asihpi/dsp8900.bin");
#endif

static int prealloc_stream_buf;
module_param(prealloc_stream_buf, int, 0444);
MODULE_PARM_DESC(prealloc_stream_buf,
	"Preallocate size for per-adapter stream buffer");

/* Allow the debug level to be changed after module load, e.g.
 *   echo 2 > /sys/module/asihpi/parameters/hpi_debug_level
 */
module_param(hpi_debug_level, int, 0644);
MODULE_PARM_DESC(hpi_debug_level, "debug verbosity 0..5");

/* List of adapters found */
static struct hpi_adapter adapters[HPI_MAX_ADAPTERS];

/* Wrapper around hpi_send_recv_ex() for messages that carry a file owner;
   rejects out-of-range adapter indices for any non-subsystem object.
*/
static void hpi_send_recv_f(struct hpi_message *phm, struct hpi_response *phr,
	struct file *file)
{
	if ((phm->adapter_index >= HPI_MAX_ADAPTERS)
		&& (phm->object != HPI_OBJ_SUBSYSTEM))
		phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
	else
		hpi_send_recv_ex(phm, phr, file);
}

/* This is called from the hpifunc.c functions, which are invoked by ALSA
 * (or another kernel caller).  In this case there is no file descriptor
 * available for the message cache code.
 */
void hpi_send_recv(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_send_recv_f(phm, phr, HOWNER_KERNEL);
}

EXPORT_SYMBOL(hpi_send_recv);
/* for radio-asihpi */

int asihpi_hpi_release(struct file *file)
{
	struct hpi_message hm;
	struct hpi_response hr;

/* HPI_DEBUG_LOG(INFO,"hpi_release file %p, pid %d\n", file, current->pid); */
	/* close the subsystem just in case the application forgot to. */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CLOSE);
	hpi_send_recv_ex(&hm, &hr, file);
	return 0;
}

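/*
 * asihpi_hpi_ioctl() implements the single HPI_IOCTL_LINUX ioctl used by the
 * userspace HPI library.  The argument is a struct hpi_ioctl_linux holding
 * two user pointers: phm (message) and phr (response).  The handler copies
 * the message into kernel buffers, bounces any stream data through the
 * per-adapter buffer, dispatches the message, and copies the response back.
 *
 * A minimal sketch of the expected call from userspace (the device fd and
 * the message contents are hypothetical, not defined in this file):
 *
 *	union hpi_message_buffer_v1 msg;	// filled in by the HPI library
 *	union hpi_response_buffer_v1 resp;
 *	struct hpi_ioctl_linux io;
 *
 *	resp.h.size = sizeof(resp);	// first u16 = response buffer capacity
 *	io.phm = &msg;
 *	io.phr = &resp;
 *	ioctl(fd, HPI_IOCTL_LINUX, &io);
 */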
long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpi_ioctl_linux __user *phpi_ioctl_data;
	void __user *puhm;
	void __user *puhr;
	union hpi_message_buffer_v1 *hm;
	union hpi_response_buffer_v1 *hr;
	u16 msg_size;
	u16 res_max_size;
	u32 uncopied_bytes;
	int err = 0;

	if (cmd != HPI_IOCTL_LINUX)
		return -EINVAL;

	hm = kmalloc(sizeof(*hm), GFP_KERNEL);
	hr = kzalloc(sizeof(*hr), GFP_KERNEL);
	if (!hm || !hr) {
		err = -ENOMEM;
		goto out;
	}

	phpi_ioctl_data = (struct hpi_ioctl_linux __user *)arg;

	/* Read the message and response pointers from user space.  */
	if (get_user(puhm, &phpi_ioctl_data->phm)
		|| get_user(puhr, &phpi_ioctl_data->phr)) {
		err = -EFAULT;
		goto out;
	}

	/* Now read the message size and data from user space.  */
	if (get_user(msg_size, (u16 __user *)puhm)) {
		err = -EFAULT;
		goto out;
	}
	if (msg_size > sizeof(*hm))
		msg_size = sizeof(*hm);

	/* printk(KERN_INFO "message size %d\n", hm->h.wSize); */

	uncopied_bytes = copy_from_user(hm, puhm, msg_size);
	if (uncopied_bytes) {
		HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
		err = -EFAULT;
		goto out;
	}

	/* Override h.size in case it is changed between two userspace fetches */
	hm->h.size = msg_size;

	if (get_user(res_max_size, (u16 __user *)puhr)) {
		err = -EFAULT;
		goto out;
	}
	/* printk(KERN_INFO "user response size %d\n", res_max_size); */
	if (res_max_size < sizeof(struct hpi_response_header)) {
		HPI_DEBUG_LOG(WARNING, "small res size %d\n", res_max_size);
		err = -EFAULT;
		goto out;
	}

	res_max_size = min_t(size_t, res_max_size, sizeof(*hr));

	switch (hm->h.function) {
	case HPI_SUBSYS_CREATE_ADAPTER:
	case HPI_ADAPTER_DELETE:
		/* Application must not use these functions! */
		hr->h.size = sizeof(hr->h);
		hr->h.error = HPI_ERROR_INVALID_OPERATION;
		hr->h.function = hm->h.function;
		uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
		if (uncopied_bytes)
			err = -EFAULT;
		else
			err = 0;
		goto out;
	}

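	/* Subsystem messages need no adapter lookup; anything else must match
	 * a live adapter, and stream data is bounced through that adapter's
	 * buffer while its mutex is held.
	 */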
	hr->h.size = res_max_size;
	if (hm->h.object == HPI_OBJ_SUBSYSTEM) {
		hpi_send_recv_f(&hm->m0, &hr->r0, file);
	} else {
		u16 __user *ptr = NULL;
		u32 size = 0;
		/* -1=no data 0=read from user mem, 1=write to user mem */
		int wrflag = -1;
		struct hpi_adapter *pa = NULL;

		if (hm->h.adapter_index < ARRAY_SIZE(adapters))
			pa = &adapters[array_index_nospec(hm->h.adapter_index,
							  ARRAY_SIZE(adapters))];

		if (!pa || !pa->adapter || !pa->adapter->type) {
			hpi_init_response(&hr->r0, hm->h.object,
				hm->h.function, HPI_ERROR_BAD_ADAPTER_NUMBER);

			uncopied_bytes =
				copy_to_user(puhr, hr, sizeof(hr->h));
			if (uncopied_bytes)
				err = -EFAULT;
			else
				err = 0;
			goto out;
		}

		if (mutex_lock_interruptible(&pa->mutex)) {
			err = -EINTR;
			goto out;
		}

		/* Dig out any pointers embedded in the message.  */
		switch (hm->h.function) {
		case HPI_OSTREAM_WRITE:
		case HPI_ISTREAM_READ:{
				/* Yes, sparse, this is correct. */
				ptr = (u16 __user *)hm->m0.u.d.u.data.pb_data;
				size = hm->m0.u.d.u.data.data_size;

				/* Allocate (or grow) the bounce buffer to
				   match the application's request.
				   (Open question: would it be better to
				   alloc/free for the duration of the
				   transaction?)
				 */
				if (pa->buffer_size < size) {
					HPI_DEBUG_LOG(DEBUG,
						"Realloc adapter %d stream "
						"buffer from %zd to %d\n",
						hm->h.adapter_index,
						pa->buffer_size, size);
					if (pa->p_buffer) {
						pa->buffer_size = 0;
						vfree(pa->p_buffer);
					}
					pa->p_buffer = vmalloc(size);
					if (pa->p_buffer)
						pa->buffer_size = size;
					else {
						HPI_DEBUG_LOG(ERROR,
							"HPI could not allocate "
							"stream buffer size %d\n",
							size);

						mutex_unlock(&pa->mutex);
						err = -EINVAL;
						goto out;
					}
				}

				hm->m0.u.d.u.data.pb_data = pa->p_buffer;
				if (hm->h.function == HPI_ISTREAM_READ)
					/* from card, WRITE to user mem */
					wrflag = 1;
				else
					wrflag = 0;
				break;
			}

		default:
			size = 0;
			break;
		}

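		/* For HPI_OSTREAM_WRITE the user data is staged into the
		 * bounce buffer before the message is sent; for
		 * HPI_ISTREAM_READ it is copied back out afterwards.
		 */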
		if (size && (wrflag == 0)) {
			uncopied_bytes =
				copy_from_user(pa->p_buffer, ptr, size);
			if (uncopied_bytes)
				HPI_DEBUG_LOG(WARNING,
					"Missed %d of %d "
					"bytes from user\n", uncopied_bytes,
					size);
		}

		hpi_send_recv_f(&hm->m0, &hr->r0, file);

		if (size && (wrflag == 1)) {
			uncopied_bytes =
				copy_to_user(ptr, pa->p_buffer, size);
			if (uncopied_bytes)
				HPI_DEBUG_LOG(WARNING,
					"Missed %d of %d " "bytes to user\n",
					uncopied_bytes, size);
		}

		mutex_unlock(&pa->mutex);
	}

	/* on return response size must be set */
	/*printk(KERN_INFO "response size %d\n", hr->h.wSize); */

	if (!hr->h.size) {
		HPI_DEBUG_LOG(ERROR, "response zero size\n");
		err = -EFAULT;
		goto out;
	}

	if (hr->h.size > res_max_size) {
		HPI_DEBUG_LOG(ERROR, "response too big %d %d\n", hr->h.size,
			res_max_size);
		hr->h.error = HPI_ERROR_RESPONSE_BUFFER_TOO_SMALL;
		hr->h.specific_error = hr->h.size;
		hr->h.size = sizeof(hr->h);
	}

	uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
	if (uncopied_bytes) {
		HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
		err = -EFAULT;
		goto out;
	}

out:
	kfree(hm);
	kfree(hr);
	return err;
}

static int asihpi_irq_count;

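/*
 * Interrupt handling uses the threaded-IRQ split: asihpi_isr() runs in hard
 * interrupt context and only asks the adapter to query and clear its
 * interrupt; when a low-latency interrupt_callback is registered, the real
 * work is deferred to asihpi_isr_thread().
 */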
static irqreturn_t asihpi_isr(int irq, void *dev_id)
{
	struct hpi_adapter *a = dev_id;
	int handled;

	if (!a->adapter->irq_query_and_clear) {
		pr_err("asihpi_isr ASI%04X:%d no handler\n", a->adapter->type,
			a->adapter->index);
		return IRQ_NONE;
	}

	handled = a->adapter->irq_query_and_clear(a->adapter, 0);

	if (!handled)
		return IRQ_NONE;

	asihpi_irq_count++;
	/* printk(KERN_INFO "asihpi_isr %d ASI%04X:%d irq handled\n",
	   asihpi_irq_count, a->adapter->type, a->adapter->index); */

	if (a->interrupt_callback)
		return IRQ_WAKE_THREAD;

	return IRQ_HANDLED;
}

static irqreturn_t asihpi_isr_thread(int irq, void *dev_id)
{
	struct hpi_adapter *a = dev_id;

	if (a->interrupt_callback)
		a->interrupt_callback(a);
	return IRQ_HANDLED;
}

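/*
 * PCI probe: enable the device, map its memory BARs, ask the HPI core to
 * create an adapter object for them, optionally preallocate the stream
 * bounce buffer, open the adapter, and then choose between interrupt-driven
 * operation (low-latency mode with IRQ support) and polled mode.
 */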
int asihpi_adapter_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *pci_id)
{
	int idx, nm, low_latency_mode = 0, irq_supported = 0;
	int adapter_index;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci = { 0 };

	memset(&adapter, 0, sizeof(adapter));

	dev_printk(KERN_DEBUG, &pci_dev->dev,
		"probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
		pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	if (pci_enable_device(pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"pci_enable_device failed, disabling device\n");
		return -EIO;
	}

	pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
			&pci_dev->resource[idx]);

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			pci.ap_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!pci.ap_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}
	}

	pci.pci_dev = pci_dev;
	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	adapter_index = hr.u.s.adapter_index;
	adapter.adapter = hpi_find_adapter(adapter_index);

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

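	/* If prealloc_stream_buf is zero, the bounce buffer is instead
	 * allocated on demand in asihpi_hpi_ioctl().
	 */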
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (hr.error) {
		HPI_DEBUG_LOG(ERROR, "HPI_ADAPTER_OPEN failed, aborting\n");
		goto err;
	}

	/* Check if current mode == Low Latency mode */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_MODE);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (!hr.error
		&& hr.u.ax.mode.adapter_mode == HPI_ADAPTER_MODE_LOW_LATENCY)
		low_latency_mode = 1;
	else
		dev_info(&pci_dev->dev,
			"Adapter at index %d is not in low latency mode\n",
			adapter.adapter->index);

	/* Check if IRQs are supported */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_PROPERTY);
	hm.adapter_index = adapter.adapter->index;
	hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_SUPPORTS_IRQ;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error || !hr.u.ax.property_get.parameter1) {
		dev_info(&pci_dev->dev,
			"IRQs not supported by adapter at index %d\n",
			adapter.adapter->index);
	} else {
		irq_supported = 1;
	}

	/* WARNING: the mutex must not be initialized in the local 'adapter'
	 * and then copied into adapters[]; initialize it in place after the
	 * struct copy below.
	 */
	adapters[adapter_index] = adapter;
	mutex_init(&adapters[adapter_index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter_index]);

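	/* Use interrupts only when the adapter is in low-latency mode and
	 * reports IRQ support; otherwise stay in polled mode.
	 */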
	if (low_latency_mode && irq_supported) {
		if (!adapter.adapter->irq_query_and_clear) {
			dev_err(&pci_dev->dev,
				"no IRQ handler for adapter %d, aborting\n",
				adapter.adapter->index);
			goto err;
		}

		/* Disable IRQ generation on DSP side by setting the rate to 0 */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
			HPI_ADAPTER_SET_PROPERTY);
		hm.adapter_index = adapter.adapter->index;
		hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_IRQ_RATE;
		hm.u.ax.property_set.parameter1 = 0;
		hm.u.ax.property_set.parameter2 = 0;
		hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
		if (hr.error) {
			HPI_DEBUG_LOG(ERROR,
				"HPI_ADAPTER_SET_PROPERTY failed, aborting\n");
			goto err;
		}

		/* Register asihpi_isr as the hard IRQ handler and
		 * asihpi_isr_thread as the threaded bottom half.
		 */
		if (request_threaded_irq(pci_dev->irq, asihpi_isr,
					 asihpi_isr_thread, IRQF_SHARED,
					 "asihpi", &adapters[adapter_index])) {
			dev_err(&pci_dev->dev, "request_irq(%d) failed\n",
				pci_dev->irq);
			goto err;
		}

		adapters[adapter_index].interrupt_mode = 1;

		dev_info(&pci_dev->dev, "using irq %d\n", pci_dev->irq);
		adapters[adapter_index].irq = pci_dev->irq;
	} else {
		dev_info(&pci_dev->dev, "using polled mode\n");
	}

	dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
		 adapter.adapter->type, adapter_index);

	return 0;

err:
	while (--idx >= 0) {
		if (pci.ap_mem_base[idx]) {
			iounmap(pci.ap_mem_base[idx]);
			pci.ap_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}

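/*
 * PCI remove: mirror of probe.  Disable interrupt generation on the DSP,
 * delete the HPI adapter object, unmap the PCI memory spaces, free the IRQ
 * if one was requested, and release the stream bounce buffer.
 */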
void asihpi_adapter_remove(struct pci_dev *pci_dev)
{
	int idx;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter *pa;
	struct hpi_pci pci;

	pa = pci_get_drvdata(pci_dev);
	pci = pa->adapter->pci;

	/* Disable IRQ generation on DSP side */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_SET_PROPERTY);
	hm.adapter_index = pa->adapter->index;
	hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_IRQ_RATE;
	hm.u.ax.property_set.parameter1 = 0;
	hm.u.ax.property_set.parameter2 = 0;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_DELETE);
	hm.adapter_index = pa->adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	/* unmap PCI memory space, mapped during device init. */
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; ++idx)
		iounmap(pci.ap_mem_base[idx]);

	if (pa->irq)
		free_irq(pa->irq, pa);

	vfree(pa->p_buffer);

	dev_info(&pci_dev->dev,
		 "remove %04x:%04x,%04x:%04x,%04x, HPI index %d\n",
		 pci_dev->vendor, pci_dev->device,
		 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
		 pci_dev->devfn, pa->adapter->index);

	memset(pa, 0, sizeof(*pa));
}

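/*
 * asihpi_init()/asihpi_exit() bracket the driver's lifetime with
 * HPI_SUBSYS_DRIVER_LOAD/UNLOAD messages; they are intended to be called
 * once from the module's init and exit paths.
 */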
void __init asihpi_init(void)
{
	struct hpi_message hm;
	struct hpi_response hr;

	memset(adapters, 0, sizeof(adapters));

	printk(KERN_INFO "ASIHPI driver " HPI_VER_STRING "\n");

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_DRIVER_LOAD);
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
}

void asihpi_exit(void)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_DRIVER_UNLOAD);
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
}
595