1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Speed Select Interface: Common functions
4  * Copyright (c) 2019, Intel Corporation.
5  * All rights reserved.
6  *
7  * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
8  */
9 
10 #include <linux/cpufeature.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/fs.h>
13 #include <linux/hashtable.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/sched/signal.h>
18 #include <linux/slab.h>
19 #include <linux/uaccess.h>
20 #include <uapi/linux/isst_if.h>
21 
22 #include <asm/cpu_device_id.h>
23 #include <asm/intel-family.h>
24 
25 #include "isst_if_common.h"
26 
/* Model-specific MSRs used by the SST interface (not in msr-index.h) */
#define MSR_THREAD_ID_INFO	0x53
#define MSR_PM_LOGICAL_ID	0x54
#define MSR_CPU_BUS_NUMBER	0x128

/* Per-interface-type callbacks registered by the MMIO/MBOX/TPMI drivers */
static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

/* Only these MSRs may be accessed via the ISST_IF_MSR_COMMAND ioctl */
static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
	MSR_PM_LOGICAL_ID,
};
40 
/* One contiguous range of allowed sub-commands for a mailbox command */
struct isst_valid_cmd_ranges {
	u16 cmd;		/* mailbox command id */
	u16 sub_cmd_beg;	/* first allowed sub-command (inclusive) */
	u16 sub_cmd_end;	/* last allowed sub-command (inclusive) */
};

/* Identifies a (cmd, sub_cmd, param) triple that is a "set" request */
struct isst_cmd_set_req_type {
	u16 cmd;
	u16 sub_cmd;
	u16 param;
};

/* Whitelist of mailbox commands accepted from user space */
static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0xD0, 0x00, 0x03},
	{0x7F, 0x00, 0x0C},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
	{0x94, 0x03, 0x03},
	{0x95, 0x03, 0x03},
};
61 
/* Mailbox requests that change PUNIT state and must be replayed on resume */
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0xD0, 0x01, 0x08},
	{0xD0, 0x02, 0x08},
	{0xD0, 0x03, 0x08},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
	{0x95, 0x03, 0x03},
};

/* One stored command (mailbox or MSR write) kept for replay after resume */
struct isst_cmd {
	struct hlist_node hnode;	/* membership in isst_hash */
	u64 data;			/* mailbox request data or MSR value */
	u32 cmd;			/* (cmd << 16 | sub_cmd) or the MSR id */
	int cpu;			/* target logical CPU */
	int mbox_cmd_type;		/* non-zero: mailbox; zero: MSR write */
	u32 param;			/* mailbox parameter */
};

/* Set when the CPU model exposes the punit CPU id via MSR_PM_LOGICAL_ID */
static bool isst_hpm_support;

/* Replay hash, keyed by isst_cmd.cmd; writers serialize on isst_hash_lock */
static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);
85 
isst_store_new_cmd(int cmd,u32 cpu,int mbox_cmd_type,u32 param,u32 data)86 static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
87 			      u32 data)
88 {
89 	struct isst_cmd *sst_cmd;
90 
91 	sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
92 	if (!sst_cmd)
93 		return -ENOMEM;
94 
95 	sst_cmd->cpu = cpu;
96 	sst_cmd->cmd = cmd;
97 	sst_cmd->mbox_cmd_type = mbox_cmd_type;
98 	sst_cmd->param = param;
99 	sst_cmd->data = data;
100 
101 	hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);
102 
103 	return 0;
104 }
105 
/* Remove and free every stored command; used when the MBOX device unregisters */
static void isst_delete_hash(void)
{
	struct isst_cmd *sst_cmd;
	struct hlist_node *tmp;
	int i;

	/* _safe variant: entries are deleted while iterating */
	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
		hash_del(&sst_cmd->hnode);
		kfree(sst_cmd);
	}
}
117 
118 /**
119  * isst_store_cmd() - Store command to a hash table
120  * @cmd: Mailbox command.
121  * @sub_cmd: Mailbox sub-command or MSR id.
122  * @cpu: Target CPU for the command
123  * @mbox_cmd_type: Mailbox or MSR command.
124  * @param: Mailbox parameter.
125  * @data: Mailbox request data or MSR data.
126  *
127  * Stores the command to a hash table if there is no such command already
128  * stored. If already stored update the latest parameter and data for the
129  * command.
130  *
131  * Return: Return result of store to hash table, 0 for success, others for
132  * failure.
133  */
isst_store_cmd(int cmd,int sub_cmd,u32 cpu,int mbox_cmd_type,u32 param,u64 data)134 int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
135 		   u32 param, u64 data)
136 {
137 	struct isst_cmd *sst_cmd;
138 	int full_cmd, ret;
139 
140 	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
141 	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
142 	mutex_lock(&isst_hash_lock);
143 	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
144 		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
145 		    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
146 			sst_cmd->param = param;
147 			sst_cmd->data = data;
148 			mutex_unlock(&isst_hash_lock);
149 			return 0;
150 		}
151 	}
152 
153 	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
154 	mutex_unlock(&isst_hash_lock);
155 
156 	return ret;
157 }
158 EXPORT_SYMBOL_GPL(isst_store_cmd);
159 
/*
 * Replay one stored mailbox command through the registered mailbox
 * callback. The final "1" marks this as a resume replay so the callback
 * does not re-store the command; the result and wr_only output are
 * intentionally ignored (best-effort replay).
 */
static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
				     struct isst_cmd *sst_cmd)
{
	struct isst_if_mbox_cmd mbox_cmd;
	int wr_only;

	/* Unpack the (cmd << 16 | sub_cmd) hash key back into its fields */
	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
	mbox_cmd.parameter = sst_cmd->param;
	mbox_cmd.req_data = sst_cmd->data;
	mbox_cmd.logical_cpu = sst_cmd->cpu;
	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
}
173 
174 /**
175  * isst_resume_common() - Process Resume request
176  *
177  * On resume replay all mailbox commands and MSRs.
178  *
179  * Return: None.
180  */
isst_resume_common(void)181 void isst_resume_common(void)
182 {
183 	struct isst_cmd *sst_cmd;
184 	int i;
185 
186 	hash_for_each(isst_hash, i, sst_cmd, hnode) {
187 		struct isst_if_cmd_cb *cb;
188 
189 		if (sst_cmd->mbox_cmd_type) {
190 			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
191 			if (cb->registered)
192 				isst_mbox_resume_command(cb, sst_cmd);
193 		} else {
194 			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
195 					   sst_cmd->data);
196 		}
197 	}
198 }
199 EXPORT_SYMBOL_GPL(isst_resume_common);
200 
/*
 * Re-apply all stored whitelisted MSR writes for a CPU that just came
 * online, so its SST MSR state matches what user space last requested.
 */
static void isst_restore_msr_local(int cpu)
{
	struct isst_cmd *sst_cmd;
	int i;

	mutex_lock(&isst_hash_lock);
	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (!punit_msr_white_list[i])
			break;

		/* MSR entries are hashed by MSR id with mbox_cmd_type == 0 */
		hash_for_each_possible(isst_hash, sst_cmd, hnode,
				       punit_msr_white_list[i]) {
			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
		}
	}
	mutex_unlock(&isst_hash_lock);
}
219 
220 /**
221  * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
222  * @cmd: Pointer to the command structure to verify.
223  *
224  * Invalid command to PUNIT to may result in instability of the platform.
225  * This function has a whitelist of commands, which are allowed.
226  *
227  * Return: Return true if the command is invalid, else false.
228  */
isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd * cmd)229 bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
230 {
231 	int i;
232 
233 	if (cmd->logical_cpu >= nr_cpu_ids)
234 		return true;
235 
236 	for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
237 		if (cmd->command == isst_valid_cmds[i].cmd &&
238 		    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
239 		     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
240 			return false;
241 		}
242 	}
243 
244 	return true;
245 }
246 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
247 
248 /**
249  * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
250  * @cmd: Pointer to the command structure to verify.
251  *
252  * Check if the given mail box level is set request and not a get request.
253  *
254  * Return: Return true if the command is set_req, else false.
255  */
isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd * cmd)256 bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
257 {
258 	int i;
259 
260 	for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
261 		if (cmd->command == isst_cmd_set_reqs[i].cmd &&
262 		    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
263 		    cmd->parameter == isst_cmd_set_reqs[i].param) {
264 			return true;
265 		}
266 	}
267 
268 	return false;
269 }
270 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
271 
/* Highest api_version among all registered interface callbacks */
static int isst_if_api_version;
273 
/*
 * Fill and copy the platform capability information to user space for
 * the ISST_IF_GET_PLATFORM_INFO ioctl.
 *
 * Return: 0 on success, -EFAULT if the copy to user space fails.
 */
static int isst_if_get_platform_info(void __user *argp)
{
	/*
	 * Zero-initialize: every named field is assigned below, but any
	 * struct padding would otherwise be copied to user space
	 * uninitialized, leaking kernel stack bytes.
	 */
	struct isst_if_platform_info info = {};

	info.api_version = isst_if_api_version;
	info.driver_version = ISST_IF_DRIVER_VERSION;
	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
289 
/* Only PCI bus 0 and bus 1 carry the PUNIT devices of interest */
#define ISST_MAX_BUS_NUMBER	2

/* Per-logical-CPU cached topology and PUNIT PCI device information */
struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[ISST_MAX_BUS_NUMBER];	/* bus numbers; -1 if unknown */
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
	int punit_cpu_id;	/* PUNIT CPU id; -1 when unreadable */
	int numa_node;
};

/* Per-package cache of the NUMA-matched PUNIT PCI devices */
struct isst_if_pkg_info {
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
};

static struct isst_if_cpu_info *isst_cpu_info;
static struct isst_if_pkg_info *isst_pkg_info;
306 
/*
 * Walk all PCI devices looking for one on this CPU's cached bus number
 * that matches (dev, fn), preferring a device whose NUMA node equals the
 * CPU's. A NUMA-matched device is also cached per package so other CPUs
 * in the same package can reuse it.
 */
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *matched_pci_dev = NULL;
	struct pci_dev *pci_dev = NULL;
	struct pci_dev *_pci_dev = NULL;
	int no_matches = 0, pkg_id;
	int bus_number;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pkg_id = topology_logical_package_id(cpu);
	if (pkg_id >= topology_max_packages())
		return NULL;

	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
	if (bus_number < 0)
		return NULL;

	for_each_pci_dev(_pci_dev) {
		int node;

		if (_pci_dev->bus->number != bus_number ||
		    _pci_dev->devfn != PCI_DEVFN(dev, fn))
			continue;

		/* Remember the first match in case NUMA info is unusable */
		++no_matches;
		if (!matched_pci_dev)
			matched_pci_dev = _pci_dev;

		node = dev_to_node(&_pci_dev->dev);
		if (node == NUMA_NO_NODE) {
			pr_info_once("Fail to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
				     cpu, bus_no, dev, fn);
			continue;
		}

		if (node == isst_cpu_info[cpu].numa_node) {
			/* Cache per package so sibling CPUs can reuse it */
			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;

			pci_dev = _pci_dev;
			break;
		}
	}

	/*
	 * If there is no numa matched pci_dev, then there can be following cases:
	 * 1. CONFIG_NUMA is not defined: In this case if there is only single device
	 *    match, then we don't need numa information. Simply return last match.
	 *    Otherwise return NULL.
	 * 2. NUMA information is not exposed via _SEG method. In this case it is similar
	 *    to case 1.
	 * 3. Numa information doesn't match with CPU numa node and more than one match
	 *    return NULL.
	 */
	if (!pci_dev && no_matches == 1)
		pci_dev = matched_pci_dev;

	/* Return pci_dev pointer for any matched CPU in the package */
	if (!pci_dev)
		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

	return pci_dev;
}
372 
373 /**
374  * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
375  * @cpu: Logical CPU number.
376  * @bus_no: The bus number assigned by the hardware.
377  * @dev: The device number assigned by the hardware.
378  * @fn: The function number assigned by the hardware.
379  *
380  * Using cached bus information, find out the PCI device for a bus number,
381  * device and function.
382  *
383  * Return: Return pci_dev pointer or NULL.
384  */
isst_if_get_pci_dev(int cpu,int bus_no,int dev,int fn)385 struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
386 {
387 	struct pci_dev *pci_dev;
388 
389 	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER  || cpu < 0 ||
390 	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
391 		return NULL;
392 
393 	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
394 
395 	if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
396 		return pci_dev;
397 
398 	return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
399 }
400 EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
401 
/*
 * CPU hotplug "online" callback: cache the CPU's NUMA node and PUNIT bus
 * numbers from MSR_CPU_BUS_NUMBER, resolve the PUNIT PCI devices, read
 * the punit CPU id, and restore any stored MSR writes for this CPU.
 */
static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on MSR mailbox only I/F */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
		/* Byte 0 holds the bus-0 number, byte 1 the bus-1 number */
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
	}

	if (isst_hpm_support) {

		/* HPM systems expose the punit CPU id via MSR_PM_LOGICAL_ID */
		ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
		if (!ret)
			goto set_punit_id;
	}

	/* Fall back to the legacy thread-id-info MSR */
	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}

set_punit_id:
	isst_cpu_info[cpu].punit_cpu_id = data;

	isst_restore_msr_local(cpu);

	return 0;
}
441 
/* Dynamic CPU hotplug state id returned by cpuhp_setup_state() */
static int isst_if_online_id;
443 
isst_if_cpu_info_init(void)444 static int isst_if_cpu_info_init(void)
445 {
446 	int ret;
447 
448 	isst_cpu_info = kcalloc(num_possible_cpus(),
449 				sizeof(*isst_cpu_info),
450 				GFP_KERNEL);
451 	if (!isst_cpu_info)
452 		return -ENOMEM;
453 
454 	isst_pkg_info = kcalloc(topology_max_packages(),
455 				sizeof(*isst_pkg_info),
456 				GFP_KERNEL);
457 	if (!isst_pkg_info) {
458 		kfree(isst_cpu_info);
459 		return -ENOMEM;
460 	}
461 
462 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
463 				"platform/x86/isst-if:online",
464 				isst_if_cpu_online, NULL);
465 	if (ret < 0) {
466 		kfree(isst_pkg_info);
467 		kfree(isst_cpu_info);
468 		return ret;
469 	}
470 
471 	isst_if_online_id = ret;
472 
473 	return 0;
474 }
475 
isst_if_cpu_info_exit(void)476 static void isst_if_cpu_info_exit(void)
477 {
478 	cpuhp_remove_state(isst_if_online_id);
479 	kfree(isst_pkg_info);
480 	kfree(isst_cpu_info);
481 };
482 
/*
 * Command callback for ISST_IF_GET_PHY_ID: translate one logical CPU
 * number to the cached punit (physical) CPU id. The result is written
 * back into the command buffer, so the command is never write-only.
 */
static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *map = (struct isst_if_cpu_map *)cmd_ptr;

	if (map->logical_cpu >= nr_cpu_ids ||
	    map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	map->physical_cpu = isst_cpu_info[map->logical_cpu].punit_cpu_id;
	*write_only = 0;

	return 0;
}
497 
match_punit_msr_white_list(int msr)498 static bool match_punit_msr_white_list(int msr)
499 {
500 	int i;
501 
502 	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
503 		if (punit_msr_white_list[i] == msr)
504 			return true;
505 	}
506 
507 	return false;
508 }
509 
/*
 * Command callback for ISST_IF_MSR_COMMAND: read or write a whitelisted
 * MSR on the requested logical CPU. Writes require CAP_SYS_ADMIN and,
 * unless this is a resume replay, are stored for replay after resume.
 */
static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		/* Write request: privileged, nothing copied back to user */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		/* cmd 0 with mbox_cmd_type 0 marks an MSR entry in the hash */
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		/* Read request: value is returned via the command buffer */
		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			msr_cmd->data = data;
			*write_only = 0;
		}
	}


	return ret;
}
549 
/*
 * Execute a batch of commands from user space. The user buffer begins
 * with a u32 command count, followed (at cb->offset) by that many
 * fixed-size command structures, each dispatched to cb->cmd_callback.
 *
 * Return: the number of commands completed if at least one succeeded
 * (a partial batch is reported via the count), otherwise the error code
 * from the first command's failure.
 */
static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to start of the command after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		/* Allow a long batch to be interrupted by a signal */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		/* Copy results back unless the command was write-only */
		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

	/* Partial success reports the completed count, not the error */
	return i ? i : ret;
}
601 
isst_if_def_ioctl(struct file * file,unsigned int cmd,unsigned long arg)602 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
603 			      unsigned long arg)
604 {
605 	void __user *argp = (void __user *)arg;
606 	struct isst_if_cmd_cb cmd_cb;
607 	struct isst_if_cmd_cb *cb;
608 	long ret = -ENOTTY;
609 	int i;
610 
611 	switch (cmd) {
612 	case ISST_IF_GET_PLATFORM_INFO:
613 		ret = isst_if_get_platform_info(argp);
614 		break;
615 	case ISST_IF_GET_PHY_ID:
616 		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
617 		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
618 		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
619 		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
620 		break;
621 	case ISST_IF_IO_CMD:
622 		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
623 		if (cb->registered)
624 			ret = isst_if_exec_multi_cmd(argp, cb);
625 		break;
626 	case ISST_IF_MBOX_COMMAND:
627 		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
628 		if (cb->registered)
629 			ret = isst_if_exec_multi_cmd(argp, cb);
630 		break;
631 	case ISST_IF_MSR_COMMAND:
632 		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
633 		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
634 		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
635 		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
636 		break;
637 	default:
638 		for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
639 			struct isst_if_cmd_cb *cb = &punit_callbacks[i];
640 			int ret;
641 
642 			if (cb->def_ioctl) {
643 				ret = cb->def_ioctl(file, cmd, arg);
644 				if (!ret)
645 					return ret;
646 			}
647 		}
648 		break;
649 	}
650 
651 	return ret;
652 }
653 
/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
/* Lock to allow one shared misc device for all ISST interfaces */
static DEFINE_MUTEX(punit_misc_dev_reg_lock);
static int misc_usage_count;	/* how many interface drivers registered */
static int misc_device_ret;	/* sticky result of the first registration */
static int misc_device_open;	/* user-space opens currently outstanding */
661 
/*
 * Open the misc device: take a module reference on every registered
 * interface driver so none can be unloaded while user space holds the
 * device open. All references are rolled back if any try_module_get()
 * fails.
 */
static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail open, if a module is going away */
	mutex_lock(&punit_misc_dev_open_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		/* Drop the references taken before the failure point */
		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return ret;
}
693 
/*
 * Release the misc device: drop the module references taken in
 * isst_if_open(). (The historical "relase" spelling is kept — the name
 * is wired into isst_if_char_driver_ops.)
 */
static int isst_if_relase(struct inode *inode, struct file *f)
{
	int i;

	mutex_lock(&punit_misc_dev_open_lock);
	misc_device_open--;
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered)
			module_put(cb->owner);
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}
710 
/* File operations for the shared "isst_interface" character device */
static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_relase,
};

/* Single misc device shared by all registered ISST interface types */
static struct miscdevice isst_if_char_driver = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "isst_interface",
	.fops		= &isst_if_char_driver_ops,
};
722 
/* CPU models where the punit CPU id comes from MSR_PM_LOGICAL_ID (HPM) */
static const struct x86_cpu_id hpm_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X,	NULL),
	{}
};
730 
/*
 * Register the shared misc device on first use; later callers only bump
 * the usage count. A registration failure is sticky: misc_device_ret
 * keeps the error and every subsequent caller receives it.
 */
static int isst_misc_reg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_device_ret)
		goto unlock_exit;

	if (!misc_usage_count) {
		const struct x86_cpu_id *id;

		/* Detect HPM support before the hotplug callback needs it */
		id = x86_match_cpu(hpm_cpu_ids);
		if (id)
			isst_hpm_support = true;

		misc_device_ret = isst_if_cpu_info_init();
		if (misc_device_ret)
			goto unlock_exit;

		misc_device_ret = misc_register(&isst_if_char_driver);
		if (misc_device_ret) {
			isst_if_cpu_info_exit();
			goto unlock_exit;
		}
	}
	misc_usage_count++;

unlock_exit:
	mutex_unlock(&punit_misc_dev_reg_lock);

	return misc_device_ret;
}
761 
/*
 * Drop one usage reference; when the last registered interface goes away
 * (and registration had succeeded), deregister the misc device and free
 * the CPU info bookkeeping.
 */
static void isst_misc_unreg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_usage_count)
		misc_usage_count--;
	if (!misc_usage_count && !misc_device_ret) {
		misc_deregister(&isst_if_char_driver);
		isst_if_cpu_info_exit();
	}
	mutex_unlock(&punit_misc_dev_reg_lock);
}
773 
774 /**
775  * isst_if_cdev_register() - Register callback for IOCTL
776  * @device_type: The device type this callback handling.
777  * @cb:	Callback structure.
778  *
779  * This function registers a callback to device type. On very first call
780  * it will register a misc device, which is used for user kernel interface.
781  * Other calls simply increment ref count. Registry will fail, if the user
782  * already opened misc device for operation. Also if the misc device
783  * creation failed, then it will not try again and all callers will get
784  * failure code.
785  *
786  * Return: Return the return value from the misc creation device or -EINVAL
787  * for unsupported device type.
788  */
isst_if_cdev_register(int device_type,struct isst_if_cmd_cb * cb)789 int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
790 {
791 	int ret;
792 
793 	if (device_type >= ISST_IF_DEV_MAX)
794 		return -EINVAL;
795 
796 	mutex_lock(&punit_misc_dev_open_lock);
797 	/* Device is already open, we don't want to add new callbacks */
798 	if (misc_device_open) {
799 		mutex_unlock(&punit_misc_dev_open_lock);
800 		return -EAGAIN;
801 	}
802 	if (!cb->api_version)
803 		cb->api_version = ISST_IF_API_VERSION;
804 	if (cb->api_version > isst_if_api_version)
805 		isst_if_api_version = cb->api_version;
806 	memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
807 	punit_callbacks[device_type].registered = 1;
808 	mutex_unlock(&punit_misc_dev_open_lock);
809 
810 	ret = isst_misc_reg();
811 	if (ret) {
812 		/*
813 		 * No need of mutex as the misc device register failed
814 		 * as no one can open device yet. Hence no contention.
815 		 */
816 		punit_callbacks[device_type].registered = 0;
817 		return ret;
818 	}
819 	return 0;
820 }
821 EXPORT_SYMBOL_GPL(isst_if_cdev_register);
822 
823 /**
824  * isst_if_cdev_unregister() - Unregister callback for IOCTL
825  * @device_type: The device type to unregister.
826  *
827  * This function unregisters the previously registered callback. If this
828  * is the last callback unregistering, then misc device is removed.
829  *
830  * Return: None.
831  */
isst_if_cdev_unregister(int device_type)832 void isst_if_cdev_unregister(int device_type)
833 {
834 	isst_misc_unreg();
835 	mutex_lock(&punit_misc_dev_open_lock);
836 	punit_callbacks[device_type].def_ioctl = NULL;
837 	punit_callbacks[device_type].registered = 0;
838 	if (device_type == ISST_IF_DEV_MBOX)
839 		isst_delete_hash();
840 	mutex_unlock(&punit_misc_dev_open_lock);
841 }
842 EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
843 
844 MODULE_LICENSE("GPL v2");
845