// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016-17 IBM Corp.
 */

#define pr_fmt(fmt) "vas: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <asm/prom.h>
#include <asm/xive.h>

#include "vas.h"

DEFINE_MUTEX(vas_mutex);
static LIST_HEAD(vas_instances);

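/* Per-CPU cache of the VAS instance id for the chip each CPU sits on. */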
static DEFINE_PER_CPU(int, cpu_vas_id);

static int vas_irq_fault_window_setup(struct vas_instance *vinst)
{
	return vas_setup_fault_window(vinst);
}

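/*
 * Set up one VAS instance from its device tree node: read the
 * "ibm,vas-id" and "ibm,chip-id" properties, record the window context
 * BARs and paste address geometry from the four resources, wire up the
 * fault interrupt, and add the instance to the global vas_instances list.
 */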
static int init_vas_instance(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct vas_instance *vinst;
	struct xive_irq_data *xd;
	uint32_t chipid, hwirq;
	struct resource *res;
	int rc, cpu, vasid;

	rc = of_property_read_u32(dn, "ibm,vas-id", &vasid);
	if (rc) {
		pr_err("No ibm,vas-id property for %s?\n", pdev->name);
		return -ENODEV;
	}

	rc = of_property_read_u32(dn, "ibm,chip-id", &chipid);
	if (rc) {
		pr_err("No ibm,chip-id property for %s?\n", pdev->name);
		return -ENODEV;
	}

	if (pdev->num_resources != 4) {
		pr_err("Unexpected DT configuration for [%s, %d]\n",
				pdev->name, vasid);
		return -ENODEV;
	}

	vinst = kzalloc(sizeof(*vinst), GFP_KERNEL);
	if (!vinst)
		return -ENOMEM;

	INIT_LIST_HEAD(&vinst->node);
	ida_init(&vinst->ida);
	mutex_init(&vinst->mutex);
	vinst->vas_id = vasid;
	vinst->pdev = pdev;

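	/*
	 * The four DT resources are expected to be, in order: the
	 * hypervisor window context (HVWC) BAR, the OS/user window
	 * context (UWC) BAR, the paste base address, and the bit
	 * position used to derive paste_win_id_shift.
	 */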
	res = &pdev->resource[0];
	vinst->hvwc_bar_start = res->start;

	res = &pdev->resource[1];
	vinst->uwc_bar_start = res->start;

	res = &pdev->resource[2];
	vinst->paste_base_addr = res->start;

	res = &pdev->resource[3];
	if (res->end > 62) {
		pr_err("Bad 'paste_win_id_shift' in DT, %llx\n", res->end);
		rc = -ENODEV;
		goto free_vinst;
	}

	vinst->paste_win_id_shift = 63 - res->end;

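	/*
	 * Allocate an XIVE interrupt on this chip for fault notifications
	 * and map it into the Linux IRQ space.
	 */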
	hwirq = xive_native_alloc_irq_on_chip(chipid);
	if (!hwirq) {
		pr_err("Inst%d: Unable to allocate global irq for chip %d\n",
				vinst->vas_id, chipid);
		rc = -ENOENT;
		goto free_vinst;
	}

	vinst->virq = irq_create_mapping(NULL, hwirq);
	if (!vinst->virq) {
		pr_err("Inst%d: Unable to map global irq %d\n",
				vinst->vas_id, hwirq);
		rc = -EINVAL;
		goto free_vinst;
	}

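	/*
	 * Save the XIVE trigger page of this interrupt as the IRQ port;
	 * the fault window setup hands it to the hardware so that faults
	 * raise this interrupt.
	 */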
	xd = irq_get_handler_data(vinst->virq);
	if (!xd) {
		pr_err("Inst%d: Invalid virq %d\n",
				vinst->vas_id, vinst->virq);
		rc = -EINVAL;
		goto free_vinst;
	}

	vinst->irq_port = xd->trig_page;
	pr_devel("Initialized instance [%s, %d] paste_base 0x%llx paste_win_id_shift 0x%llx IRQ %d Port 0x%llx\n",
			pdev->name, vasid, vinst->paste_base_addr,
			vinst->paste_win_id_shift, vinst->virq,
			vinst->irq_port);

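	/* Remember this instance's VAS id for every CPU on the same chip. */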
	for_each_possible_cpu(cpu) {
		if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
			per_cpu(cpu_vas_id, cpu) = vasid;
	}

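	/* Publish the instance on the global list under vas_mutex. */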
	mutex_lock(&vas_mutex);
	list_add(&vinst->node, &vas_instances);
	mutex_unlock(&vas_mutex);

	/*
	 * IRQ and fault handling setup is needed only for user space
	 * send windows.
	 */
	if (vinst->virq) {
		rc = vas_irq_fault_window_setup(vinst);
		/*
		 * The fault window is used only for user space send
		 * windows. So if setting it up fails, clear vinst->virq
		 * and tx_win_open will return -ENODEV for user space.
		 */
		if (rc)
			vinst->virq = 0;
	}

	vas_instance_init_dbgdir(vinst);

	dev_set_drvdata(&pdev->dev, vinst);

	return 0;

free_vinst:
	kfree(vinst);
	return rc;
}

/*
 * Find the VAS instance for @vasid; a vasid of -1 means "the instance
 * on the chip the caller is running on".
 *
 * The vas_instances list is read many times at runtime, but it is
 * written to only during initialization.
 */
struct vas_instance *find_vas_instance(int vasid)
{
	struct list_head *ent;
	struct vas_instance *vinst;

	mutex_lock(&vas_mutex);

	if (vasid == -1)
		vasid = per_cpu(cpu_vas_id, smp_processor_id());

	list_for_each(ent, &vas_instances) {
		vinst = list_entry(ent, struct vas_instance, node);
		if (vinst->vas_id == vasid) {
			mutex_unlock(&vas_mutex);
			return vinst;
		}
	}
	mutex_unlock(&vas_mutex);

	pr_devel("Instance %d not found\n", vasid);
	return NULL;
}

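/*
 * Return the VAS id of the instance on the given chip, or -1 if no CPU
 * on that chip has been associated with a VAS instance. Exported so
 * that other in-kernel VAS users (e.g. coprocessor drivers) can look
 * up the id.
 */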
int chip_to_vas_id(int chipid)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu_to_chip_id(cpu) == chipid)
			return per_cpu(cpu_vas_id, cpu);
	}
	return -1;
}
EXPORT_SYMBOL(chip_to_vas_id);

static int vas_probe(struct platform_device *pdev)
{
	return init_vas_instance(pdev);
}

static const struct of_device_id powernv_vas_match[] = {
	{ .compatible = "ibm,vas",},
	{},
};

static struct platform_driver vas_driver = {
	.driver = {
		.name = "vas",
		.of_match_table = powernv_vas_match,
	},
	.probe = vas_probe,
};

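/*
 * Register the VAS platform driver and create a platform device for
 * every "ibm,vas" node in the device tree; probing each device then
 * runs init_vas_instance() for it.
 */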
static int __init vas_init(void)
{
	int found = 0;
	struct device_node *dn;

	platform_driver_register(&vas_driver);

	for_each_compatible_node(dn, NULL, "ibm,vas") {
		of_platform_device_create(dn, NULL, NULL);
		found++;
	}

	if (!found) {
		platform_driver_unregister(&vas_driver);
		return -ENODEV;
	}

	pr_devel("Found %d instances\n", found);

	return 0;
}
device_initcall(vas_init);