/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>

#include "cxl.h"

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct address_space *mapping;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);

	ctx = cxl_context_alloc();
	if (!ctx) {
		/* cxl_context_alloc() returns NULL, not an ERR_PTR, on failure */
		rc = -ENOMEM;
		goto err_dev;
	}

	ctx->kernelapi = true;

	/*
	 * Make our own address space since we won't have one from the
	 * filesystem like the user api has, and even if we do associate a file
	 * with this context we don't want to use the global anonymous inode's
	 * address space as that can invalidate unrelated users:
	 */
	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
	if (!mapping) {
		rc = -ENOMEM;
		goto err_ctx;
	}
	address_space_init_once(mapping);

	/* Make it a slave context.  We can promote it later? */
	rc = cxl_context_init(ctx, afu, false, mapping);
	if (rc)
		goto err_mapping;

	cxl_assign_psn_space(ctx);

	return ctx;

err_mapping:
	kfree(mapping);
err_ctx:
	kfree(ctx);
err_dev:
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
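
/*
 * A minimal sketch (illustrative only, not built here) of how a kernel AFU
 * driver might obtain a kernel API context with cxl_dev_context_init() above
 * and hand it back with cxl_release_context() below.  The function name
 * my_afu_probe() and the variable my_dev are hypothetical placeholders.
 *
 *	static int my_afu_probe(struct pci_dev *my_dev)
 *	{
 *		struct cxl_context *ctx;
 *
 *		ctx = cxl_dev_context_init(my_dev);
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *
 *		// ... allocate IRQs and start the context here ...
 *
 *		// Fails with -EBUSY once the context has been STARTED:
 *		return cxl_release_context(ctx);
 *	}
 */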

struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

struct device *cxl_get_phys_dev(struct pci_dev *dev)
{
	struct cxl_afu *afu;

	afu = cxl_pci_to_afu(dev);

	return afu->adapter->dev.parent;
}
EXPORT_SYMBOL_GPL(cxl_get_phys_dev);

int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	if (num == 0)
		num = ctx->afu->pp_irqs;
	return afu_allocate_irqs(ctx, num);
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	afu_irq_name_free(ctx);
	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

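/*
 * Translate an AFU interrupt number (these start at 1; AFU IRQ 0 is reserved
 * for page faults) into the hardware IRQ number by walking the context's
 * allocated IRQ ranges.  As a worked example (values are hypothetical): with
 * ranges {1, 3} at offsets {0x100, 0x200}, num = 2 skips the single-entry
 * range 0 and maps to 0x200 + 1 = 0x201.  Returns 0 if num lies beyond the
 * allocated ranges.
 */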
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	WARN_ON(num == 0);

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range)
			return ctx->irqs.offset[r] + num;
		num -= range;
	}
	return 0;
}

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find the interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);

/*
 * Start a context.
 * The code here is similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
		kernel = false;
	}

	cxl_ctx_get();

	if ((rc = cxl_attach_process(ctx, kernel, wed, 0))) {
		/* Drop both pid references taken above before bailing out */
		put_pid(ctx->glpid);
		put_pid(ctx->pid);
		ctx->glpid = ctx->pid = NULL;
		cxl_ctx_put();
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);
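
/*
 * A minimal sketch (illustrative only, not built here) of the usual kernel
 * API sequence around cxl_start_context(): allocate the AFU interrupts,
 * register a handler, then start the context.  my_irq_handler(),
 * my_afu_attach(), and the "my_afu" name are hypothetical placeholders.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *cookie)
 *	{
 *		// cookie is whatever was passed to cxl_map_afu_irq()
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_afu_attach(struct cxl_context *ctx, u64 wed)
 *	{
 *		int rc;
 *
 *		rc = cxl_allocate_afu_irqs(ctx, 0);	// 0 => afu->pp_irqs
 *		if (rc)
 *			return rc;
 *
 *		// Returns the Linux virq on success, 0 or -ENOENT on failure
 *		rc = cxl_map_afu_irq(ctx, 1, my_irq_handler, ctx, "my_afu");
 *		if (rc <= 0) {
 *			cxl_free_afu_irqs(ctx);
 *			return rc ? rc : -ENOMEM;
 *		}
 *
 *		// task == NULL: a kernel context, no user MM attached
 *		rc = cxl_start_context(ctx, wed, NULL);
 *		if (rc) {
 *			cxl_unmap_afu_irq(ctx, 1, ctx);
 *			cxl_free_afu_irqs(ctx);
 *		}
 *		return rc;
 *	}
 */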

int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
	cxl_assign_psn_space(ctx);
}
EXPORT_SYMBOL_GPL(cxl_set_master);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/*
	 * Patch the file ops.  Care must be taken that this stays reentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	file = anon_inode_getfile("cxl", fops, ctx, flags);
	if (IS_ERR(file))
		goto err_fd;

	file->f_mapping = ctx->mapping;

	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
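
/*
 * A minimal sketch (illustrative only, not built here) of exporting a
 * context to user space through cxl_get_fd().  my_export() and my_fops are
 * hypothetical; any ops left NULL in my_fops are filled in from afu_fops by
 * the PATCH_FOPS() calls above.
 *
 *	static int my_export(struct cxl_context *ctx,
 *			     struct file_operations *my_fops)
 *	{
 *		struct file *file;
 *		int fd;
 *
 *		// May return an ERR_PTR (no free fd) or NULL (no file)
 *		file = cxl_get_fd(ctx, my_fops, &fd);
 *		if (IS_ERR_OR_NULL(file))
 *			return file ? PTR_ERR(file) : -ENOMEM;
 *
 *		// Publish the fd only once nothing further can fail
 *		fd_install(fd, file);
 *		return fd;
 *	}
 */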

struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* code taken from afu_ioctl_start_work */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_afu_check_and_enable(afu);
	if (rc)
		return NULL;

	pr_devel("%s: psn_phys: %llx size: %llx\n",
		 __func__, ctx->psn_phys, ctx->psn_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
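
/*
 * A minimal sketch (illustrative only, not built here) of using the pair
 * above to reach the per-process problem state area once the context has
 * been started.  my_psa and MY_CONTROL_REG are hypothetical placeholders.
 *
 *	void __iomem *my_psa;
 *
 *	my_psa = cxl_psa_map(ctx);
 *	if (!my_psa)
 *		return -EIO;
 *	out_be64(my_psa + MY_CONTROL_REG, 1ULL);
 *	// ...
 *	cxl_psa_unmap(my_psa);
 */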

int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = __cxl_afu_reset(afu);
	if (rc)
		return rc;

	return cxl_afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);