xref: /openbmc/linux/drivers/xen/privcmd-buf.c (revision f220d3eb)
// SPDX-License-Identifier: GPL-2.0 OR MIT

/******************************************************************************
 * privcmd-buf.c
 *
 * Mmap of hypercall buffers.
 *
 * Copyright (c) 2018 Juergen Gross
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

static unsigned int limit = 64;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
			"the privcmd-buf device per open file");
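/*
 * Per file handle data: "lock" protects the "list" of active mappings and
 * the "allocated" page count that is checked against the "limit" module
 * parameter.
 */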
struct privcmd_buf_private {
	struct mutex lock;
	struct list_head list;
	unsigned int allocated;
};
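/*
 * Per mapping data: the pages backing one hypercall buffer plus the number
 * of VMAs still referencing it (e.g. across fork(), see
 * privcmd_buf_vma_open()).
 */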
struct privcmd_buf_vma_private {
	struct privcmd_buf_private *file_priv;
	struct list_head list;
	unsigned int users;
	unsigned int n_pages;
	struct page *pages[];
};
static int privcmd_buf_open(struct inode *ino, struct file *file)
{
	struct privcmd_buf_private *file_priv;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	mutex_init(&file_priv->lock);
	INIT_LIST_HEAD(&file_priv->list);

	file->private_data = file_priv;

	return 0;
}
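/* Free one mapping's state; the caller must hold file_priv->lock. */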
static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
{
	unsigned int i;

	vma_priv->file_priv->allocated -= vma_priv->n_pages;

	list_del(&vma_priv->list);

	for (i = 0; i < vma_priv->n_pages; i++)
		if (vma_priv->pages[i])
			__free_page(vma_priv->pages[i]);

	kfree(vma_priv);
}
static int privcmd_buf_release(struct inode *ino, struct file *file)
{
	struct privcmd_buf_private *file_priv = file->private_data;
	struct privcmd_buf_vma_private *vma_priv;

	mutex_lock(&file_priv->lock);

	while (!list_empty(&file_priv->list)) {
		vma_priv = list_first_entry(&file_priv->list,
					    struct privcmd_buf_vma_private,
					    list);
		privcmd_buf_vmapriv_free(vma_priv);
	}

	mutex_unlock(&file_priv->lock);

	kfree(file_priv);

	return 0;
}
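/*
 * vm_operations open/close track how many VMAs share a buffer (e.g. after
 * fork()); the backing pages are released once the last VMA goes away.
 */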
static void privcmd_buf_vma_open(struct vm_area_struct *vma)
{
	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;

	if (!vma_priv)
		return;

	mutex_lock(&vma_priv->file_priv->lock);
	vma_priv->users++;
	mutex_unlock(&vma_priv->file_priv->lock);
}

static void privcmd_buf_vma_close(struct vm_area_struct *vma)
{
	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
	struct privcmd_buf_private *file_priv;

	if (!vma_priv)
		return;

	file_priv = vma_priv->file_priv;

	mutex_lock(&file_priv->lock);

	vma_priv->users--;
	if (!vma_priv->users)
		privcmd_buf_vmapriv_free(vma_priv);

	mutex_unlock(&file_priv->lock);
}
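/*
 * All backing pages are inserted up front in privcmd_buf_mmap(), so any
 * fault reaching this handler is unexpected and answered with SIGBUS.
 */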
static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
{
	pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
		 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
		 vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_buf_vm_ops = {
	.open = privcmd_buf_vma_open,
	.close = privcmd_buf_vma_close,
	.fault = privcmd_buf_vma_fault,
};
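/*
 * mmap() backend: require a shared mapping within the per-file page limit,
 * allocate zeroed pages for the whole range and insert them into the VMA
 * up front.
 */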
static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct privcmd_buf_private *file_priv = file->private_data;
	struct privcmd_buf_vma_private *vma_priv;
	unsigned long count = vma_pages(vma);
	unsigned int i;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED) || count > limit ||
	    file_priv->allocated + count > limit)
		return -EINVAL;

	vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
			   GFP_KERNEL);
	if (!vma_priv)
		return -ENOMEM;

	vma_priv->n_pages = count;
	count = 0;
	for (i = 0; i < vma_priv->n_pages; i++) {
		vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!vma_priv->pages[i])
			break;
		count++;
	}

	mutex_lock(&file_priv->lock);

	file_priv->allocated += count;

	vma_priv->file_priv = file_priv;
	vma_priv->users = 1;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	vma->vm_ops = &privcmd_buf_vm_ops;
	vma->vm_private_data = vma_priv;

	list_add(&vma_priv->list, &file_priv->list);

	if (vma_priv->n_pages != count)
		ret = -ENOMEM;
	else
		for (i = 0; i < vma_priv->n_pages; i++) {
			ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
					     vma_priv->pages[i]);
			if (ret)
				break;
		}

	if (ret)
		privcmd_buf_vmapriv_free(vma_priv);

	mutex_unlock(&file_priv->lock);

	return ret;
}
const struct file_operations xen_privcmdbuf_fops = {
	.owner = THIS_MODULE,
	.open = privcmd_buf_open,
	.release = privcmd_buf_release,
	.mmap = privcmd_buf_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);

struct miscdevice xen_privcmdbuf_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/hypercall",
	.fops = &xen_privcmdbuf_fops,
};
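/*
 * Userspace usage sketch (not part of the driver): with the miscdevice
 * registered above, a hypercall buffer is obtained by mmap()ing the device
 * node, which udev normally creates as /dev/xen/hypercall. The path, the
 * helper name and the error handling below are illustrative assumptions,
 * not taken from this file.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	void *map_hypercall_buf(size_t pages)
 *	{
 *		size_t len = pages * getpagesize();
 *		int fd = open("/dev/xen/hypercall", O_RDWR | O_CLOEXEC);
 *		void *buf;
 *
 *		if (fd < 0)
 *			return NULL;
 *
 *		// MAP_SHARED is mandatory, see privcmd_buf_mmap()
 *		buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, 0);
 *		close(fd);
 *		return buf == MAP_FAILED ? NULL : buf;
 *	}
 *
 * Unmapping the region (or process exit) triggers privcmd_buf_vma_close(),
 * which frees the backing pages once the last referencing VMA is gone.
 */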