xref: /openbmc/linux/drivers/uio/uio_hv_generic.c (revision ca2478a7d974f38d29d27acb42a952c7f168916e)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * uio_hv_generic - generic UIO driver for VMBus
4   *
5   * Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
6   * Copyright (c) 2016, Microsoft Corporation.
7   *
8   * Since the driver does not declare any device ids, you must allocate
9   * id and bind the device to the driver yourself.  For example:
10   *
11   * Associate Network GUID with UIO device
12   * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
13   *    > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
14   * Then rebind
15   * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
16   *    > /sys/bus/vmbus/drivers/hv_netvsc/unbind
17   * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
18   *    > /sys/bus/vmbus/drivers/uio_hv_generic/bind
19   */
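/*
 * Note: the GUID written to new_id above is the VMBus device *class* id
 * (here the synthetic-network class), while the GUID used for unbind/bind
 * is the per-device *instance* id.  Assuming the usual vmbus sysfs layout,
 * both can be read back from the device directory, e.g.:
 *
 * # cat /sys/bus/vmbus/devices/ed963694-e847-4b2a-85af-bc9cfc11d6f3/class_id
 * # cat /sys/bus/vmbus/devices/ed963694-e847-4b2a-85af-bc9cfc11d6f3/device_id
 */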
20  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21  
22  #include <linux/device.h>
23  #include <linux/kernel.h>
24  #include <linux/module.h>
25  #include <linux/uio_driver.h>
26  #include <linux/netdevice.h>
27  #include <linux/if_ether.h>
28  #include <linux/skbuff.h>
29  #include <linux/hyperv.h>
30  #include <linux/vmalloc.h>
31  #include <linux/slab.h>
32  
33  #include "../hv/hyperv_vmbus.h"
34  
35  #define DRIVER_VERSION	"0.02.1"
36  #define DRIVER_AUTHOR	"Stephen Hemminger <sthemmin at microsoft.com>"
37  #define DRIVER_DESC	"Generic UIO driver for VMBus devices"
38  
39  #define HV_RING_SIZE	 512	/* pages */
40  #define SEND_BUFFER_SIZE (16 * 1024 * 1024)
41  #define RECV_BUFFER_SIZE (31 * 1024 * 1024)
42  
43  /*
44   * List of resources to be mapped to user space;
45   * can be extended up to MAX_UIO_MAPS (5) items.
46   */
47  enum hv_uio_map {
48  	TXRX_RING_MAP = 0,
49  	INT_PAGE_MAP,
50  	MON_PAGE_MAP,
51  	RECV_BUF_MAP,
52  	SEND_BUF_MAP
53  };
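/*
 * Each entry above becomes one mmap()able region of the UIO character
 * device, in the order listed: with the standard UIO chardev, region N is
 * selected by passing an mmap offset of N * page_size, and its name and
 * size can be read from /sys/class/uio/uioX/maps/mapN/.  A minimal
 * user-space sketch (the uio0 instance number is an assumption and error
 * handling is omitted):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	int fd = open("/dev/uio0", O_RDWR);
 *	void *txrx = mmap(NULL, 2 * 512 * psz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0 * psz);      // TXRX_RING_MAP
 *	void *recv = mmap(NULL, 31 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 3 * psz);      // RECV_BUF_MAP
 */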
54  
55  struct hv_uio_private_data {
56  	struct uio_info info;
57  	struct hv_device *device;
58  	atomic_t refcnt;
59  
60  	void	*recv_buf;
61  	struct vmbus_gpadl recv_gpadl;
62  	char	recv_name[32];	/* "recv:4294967295" */
63  
64  	void	*send_buf;
65  	struct vmbus_gpadl send_gpadl;
66  	char	send_name[32];
67  };
68  
69  /*
70   * This is the irqcontrol callback to be registered to uio_info.
71   * It can be used to disable/enable interrupt from user space processes.
72   *
73   * @param info
74   *  pointer to uio_info.
75   * @param irq_state
76   *  state value. 1 to enable interrupt, 0 to disable interrupt.
77   */
78  static int
79  hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
80  {
81  	struct hv_uio_private_data *pdata = info->priv;
82  	struct hv_device *dev = pdata->device;
83  
84  	dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
85  	virt_mb();
86  
87  	return 0;
88  }
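/*
 * With this driver, interrupt masking is driven entirely from user space
 * through the /dev/uioX file descriptor: writing a 32-bit value invokes the
 * irqcontrol callback above (1 unmasks the inbound ring, 0 masks it), and a
 * blocking read() completes with an event count once the channel callback
 * calls uio_event_notify().  A minimal sketch of the user-space event loop
 * this enables (uio0 and the missing error handling are assumptions):
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	int32_t unmask = 1;
 *	uint32_t events;
 *
 *	for (;;) {
 *		write(fd, &unmask, sizeof(unmask)); // -> hv_uio_irqcontrol(info, 1)
 *		read(fd, &events, sizeof(events));  // blocks until uio_event_notify()
 *		// drain the inbound ring mapped at TXRX_RING_MAP here
 *	}
 */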
89  
90  /*
91   * Callback from vmbus_event when something is in inbound ring.
92   */
93  static void hv_uio_channel_cb(void *context)
94  {
95  	struct vmbus_channel *chan = context;
96  	struct hv_device *hv_dev = chan->device_obj;
97  	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
98  
99  	chan->inbound.ring_buffer->interrupt_mask = 1;
100  	virt_mb();
101  
102  	uio_event_notify(&pdata->info);
103  }
104  
105  /*
106   * Callback from vmbus_event when channel is rescinded.
107   * It is meant for rescind of primary channels only.
108   */
109  static void hv_uio_rescind(struct vmbus_channel *channel)
110  {
111  	struct hv_device *hv_dev = channel->device_obj;
112  	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
113  
114  	/*
115  	 * Turn off the interrupt file handle
116  	 * Next read for event will return -EIO
117  	 */
118  	pdata->info.irq = 0;
119  
120  	/* Wake up reader */
121  	uio_event_notify(&pdata->info);
122  
123  	/*
124  	 * With rescind callback registered, rescind path will not unregister the device
125  	 * from vmbus when the primary channel is rescinded.
126  	 * Without it, rescind handling is incomplete and the next onoffer message does not arrive.
127  	 * Unregister the device from vmbus here.
128  	 */
129  	vmbus_device_unregister(channel->device_obj);
130  }
131  
132  /* Sysfs API to allow mmap of the ring buffers
133   * The ring buffer is allocated as contiguous memory by vmbus_open
134   */
135  static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
136  			    struct bin_attribute *attr,
137  			    struct vm_area_struct *vma)
138  {
139  	struct vmbus_channel *channel
140  		= container_of(kobj, struct vmbus_channel, kobj);
141  	void *ring_buffer = page_address(channel->ringbuffer_page);
142  
143  	if (channel->state != CHANNEL_OPENED_STATE)
144  		return -ENODEV;
145  
146  	return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
147  			       channel->ringbuffer_pagecount << PAGE_SHIFT);
148  }
149  
150  static const struct bin_attribute ring_buffer_bin_attr = {
151  	.attr = {
152  		.name = "ring",
153  		.mode = 0600,
154  	},
155  	.size = 2 * HV_RING_SIZE * PAGE_SIZE,
156  	.mmap = hv_uio_ring_mmap,
157  };
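/*
 * This attribute exposes a channel's ring buffer directly through sysfs,
 * which is how user space reaches sub-channel rings (the primary channel's
 * ring is already exported as the txrx_rings UIO region).  A sketch, reusing
 * the headers from the sketches above and assuming the channel kobject
 * appears under the device's channels/<relid>/ directory in sysfs
 * (<device-guid> and <relid> are placeholders):
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	int fd = open("/sys/bus/vmbus/devices/<device-guid>/channels/<relid>/ring",
 *		      O_RDWR);
 *	void *ring = mmap(NULL, 2 * 512 * psz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */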
158  
159  /* Callback from VMBUS subsystem when new channel created. */
160  static void
161  hv_uio_new_channel(struct vmbus_channel *new_sc)
162  {
163  	struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
164  	struct device *device = &hv_dev->device;
165  	const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
166  	int ret;
167  
168  	/* Create host communication ring */
169  	ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
170  			 hv_uio_channel_cb, new_sc);
171  	if (ret) {
172  		dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
173  		return;
174  	}
175  
176  	/* Disable interrupts on sub channel */
177  	new_sc->inbound.ring_buffer->interrupt_mask = 1;
178  	set_channel_read_mode(new_sc, HV_CALL_ISR);
179  
180  	ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr);
181  	if (ret) {
182  		dev_err(device, "sysfs create ring bin file failed; %d\n", ret);
183  		vmbus_close(new_sc);
184  	}
185  }
186  
187  /* free the reserved buffers for send and receive */
188  static void
189  hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
190  {
191  	if (pdata->send_gpadl.gpadl_handle) {
192  		vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
193  		if (!pdata->send_gpadl.decrypted)
194  			vfree(pdata->send_buf);
195  	}
196  
197  	if (pdata->recv_gpadl.gpadl_handle) {
198  		vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
199  		if (!pdata->recv_gpadl.decrypted)
200  			vfree(pdata->recv_buf);
201  	}
202  }
203  
204  /* VMBus primary channel is opened on first use */
205  static int
206  hv_uio_open(struct uio_info *info, struct inode *inode)
207  {
208  	struct hv_uio_private_data *pdata
209  		= container_of(info, struct hv_uio_private_data, info);
210  	struct hv_device *dev = pdata->device;
211  	int ret;
212  
213  	if (atomic_inc_return(&pdata->refcnt) != 1)
214  		return 0;
215  
216  	vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
217  	vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
218  
219  	ret = vmbus_connect_ring(dev->channel,
220  				 hv_uio_channel_cb, dev->channel);
221  	if (ret == 0)
222  		dev->channel->inbound.ring_buffer->interrupt_mask = 1;
223  	else
224  		atomic_dec(&pdata->refcnt);
225  
226  	return ret;
227  }
228  
229  /* VMBus primary channel is closed on last close */
230  static int
231  hv_uio_release(struct uio_info *info, struct inode *inode)
232  {
233  	struct hv_uio_private_data *pdata
234  		= container_of(info, struct hv_uio_private_data, info);
235  	struct hv_device *dev = pdata->device;
236  	int ret = 0;
237  
238  	if (atomic_dec_and_test(&pdata->refcnt))
239  		ret = vmbus_disconnect_ring(dev->channel);
240  
241  	return ret;
242  }
243  
244  static int
245  hv_uio_probe(struct hv_device *dev,
246  	     const struct hv_vmbus_device_id *dev_id)
247  {
248  	struct vmbus_channel *channel = dev->channel;
249  	struct hv_uio_private_data *pdata;
250  	void *ring_buffer;
251  	int ret;
252  
253  	/* Communicating with the host has to be via shared memory, not hypercall */
254  	if (!channel->offermsg.monitor_allocated) {
255  		dev_err(&dev->device, "vmbus channel requires hypercall\n");
256  		return -ENOTSUPP;
257  	}
258  
259  	pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL);
260  	if (!pdata)
261  		return -ENOMEM;
262  
263  	ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
264  			       HV_RING_SIZE * PAGE_SIZE);
265  	if (ret)
266  		return ret;
267  
268  	set_channel_read_mode(channel, HV_CALL_ISR);
269  
270  	/* Fill general uio info */
271  	pdata->info.name = "uio_hv_generic";
272  	pdata->info.version = DRIVER_VERSION;
273  	pdata->info.irqcontrol = hv_uio_irqcontrol;
274  	pdata->info.open = hv_uio_open;
275  	pdata->info.release = hv_uio_release;
276  	pdata->info.irq = UIO_IRQ_CUSTOM;
277  	atomic_set(&pdata->refcnt, 0);
278  
279  	/* mem resources */
280  	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
281  	ring_buffer = page_address(channel->ringbuffer_page);
282  	pdata->info.mem[TXRX_RING_MAP].addr
283  		= (uintptr_t)virt_to_phys(ring_buffer);
284  	pdata->info.mem[TXRX_RING_MAP].size
285  		= channel->ringbuffer_pagecount << PAGE_SHIFT;
286  	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;
287  
288  	pdata->info.mem[INT_PAGE_MAP].name = "int_page";
289  	pdata->info.mem[INT_PAGE_MAP].addr
290  		= (uintptr_t)vmbus_connection.int_page;
291  	pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
292  	pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
293  
294  	pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
295  	pdata->info.mem[MON_PAGE_MAP].addr
296  		= (uintptr_t)vmbus_connection.monitor_pages[1];
297  	pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
298  	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
299  
300  	pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
301  	if (pdata->recv_buf == NULL) {
302  		ret = -ENOMEM;
303  		goto fail_free_ring;
304  	}
305  
306  	ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
307  				    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
308  	if (ret) {
309  		if (!pdata->recv_gpadl.decrypted)
310  			vfree(pdata->recv_buf);
311  		goto fail_close;
312  	}
313  
314  	/* put the GPADL (Guest Physical Address Descriptor List) handle in the name */
315  	snprintf(pdata->recv_name, sizeof(pdata->recv_name),
316  		 "recv:%u", pdata->recv_gpadl.gpadl_handle);
317  	pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
318  	pdata->info.mem[RECV_BUF_MAP].addr
319  		= (uintptr_t)pdata->recv_buf;
320  	pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
321  	pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
322  
323  	pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
324  	if (pdata->send_buf == NULL) {
325  		ret = -ENOMEM;
326  		goto fail_close;
327  	}
328  
329  	ret = vmbus_establish_gpadl(channel, pdata->send_buf,
330  				    SEND_BUFFER_SIZE, &pdata->send_gpadl);
331  	if (ret) {
332  		if (!pdata->send_gpadl.decrypted)
333  			vfree(pdata->send_buf);
334  		goto fail_close;
335  	}
336  
337  	snprintf(pdata->send_name, sizeof(pdata->send_name),
338  		 "send:%u", pdata->send_gpadl.gpadl_handle);
339  	pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
340  	pdata->info.mem[SEND_BUF_MAP].addr
341  		= (uintptr_t)pdata->send_buf;
342  	pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
343  	pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
344  
345  	pdata->info.priv = pdata;
346  	pdata->device = dev;
347  
348  	ret = uio_register_device(&dev->device, &pdata->info);
349  	if (ret) {
350  		dev_err(&dev->device, "hv_uio register failed\n");
351  		goto fail_close;
352  	}
353  
354  	ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
355  	if (ret)
356  		dev_notice(&dev->device,
357  			   "sysfs create ring bin file failed; %d\n", ret);
358  
359  	hv_set_drvdata(dev, pdata);
360  
361  	return 0;
362  
363  fail_close:
364  	hv_uio_cleanup(dev, pdata);
365  fail_free_ring:
366  	vmbus_free_ring(dev->channel);
367  
368  	return ret;
369  }
370  
371  static void
372  hv_uio_remove(struct hv_device *dev)
373  {
374  	struct hv_uio_private_data *pdata = hv_get_drvdata(dev);
375  
376  	if (!pdata)
377  		return;
378  
379  	sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
380  	uio_unregister_device(&pdata->info);
381  	hv_uio_cleanup(dev, pdata);
382  
383  	vmbus_free_ring(dev->channel);
384  }
385  
386  static struct hv_driver hv_uio_drv = {
387  	.name = "uio_hv_generic",
388  	.id_table = NULL, /* only dynamic ids */
389  	.probe = hv_uio_probe,
390  	.remove = hv_uio_remove,
391  };
392  
393  static int __init
394  hyperv_module_init(void)
395  {
396  	return vmbus_driver_register(&hv_uio_drv);
397  }
398  
399  static void __exit
400  hyperv_module_exit(void)
401  {
402  	vmbus_driver_unregister(&hv_uio_drv);
403  }
404  
405  module_init(hyperv_module_init);
406  module_exit(hyperv_module_exit);
407  
408  MODULE_VERSION(DRIVER_VERSION);
409  MODULE_LICENSE("GPL v2");
410  MODULE_AUTHOR(DRIVER_AUTHOR);
411  MODULE_DESCRIPTION(DRIVER_DESC);
412