13b20eb23SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
246a97191SGreg Kroah-Hartman /*
346a97191SGreg Kroah-Hartman * Copyright (c) 2009, Microsoft Corporation.
446a97191SGreg Kroah-Hartman *
546a97191SGreg Kroah-Hartman * Authors:
646a97191SGreg Kroah-Hartman * Haiyang Zhang <haiyangz@microsoft.com>
746a97191SGreg Kroah-Hartman * Hank Janssen <hjanssen@microsoft.com>
846a97191SGreg Kroah-Hartman * K. Y. Srinivasan <kys@microsoft.com>
946a97191SGreg Kroah-Hartman */
1046a97191SGreg Kroah-Hartman #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1146a97191SGreg Kroah-Hartman
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
4246a97191SGreg Kroah-Hartman
43fc76936dSStephen Hemminger struct vmbus_dynid {
44fc76936dSStephen Hemminger struct list_head node;
45fc76936dSStephen Hemminger struct hv_vmbus_device_id id;
46fc76936dSStephen Hemminger };
47fc76936dSStephen Hemminger
489c843423SSaurabh Sengar static struct device *hv_dev;
4946a97191SGreg Kroah-Hartman
5076d36ab7SVitaly Kuznetsov static int hyperv_cpuhp_online;
5196c1d058SNick Meier
52d608715dSMichael Kelley static long __percpu *vmbus_evt;
53d608715dSMichael Kelley
54626b901fSMichael Kelley /* Values parsed from ACPI DSDT */
55d608715dSMichael Kelley int vmbus_irq;
56626b901fSMichael Kelley int vmbus_interrupt;
57626b901fSMichael Kelley
58040026dfSTianyu Lan /*
59d786e00dSGuilherme G. Piccoli * The panic notifier below is responsible solely for unloading the
60d786e00dSGuilherme G. Piccoli * vmbus connection, which is necessary in a panic event.
61d786e00dSGuilherme G. Piccoli *
62d786e00dSGuilherme G. Piccoli * Notice an intricate relation of this notifier with Hyper-V
63d786e00dSGuilherme G. Piccoli * framebuffer panic notifier exists - we need vmbus connection alive
64d786e00dSGuilherme G. Piccoli * there in order to succeed, so we need to order both with each other
65d786e00dSGuilherme G. Piccoli * [see hvfb_on_panic()] - this is done using notifiers' priorities.
66d786e00dSGuilherme G. Piccoli */
hv_panic_vmbus_unload(struct notifier_block * nb,unsigned long val,void * args)67d786e00dSGuilherme G. Piccoli static int hv_panic_vmbus_unload(struct notifier_block *nb, unsigned long val,
68510f7aefSVitaly Kuznetsov void *args)
69510f7aefSVitaly Kuznetsov {
7074347a99STianyu Lan vmbus_initiate_unload(true);
7196c1d058SNick Meier return NOTIFY_DONE;
7296c1d058SNick Meier }
73d786e00dSGuilherme G. Piccoli static struct notifier_block hyperv_panic_vmbus_unload_block = {
74d786e00dSGuilherme G. Piccoli .notifier_call = hv_panic_vmbus_unload,
75d786e00dSGuilherme G. Piccoli .priority = INT_MIN + 1, /* almost the latest one to execute */
76d786e00dSGuilherme G. Piccoli };
7796c1d058SNick Meier
786d146aefSJake Oshins static const char *fb_mmio_name = "fb_range";
796d146aefSJake Oshins static struct resource *fb_mmio;
80e2e80841SStephen Hemminger static struct resource *hyperv_mmio;
818aea7f82SDavidlohr Bueso static DEFINE_MUTEX(hyperv_mmio_lock);
8246a97191SGreg Kroah-Hartman
vmbus_exists(void)83cf6a2eacSK. Y. Srinivasan static int vmbus_exists(void)
84cf6a2eacSK. Y. Srinivasan {
859c843423SSaurabh Sengar if (hv_dev == NULL)
86cf6a2eacSK. Y. Srinivasan return -ENODEV;
87cf6a2eacSK. Y. Srinivasan
88cf6a2eacSK. Y. Srinivasan return 0;
89cf6a2eacSK. Y. Srinivasan }
90cf6a2eacSK. Y. Srinivasan
channel_monitor_group(const struct vmbus_channel * channel)91c2e5df61SStephen Hemminger static u8 channel_monitor_group(const struct vmbus_channel *channel)
9276c52bbeSGreg Kroah-Hartman {
9376c52bbeSGreg Kroah-Hartman return (u8)channel->offermsg.monitorid / 32;
9476c52bbeSGreg Kroah-Hartman }
9576c52bbeSGreg Kroah-Hartman
channel_monitor_offset(const struct vmbus_channel * channel)96c2e5df61SStephen Hemminger static u8 channel_monitor_offset(const struct vmbus_channel *channel)
9776c52bbeSGreg Kroah-Hartman {
9876c52bbeSGreg Kroah-Hartman return (u8)channel->offermsg.monitorid % 32;
9976c52bbeSGreg Kroah-Hartman }
10076c52bbeSGreg Kroah-Hartman
channel_pending(const struct vmbus_channel * channel,const struct hv_monitor_page * monitor_page)101c2e5df61SStephen Hemminger static u32 channel_pending(const struct vmbus_channel *channel,
102c2e5df61SStephen Hemminger const struct hv_monitor_page *monitor_page)
10376c52bbeSGreg Kroah-Hartman {
10476c52bbeSGreg Kroah-Hartman u8 monitor_group = channel_monitor_group(channel);
105c2e5df61SStephen Hemminger
10676c52bbeSGreg Kroah-Hartman return monitor_page->trigger_group[monitor_group].pending;
10776c52bbeSGreg Kroah-Hartman }
10876c52bbeSGreg Kroah-Hartman
channel_latency(const struct vmbus_channel * channel,const struct hv_monitor_page * monitor_page)109c2e5df61SStephen Hemminger static u32 channel_latency(const struct vmbus_channel *channel,
110c2e5df61SStephen Hemminger const struct hv_monitor_page *monitor_page)
1111cee272bSGreg Kroah-Hartman {
1121cee272bSGreg Kroah-Hartman u8 monitor_group = channel_monitor_group(channel);
1131cee272bSGreg Kroah-Hartman u8 monitor_offset = channel_monitor_offset(channel);
114c2e5df61SStephen Hemminger
1151cee272bSGreg Kroah-Hartman return monitor_page->latency[monitor_group][monitor_offset];
1161cee272bSGreg Kroah-Hartman }
1171cee272bSGreg Kroah-Hartman
channel_conn_id(struct vmbus_channel * channel,struct hv_monitor_page * monitor_page)1184947c745SGreg Kroah-Hartman static u32 channel_conn_id(struct vmbus_channel *channel,
1194947c745SGreg Kroah-Hartman struct hv_monitor_page *monitor_page)
1204947c745SGreg Kroah-Hartman {
1214947c745SGreg Kroah-Hartman u8 monitor_group = channel_monitor_group(channel);
1224947c745SGreg Kroah-Hartman u8 monitor_offset = channel_monitor_offset(channel);
123e4f2212eSMatheus Castello
1244947c745SGreg Kroah-Hartman return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
1254947c745SGreg Kroah-Hartman }
1264947c745SGreg Kroah-Hartman
id_show(struct device * dev,struct device_attribute * dev_attr,char * buf)12703f3a910SGreg Kroah-Hartman static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
12803f3a910SGreg Kroah-Hartman char *buf)
12903f3a910SGreg Kroah-Hartman {
13003f3a910SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
13103f3a910SGreg Kroah-Hartman
13203f3a910SGreg Kroah-Hartman if (!hv_dev->channel)
13303f3a910SGreg Kroah-Hartman return -ENODEV;
13403f3a910SGreg Kroah-Hartman return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
13503f3a910SGreg Kroah-Hartman }
13603f3a910SGreg Kroah-Hartman static DEVICE_ATTR_RO(id);
13703f3a910SGreg Kroah-Hartman
state_show(struct device * dev,struct device_attribute * dev_attr,char * buf)138a8fb5f3dSGreg Kroah-Hartman static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
139a8fb5f3dSGreg Kroah-Hartman char *buf)
140a8fb5f3dSGreg Kroah-Hartman {
141a8fb5f3dSGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
142a8fb5f3dSGreg Kroah-Hartman
143a8fb5f3dSGreg Kroah-Hartman if (!hv_dev->channel)
144a8fb5f3dSGreg Kroah-Hartman return -ENODEV;
145a8fb5f3dSGreg Kroah-Hartman return sprintf(buf, "%d\n", hv_dev->channel->state);
146a8fb5f3dSGreg Kroah-Hartman }
147a8fb5f3dSGreg Kroah-Hartman static DEVICE_ATTR_RO(state);
148a8fb5f3dSGreg Kroah-Hartman
monitor_id_show(struct device * dev,struct device_attribute * dev_attr,char * buf)1495ffd00e2SGreg Kroah-Hartman static ssize_t monitor_id_show(struct device *dev,
1505ffd00e2SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
1515ffd00e2SGreg Kroah-Hartman {
1525ffd00e2SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
1535ffd00e2SGreg Kroah-Hartman
1545ffd00e2SGreg Kroah-Hartman if (!hv_dev->channel)
1555ffd00e2SGreg Kroah-Hartman return -ENODEV;
1565ffd00e2SGreg Kroah-Hartman return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
1575ffd00e2SGreg Kroah-Hartman }
1585ffd00e2SGreg Kroah-Hartman static DEVICE_ATTR_RO(monitor_id);
1595ffd00e2SGreg Kroah-Hartman
class_id_show(struct device * dev,struct device_attribute * dev_attr,char * buf)16068234c04SGreg Kroah-Hartman static ssize_t class_id_show(struct device *dev,
16168234c04SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
16268234c04SGreg Kroah-Hartman {
16368234c04SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
16468234c04SGreg Kroah-Hartman
16568234c04SGreg Kroah-Hartman if (!hv_dev->channel)
16668234c04SGreg Kroah-Hartman return -ENODEV;
16768234c04SGreg Kroah-Hartman return sprintf(buf, "{%pUl}\n",
168458c4475SAndy Shevchenko &hv_dev->channel->offermsg.offer.if_type);
16968234c04SGreg Kroah-Hartman }
17068234c04SGreg Kroah-Hartman static DEVICE_ATTR_RO(class_id);
17168234c04SGreg Kroah-Hartman
device_id_show(struct device * dev,struct device_attribute * dev_attr,char * buf)1727c55e1d0SGreg Kroah-Hartman static ssize_t device_id_show(struct device *dev,
1737c55e1d0SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
1747c55e1d0SGreg Kroah-Hartman {
1757c55e1d0SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
1767c55e1d0SGreg Kroah-Hartman
1777c55e1d0SGreg Kroah-Hartman if (!hv_dev->channel)
1787c55e1d0SGreg Kroah-Hartman return -ENODEV;
1797c55e1d0SGreg Kroah-Hartman return sprintf(buf, "{%pUl}\n",
180458c4475SAndy Shevchenko &hv_dev->channel->offermsg.offer.if_instance);
1817c55e1d0SGreg Kroah-Hartman }
1827c55e1d0SGreg Kroah-Hartman static DEVICE_ATTR_RO(device_id);
1837c55e1d0SGreg Kroah-Hartman
modalias_show(struct device * dev,struct device_attribute * dev_attr,char * buf)184647fa371SGreg Kroah-Hartman static ssize_t modalias_show(struct device *dev,
185647fa371SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
186647fa371SGreg Kroah-Hartman {
187647fa371SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
188647fa371SGreg Kroah-Hartman
1890027e3fdSAndy Shevchenko return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
190647fa371SGreg Kroah-Hartman }
191647fa371SGreg Kroah-Hartman static DEVICE_ATTR_RO(modalias);
192647fa371SGreg Kroah-Hartman
1937ceb1c37SStephen Hemminger #ifdef CONFIG_NUMA
numa_node_show(struct device * dev,struct device_attribute * attr,char * buf)1947ceb1c37SStephen Hemminger static ssize_t numa_node_show(struct device *dev,
1957ceb1c37SStephen Hemminger struct device_attribute *attr, char *buf)
1967ceb1c37SStephen Hemminger {
1977ceb1c37SStephen Hemminger struct hv_device *hv_dev = device_to_hv_device(dev);
1987ceb1c37SStephen Hemminger
1997ceb1c37SStephen Hemminger if (!hv_dev->channel)
2007ceb1c37SStephen Hemminger return -ENODEV;
2017ceb1c37SStephen Hemminger
202458d090fSAndrea Parri (Microsoft) return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
2037ceb1c37SStephen Hemminger }
2047ceb1c37SStephen Hemminger static DEVICE_ATTR_RO(numa_node);
2057ceb1c37SStephen Hemminger #endif
2067ceb1c37SStephen Hemminger
server_monitor_pending_show(struct device * dev,struct device_attribute * dev_attr,char * buf)20776c52bbeSGreg Kroah-Hartman static ssize_t server_monitor_pending_show(struct device *dev,
20876c52bbeSGreg Kroah-Hartman struct device_attribute *dev_attr,
20976c52bbeSGreg Kroah-Hartman char *buf)
21076c52bbeSGreg Kroah-Hartman {
21176c52bbeSGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
21276c52bbeSGreg Kroah-Hartman
21376c52bbeSGreg Kroah-Hartman if (!hv_dev->channel)
21476c52bbeSGreg Kroah-Hartman return -ENODEV;
21576c52bbeSGreg Kroah-Hartman return sprintf(buf, "%d\n",
21676c52bbeSGreg Kroah-Hartman channel_pending(hv_dev->channel,
217fd8e3c35SKimberly Brown vmbus_connection.monitor_pages[0]));
21876c52bbeSGreg Kroah-Hartman }
21976c52bbeSGreg Kroah-Hartman static DEVICE_ATTR_RO(server_monitor_pending);
22076c52bbeSGreg Kroah-Hartman
client_monitor_pending_show(struct device * dev,struct device_attribute * dev_attr,char * buf)22176c52bbeSGreg Kroah-Hartman static ssize_t client_monitor_pending_show(struct device *dev,
22276c52bbeSGreg Kroah-Hartman struct device_attribute *dev_attr,
22376c52bbeSGreg Kroah-Hartman char *buf)
22476c52bbeSGreg Kroah-Hartman {
22576c52bbeSGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
22676c52bbeSGreg Kroah-Hartman
22776c52bbeSGreg Kroah-Hartman if (!hv_dev->channel)
22876c52bbeSGreg Kroah-Hartman return -ENODEV;
22976c52bbeSGreg Kroah-Hartman return sprintf(buf, "%d\n",
23076c52bbeSGreg Kroah-Hartman channel_pending(hv_dev->channel,
23176c52bbeSGreg Kroah-Hartman vmbus_connection.monitor_pages[1]));
23276c52bbeSGreg Kroah-Hartman }
23376c52bbeSGreg Kroah-Hartman static DEVICE_ATTR_RO(client_monitor_pending);
23468234c04SGreg Kroah-Hartman
server_monitor_latency_show(struct device * dev,struct device_attribute * dev_attr,char * buf)2351cee272bSGreg Kroah-Hartman static ssize_t server_monitor_latency_show(struct device *dev,
2361cee272bSGreg Kroah-Hartman struct device_attribute *dev_attr,
2371cee272bSGreg Kroah-Hartman char *buf)
2381cee272bSGreg Kroah-Hartman {
2391cee272bSGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
2401cee272bSGreg Kroah-Hartman
2411cee272bSGreg Kroah-Hartman if (!hv_dev->channel)
2421cee272bSGreg Kroah-Hartman return -ENODEV;
2431cee272bSGreg Kroah-Hartman return sprintf(buf, "%d\n",
2441cee272bSGreg Kroah-Hartman channel_latency(hv_dev->channel,
2451cee272bSGreg Kroah-Hartman vmbus_connection.monitor_pages[0]));
2461cee272bSGreg Kroah-Hartman }
2471cee272bSGreg Kroah-Hartman static DEVICE_ATTR_RO(server_monitor_latency);
2481cee272bSGreg Kroah-Hartman
client_monitor_latency_show(struct device * dev,struct device_attribute * dev_attr,char * buf)2491cee272bSGreg Kroah-Hartman static ssize_t client_monitor_latency_show(struct device *dev,
2501cee272bSGreg Kroah-Hartman struct device_attribute *dev_attr,
2511cee272bSGreg Kroah-Hartman char *buf)
2521cee272bSGreg Kroah-Hartman {
2531cee272bSGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
2541cee272bSGreg Kroah-Hartman
2551cee272bSGreg Kroah-Hartman if (!hv_dev->channel)
2561cee272bSGreg Kroah-Hartman return -ENODEV;
2571cee272bSGreg Kroah-Hartman return sprintf(buf, "%d\n",
2581cee272bSGreg Kroah-Hartman channel_latency(hv_dev->channel,
2591cee272bSGreg Kroah-Hartman vmbus_connection.monitor_pages[1]));
2601cee272bSGreg Kroah-Hartman }
2611cee272bSGreg Kroah-Hartman static DEVICE_ATTR_RO(client_monitor_latency);
2621cee272bSGreg Kroah-Hartman
server_monitor_conn_id_show(struct device * dev,struct device_attribute * dev_attr,char * buf)2634947c745SGreg Kroah-Hartman static ssize_t server_monitor_conn_id_show(struct device *dev,
2644947c745SGreg Kroah-Hartman struct device_attribute *dev_attr,
2654947c745SGreg Kroah-Hartman char *buf)
2664947c745SGreg Kroah-Hartman {
2674947c745SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
2684947c745SGreg Kroah-Hartman
2694947c745SGreg Kroah-Hartman if (!hv_dev->channel)
2704947c745SGreg Kroah-Hartman return -ENODEV;
2714947c745SGreg Kroah-Hartman return sprintf(buf, "%d\n",
2724947c745SGreg Kroah-Hartman channel_conn_id(hv_dev->channel,
2734947c745SGreg Kroah-Hartman vmbus_connection.monitor_pages[0]));
2744947c745SGreg Kroah-Hartman }
2754947c745SGreg Kroah-Hartman static DEVICE_ATTR_RO(server_monitor_conn_id);
2764947c745SGreg Kroah-Hartman
client_monitor_conn_id_show(struct device * dev,struct device_attribute * dev_attr,char * buf)2774947c745SGreg Kroah-Hartman static ssize_t client_monitor_conn_id_show(struct device *dev,
2784947c745SGreg Kroah-Hartman struct device_attribute *dev_attr,
2794947c745SGreg Kroah-Hartman char *buf)
2804947c745SGreg Kroah-Hartman {
2814947c745SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
2824947c745SGreg Kroah-Hartman
2834947c745SGreg Kroah-Hartman if (!hv_dev->channel)
2844947c745SGreg Kroah-Hartman return -ENODEV;
2854947c745SGreg Kroah-Hartman return sprintf(buf, "%d\n",
2864947c745SGreg Kroah-Hartman channel_conn_id(hv_dev->channel,
2874947c745SGreg Kroah-Hartman vmbus_connection.monitor_pages[1]));
2884947c745SGreg Kroah-Hartman }
2894947c745SGreg Kroah-Hartman static DEVICE_ATTR_RO(client_monitor_conn_id);
2904947c745SGreg Kroah-Hartman
out_intr_mask_show(struct device * dev,struct device_attribute * dev_attr,char * buf)29198f4c651SGreg Kroah-Hartman static ssize_t out_intr_mask_show(struct device *dev,
29298f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
29398f4c651SGreg Kroah-Hartman {
29498f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
29598f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info outbound;
296ba50bf1cSDexuan Cui int ret;
29798f4c651SGreg Kroah-Hartman
29898f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
29998f4c651SGreg Kroah-Hartman return -ENODEV;
300ba50bf1cSDexuan Cui
301ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
302ba50bf1cSDexuan Cui &outbound);
303ba50bf1cSDexuan Cui if (ret < 0)
304ba50bf1cSDexuan Cui return ret;
305ba50bf1cSDexuan Cui
30698f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
30798f4c651SGreg Kroah-Hartman }
30898f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(out_intr_mask);
30998f4c651SGreg Kroah-Hartman
out_read_index_show(struct device * dev,struct device_attribute * dev_attr,char * buf)31098f4c651SGreg Kroah-Hartman static ssize_t out_read_index_show(struct device *dev,
31198f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
31298f4c651SGreg Kroah-Hartman {
31398f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
31498f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info outbound;
315ba50bf1cSDexuan Cui int ret;
31698f4c651SGreg Kroah-Hartman
31798f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
31898f4c651SGreg Kroah-Hartman return -ENODEV;
319ba50bf1cSDexuan Cui
320ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
321ba50bf1cSDexuan Cui &outbound);
322ba50bf1cSDexuan Cui if (ret < 0)
323ba50bf1cSDexuan Cui return ret;
32498f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", outbound.current_read_index);
32598f4c651SGreg Kroah-Hartman }
32698f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(out_read_index);
32798f4c651SGreg Kroah-Hartman
out_write_index_show(struct device * dev,struct device_attribute * dev_attr,char * buf)32898f4c651SGreg Kroah-Hartman static ssize_t out_write_index_show(struct device *dev,
32998f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr,
33098f4c651SGreg Kroah-Hartman char *buf)
33198f4c651SGreg Kroah-Hartman {
33298f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
33398f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info outbound;
334ba50bf1cSDexuan Cui int ret;
33598f4c651SGreg Kroah-Hartman
33698f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
33798f4c651SGreg Kroah-Hartman return -ENODEV;
338ba50bf1cSDexuan Cui
339ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
340ba50bf1cSDexuan Cui &outbound);
341ba50bf1cSDexuan Cui if (ret < 0)
342ba50bf1cSDexuan Cui return ret;
34398f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", outbound.current_write_index);
34498f4c651SGreg Kroah-Hartman }
34598f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(out_write_index);
34698f4c651SGreg Kroah-Hartman
out_read_bytes_avail_show(struct device * dev,struct device_attribute * dev_attr,char * buf)34798f4c651SGreg Kroah-Hartman static ssize_t out_read_bytes_avail_show(struct device *dev,
34898f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr,
34998f4c651SGreg Kroah-Hartman char *buf)
35098f4c651SGreg Kroah-Hartman {
35198f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
35298f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info outbound;
353ba50bf1cSDexuan Cui int ret;
35498f4c651SGreg Kroah-Hartman
35598f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
35698f4c651SGreg Kroah-Hartman return -ENODEV;
357ba50bf1cSDexuan Cui
358ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
359ba50bf1cSDexuan Cui &outbound);
360ba50bf1cSDexuan Cui if (ret < 0)
361ba50bf1cSDexuan Cui return ret;
36298f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
36398f4c651SGreg Kroah-Hartman }
36498f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(out_read_bytes_avail);
36598f4c651SGreg Kroah-Hartman
out_write_bytes_avail_show(struct device * dev,struct device_attribute * dev_attr,char * buf)36698f4c651SGreg Kroah-Hartman static ssize_t out_write_bytes_avail_show(struct device *dev,
36798f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr,
36898f4c651SGreg Kroah-Hartman char *buf)
36998f4c651SGreg Kroah-Hartman {
37098f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
37198f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info outbound;
372ba50bf1cSDexuan Cui int ret;
37398f4c651SGreg Kroah-Hartman
37498f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
37598f4c651SGreg Kroah-Hartman return -ENODEV;
376ba50bf1cSDexuan Cui
377ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
378ba50bf1cSDexuan Cui &outbound);
379ba50bf1cSDexuan Cui if (ret < 0)
380ba50bf1cSDexuan Cui return ret;
38198f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
38298f4c651SGreg Kroah-Hartman }
38398f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(out_write_bytes_avail);
38498f4c651SGreg Kroah-Hartman
in_intr_mask_show(struct device * dev,struct device_attribute * dev_attr,char * buf)38598f4c651SGreg Kroah-Hartman static ssize_t in_intr_mask_show(struct device *dev,
38698f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
38798f4c651SGreg Kroah-Hartman {
38898f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
38998f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info inbound;
390ba50bf1cSDexuan Cui int ret;
39198f4c651SGreg Kroah-Hartman
39298f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
39398f4c651SGreg Kroah-Hartman return -ENODEV;
394ba50bf1cSDexuan Cui
395ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
396ba50bf1cSDexuan Cui if (ret < 0)
397ba50bf1cSDexuan Cui return ret;
398ba50bf1cSDexuan Cui
39998f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
40098f4c651SGreg Kroah-Hartman }
40198f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(in_intr_mask);
40298f4c651SGreg Kroah-Hartman
in_read_index_show(struct device * dev,struct device_attribute * dev_attr,char * buf)40398f4c651SGreg Kroah-Hartman static ssize_t in_read_index_show(struct device *dev,
40498f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
40598f4c651SGreg Kroah-Hartman {
40698f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
40798f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info inbound;
408ba50bf1cSDexuan Cui int ret;
40998f4c651SGreg Kroah-Hartman
41098f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
41198f4c651SGreg Kroah-Hartman return -ENODEV;
412ba50bf1cSDexuan Cui
413ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
414ba50bf1cSDexuan Cui if (ret < 0)
415ba50bf1cSDexuan Cui return ret;
416ba50bf1cSDexuan Cui
41798f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", inbound.current_read_index);
41898f4c651SGreg Kroah-Hartman }
41998f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(in_read_index);
42098f4c651SGreg Kroah-Hartman
in_write_index_show(struct device * dev,struct device_attribute * dev_attr,char * buf)42198f4c651SGreg Kroah-Hartman static ssize_t in_write_index_show(struct device *dev,
42298f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr, char *buf)
42398f4c651SGreg Kroah-Hartman {
42498f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
42598f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info inbound;
426ba50bf1cSDexuan Cui int ret;
42798f4c651SGreg Kroah-Hartman
42898f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
42998f4c651SGreg Kroah-Hartman return -ENODEV;
430ba50bf1cSDexuan Cui
431ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
432ba50bf1cSDexuan Cui if (ret < 0)
433ba50bf1cSDexuan Cui return ret;
434ba50bf1cSDexuan Cui
43598f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", inbound.current_write_index);
43698f4c651SGreg Kroah-Hartman }
43798f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(in_write_index);
43898f4c651SGreg Kroah-Hartman
in_read_bytes_avail_show(struct device * dev,struct device_attribute * dev_attr,char * buf)43998f4c651SGreg Kroah-Hartman static ssize_t in_read_bytes_avail_show(struct device *dev,
44098f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr,
44198f4c651SGreg Kroah-Hartman char *buf)
44298f4c651SGreg Kroah-Hartman {
44398f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
44498f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info inbound;
445ba50bf1cSDexuan Cui int ret;
44698f4c651SGreg Kroah-Hartman
44798f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
44898f4c651SGreg Kroah-Hartman return -ENODEV;
449ba50bf1cSDexuan Cui
450ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
451ba50bf1cSDexuan Cui if (ret < 0)
452ba50bf1cSDexuan Cui return ret;
453ba50bf1cSDexuan Cui
45498f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
45598f4c651SGreg Kroah-Hartman }
45698f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(in_read_bytes_avail);
45798f4c651SGreg Kroah-Hartman
in_write_bytes_avail_show(struct device * dev,struct device_attribute * dev_attr,char * buf)45898f4c651SGreg Kroah-Hartman static ssize_t in_write_bytes_avail_show(struct device *dev,
45998f4c651SGreg Kroah-Hartman struct device_attribute *dev_attr,
46098f4c651SGreg Kroah-Hartman char *buf)
46198f4c651SGreg Kroah-Hartman {
46298f4c651SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(dev);
46398f4c651SGreg Kroah-Hartman struct hv_ring_buffer_debug_info inbound;
464ba50bf1cSDexuan Cui int ret;
46598f4c651SGreg Kroah-Hartman
46698f4c651SGreg Kroah-Hartman if (!hv_dev->channel)
46798f4c651SGreg Kroah-Hartman return -ENODEV;
468ba50bf1cSDexuan Cui
469ba50bf1cSDexuan Cui ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
470ba50bf1cSDexuan Cui if (ret < 0)
471ba50bf1cSDexuan Cui return ret;
472ba50bf1cSDexuan Cui
47398f4c651SGreg Kroah-Hartman return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
47498f4c651SGreg Kroah-Hartman }
47598f4c651SGreg Kroah-Hartman static DEVICE_ATTR_RO(in_write_bytes_avail);
47698f4c651SGreg Kroah-Hartman
channel_vp_mapping_show(struct device * dev,struct device_attribute * dev_attr,char * buf)477042ab031SDexuan Cui static ssize_t channel_vp_mapping_show(struct device *dev,
478042ab031SDexuan Cui struct device_attribute *dev_attr,
479042ab031SDexuan Cui char *buf)
480042ab031SDexuan Cui {
481042ab031SDexuan Cui struct hv_device *hv_dev = device_to_hv_device(dev);
482042ab031SDexuan Cui struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
483042ab031SDexuan Cui int buf_size = PAGE_SIZE, n_written, tot_written;
484042ab031SDexuan Cui struct list_head *cur;
485042ab031SDexuan Cui
486042ab031SDexuan Cui if (!channel)
487042ab031SDexuan Cui return -ENODEV;
488042ab031SDexuan Cui
4893eb0ac86SAndrea Parri (Microsoft) mutex_lock(&vmbus_connection.channel_mutex);
4903eb0ac86SAndrea Parri (Microsoft)
491042ab031SDexuan Cui tot_written = snprintf(buf, buf_size, "%u:%u\n",
492042ab031SDexuan Cui channel->offermsg.child_relid, channel->target_cpu);
493042ab031SDexuan Cui
494042ab031SDexuan Cui list_for_each(cur, &channel->sc_list) {
495042ab031SDexuan Cui if (tot_written >= buf_size - 1)
496042ab031SDexuan Cui break;
497042ab031SDexuan Cui
498042ab031SDexuan Cui cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
499042ab031SDexuan Cui n_written = scnprintf(buf + tot_written,
500042ab031SDexuan Cui buf_size - tot_written,
501042ab031SDexuan Cui "%u:%u\n",
502042ab031SDexuan Cui cur_sc->offermsg.child_relid,
503042ab031SDexuan Cui cur_sc->target_cpu);
504042ab031SDexuan Cui tot_written += n_written;
505042ab031SDexuan Cui }
506042ab031SDexuan Cui
5073eb0ac86SAndrea Parri (Microsoft) mutex_unlock(&vmbus_connection.channel_mutex);
508042ab031SDexuan Cui
509042ab031SDexuan Cui return tot_written;
510042ab031SDexuan Cui }
511042ab031SDexuan Cui static DEVICE_ATTR_RO(channel_vp_mapping);
512042ab031SDexuan Cui
vendor_show(struct device * dev,struct device_attribute * dev_attr,char * buf)5137047f17dSK. Y. Srinivasan static ssize_t vendor_show(struct device *dev,
5147047f17dSK. Y. Srinivasan struct device_attribute *dev_attr,
5157047f17dSK. Y. Srinivasan char *buf)
5167047f17dSK. Y. Srinivasan {
5177047f17dSK. Y. Srinivasan struct hv_device *hv_dev = device_to_hv_device(dev);
518e4f2212eSMatheus Castello
5197047f17dSK. Y. Srinivasan return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
5207047f17dSK. Y. Srinivasan }
5217047f17dSK. Y. Srinivasan static DEVICE_ATTR_RO(vendor);
5227047f17dSK. Y. Srinivasan
device_show(struct device * dev,struct device_attribute * dev_attr,char * buf)5237047f17dSK. Y. Srinivasan static ssize_t device_show(struct device *dev,
5247047f17dSK. Y. Srinivasan struct device_attribute *dev_attr,
5257047f17dSK. Y. Srinivasan char *buf)
5267047f17dSK. Y. Srinivasan {
5277047f17dSK. Y. Srinivasan struct hv_device *hv_dev = device_to_hv_device(dev);
528e4f2212eSMatheus Castello
5297047f17dSK. Y. Srinivasan return sprintf(buf, "0x%x\n", hv_dev->device_id);
5307047f17dSK. Y. Srinivasan }
5317047f17dSK. Y. Srinivasan static DEVICE_ATTR_RO(device);
5327047f17dSK. Y. Srinivasan
driver_override_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)533d765edbbSStephen Hemminger static ssize_t driver_override_store(struct device *dev,
534d765edbbSStephen Hemminger struct device_attribute *attr,
535d765edbbSStephen Hemminger const char *buf, size_t count)
536d765edbbSStephen Hemminger {
537d765edbbSStephen Hemminger struct hv_device *hv_dev = device_to_hv_device(dev);
53801ed1002SKrzysztof Kozlowski int ret;
539d765edbbSStephen Hemminger
54001ed1002SKrzysztof Kozlowski ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
54101ed1002SKrzysztof Kozlowski if (ret)
54201ed1002SKrzysztof Kozlowski return ret;
543d765edbbSStephen Hemminger
544d765edbbSStephen Hemminger return count;
545d765edbbSStephen Hemminger }
546d765edbbSStephen Hemminger
driver_override_show(struct device * dev,struct device_attribute * attr,char * buf)547d765edbbSStephen Hemminger static ssize_t driver_override_show(struct device *dev,
548d765edbbSStephen Hemminger struct device_attribute *attr, char *buf)
549d765edbbSStephen Hemminger {
550d765edbbSStephen Hemminger struct hv_device *hv_dev = device_to_hv_device(dev);
551d765edbbSStephen Hemminger ssize_t len;
552d765edbbSStephen Hemminger
553d765edbbSStephen Hemminger device_lock(dev);
554d765edbbSStephen Hemminger len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
555d765edbbSStephen Hemminger device_unlock(dev);
556d765edbbSStephen Hemminger
557d765edbbSStephen Hemminger return len;
558d765edbbSStephen Hemminger }
559d765edbbSStephen Hemminger static DEVICE_ATTR_RW(driver_override);
560d765edbbSStephen Hemminger
56198f4c651SGreg Kroah-Hartman /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
/*
 * All per-device sysfs attributes. Visibility of the monitor-related
 * entries is filtered at runtime by vmbus_dev_attr_is_visible() below.
 */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
59446fc1548SKimberly Brown
59546fc1548SKimberly Brown /*
59646fc1548SKimberly Brown * Device-level attribute_group callback function. Returns the permission for
59746fc1548SKimberly Brown * each attribute, and returns 0 if an attribute is not visible.
59846fc1548SKimberly Brown */
vmbus_dev_attr_is_visible(struct kobject * kobj,struct attribute * attr,int idx)59946fc1548SKimberly Brown static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
60046fc1548SKimberly Brown struct attribute *attr, int idx)
60146fc1548SKimberly Brown {
60246fc1548SKimberly Brown struct device *dev = kobj_to_dev(kobj);
60346fc1548SKimberly Brown const struct hv_device *hv_dev = device_to_hv_device(dev);
60446fc1548SKimberly Brown
60546fc1548SKimberly Brown /* Hide the monitor attributes if the monitor mechanism is not used. */
60646fc1548SKimberly Brown if (!hv_dev->channel->offermsg.monitor_allocated &&
60746fc1548SKimberly Brown (attr == &dev_attr_monitor_id.attr ||
60846fc1548SKimberly Brown attr == &dev_attr_server_monitor_pending.attr ||
60946fc1548SKimberly Brown attr == &dev_attr_client_monitor_pending.attr ||
61046fc1548SKimberly Brown attr == &dev_attr_server_monitor_latency.attr ||
61146fc1548SKimberly Brown attr == &dev_attr_client_monitor_latency.attr ||
61246fc1548SKimberly Brown attr == &dev_attr_server_monitor_conn_id.attr ||
61346fc1548SKimberly Brown attr == &dev_attr_client_monitor_conn_id.attr))
61446fc1548SKimberly Brown return 0;
61546fc1548SKimberly Brown
61646fc1548SKimberly Brown return attr->mode;
61746fc1548SKimberly Brown }
61846fc1548SKimberly Brown
/* Per-device attribute group; .is_visible hides monitor attrs when unused. */
static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);
62403f3a910SGreg Kroah-Hartman
625c068e3f4SDexuan Cui /* Set up the attribute for /sys/bus/vmbus/hibernation */
hibernation_show(const struct bus_type * bus,char * buf)62675cff725SGreg Kroah-Hartman static ssize_t hibernation_show(const struct bus_type *bus, char *buf)
627c068e3f4SDexuan Cui {
628c068e3f4SDexuan Cui return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
629c068e3f4SDexuan Cui }
630c068e3f4SDexuan Cui
631c068e3f4SDexuan Cui static BUS_ATTR_RO(hibernation);
632c068e3f4SDexuan Cui
/* Attributes exposed at the bus level, under /sys/bus/vmbus/. */
static struct attribute *vmbus_bus_attrs[] = {
	&bus_attr_hibernation.attr,
	NULL,
};
static const struct attribute_group vmbus_bus_group = {
	.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);
641c068e3f4SDexuan Cui
64246a97191SGreg Kroah-Hartman /*
64346a97191SGreg Kroah-Hartman * vmbus_uevent - add uevent for our device
64446a97191SGreg Kroah-Hartman *
64546a97191SGreg Kroah-Hartman * This routine is invoked when a device is added or removed on the vmbus to
64646a97191SGreg Kroah-Hartman * generate a uevent to udev in the userspace. The udev will then look at its
64746a97191SGreg Kroah-Hartman * rule and the uevent generated here to load the appropriate driver
64846a97191SGreg Kroah-Hartman *
64946a97191SGreg Kroah-Hartman * The alias string will be of the form vmbus:guid where guid is the string
65046a97191SGreg Kroah-Hartman * representation of the device guid (each byte of the guid will be
65146a97191SGreg Kroah-Hartman * represented with two hex characters.
65246a97191SGreg Kroah-Hartman */
vmbus_uevent(const struct device * device,struct kobj_uevent_env * env)6532a81ada3SGreg Kroah-Hartman static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env)
65446a97191SGreg Kroah-Hartman {
6552a81ada3SGreg Kroah-Hartman const struct hv_device *dev = device_to_hv_device(device);
6560027e3fdSAndy Shevchenko const char *format = "MODALIAS=vmbus:%*phN";
65746a97191SGreg Kroah-Hartman
6580027e3fdSAndy Shevchenko return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
65946a97191SGreg Kroah-Hartman }
66046a97191SGreg Kroah-Hartman
661d765edbbSStephen Hemminger static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id * id,const guid_t * guid)662593db803SAndy Shevchenko hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
663d765edbbSStephen Hemminger {
664d765edbbSStephen Hemminger if (id == NULL)
665d765edbbSStephen Hemminger return NULL; /* empty device table */
666d765edbbSStephen Hemminger
667593db803SAndy Shevchenko for (; !guid_is_null(&id->guid); id++)
668593db803SAndy Shevchenko if (guid_equal(&id->guid, guid))
669d765edbbSStephen Hemminger return id;
670d765edbbSStephen Hemminger
671d765edbbSStephen Hemminger return NULL;
672d765edbbSStephen Hemminger }
673d765edbbSStephen Hemminger
674d765edbbSStephen Hemminger static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver * drv,const guid_t * guid)675593db803SAndy Shevchenko hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
67646a97191SGreg Kroah-Hartman {
677fc76936dSStephen Hemminger const struct hv_vmbus_device_id *id = NULL;
678fc76936dSStephen Hemminger struct vmbus_dynid *dynid;
679fc76936dSStephen Hemminger
680fc76936dSStephen Hemminger spin_lock(&drv->dynids.lock);
681fc76936dSStephen Hemminger list_for_each_entry(dynid, &drv->dynids.list, node) {
682593db803SAndy Shevchenko if (guid_equal(&dynid->id.guid, guid)) {
683fc76936dSStephen Hemminger id = &dynid->id;
684fc76936dSStephen Hemminger break;
685fc76936dSStephen Hemminger }
686fc76936dSStephen Hemminger }
687fc76936dSStephen Hemminger spin_unlock(&drv->dynids.lock);
688fc76936dSStephen Hemminger
689fc76936dSStephen Hemminger return id;
690d765edbbSStephen Hemminger }
691fc76936dSStephen Hemminger
692593db803SAndy Shevchenko static const struct hv_vmbus_device_id vmbus_device_null;
693fc76936dSStephen Hemminger
694d765edbbSStephen Hemminger /*
695d765edbbSStephen Hemminger * Return a matching hv_vmbus_device_id pointer.
696d765edbbSStephen Hemminger * If there is no match, return NULL.
697d765edbbSStephen Hemminger */
hv_vmbus_get_id(struct hv_driver * drv,struct hv_device * dev)698d765edbbSStephen Hemminger static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
699d765edbbSStephen Hemminger struct hv_device *dev)
700d765edbbSStephen Hemminger {
701593db803SAndy Shevchenko const guid_t *guid = &dev->dev_type;
702d765edbbSStephen Hemminger const struct hv_vmbus_device_id *id;
70346a97191SGreg Kroah-Hartman
704d765edbbSStephen Hemminger /* When driver_override is set, only bind to the matching driver */
705d765edbbSStephen Hemminger if (dev->driver_override && strcmp(dev->driver_override, drv->name))
70646a97191SGreg Kroah-Hartman return NULL;
707d765edbbSStephen Hemminger
708d765edbbSStephen Hemminger /* Look at the dynamic ids first, before the static ones */
709d765edbbSStephen Hemminger id = hv_vmbus_dynid_match(drv, guid);
710d765edbbSStephen Hemminger if (!id)
711d765edbbSStephen Hemminger id = hv_vmbus_dev_match(drv->id_table, guid);
712d765edbbSStephen Hemminger
713d765edbbSStephen Hemminger /* driver_override will always match, send a dummy id */
714d765edbbSStephen Hemminger if (!id && dev->driver_override)
715d765edbbSStephen Hemminger id = &vmbus_device_null;
716d765edbbSStephen Hemminger
717d765edbbSStephen Hemminger return id;
71846a97191SGreg Kroah-Hartman }
71946a97191SGreg Kroah-Hartman
720fc76936dSStephen Hemminger /* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&entry->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	/* Re-run matching so currently unbound devices can use the new id. */
	return driver_attach(&drv->driver);
}
737fc76936dSStephen Hemminger
vmbus_free_dynids(struct hv_driver * drv)738fc76936dSStephen Hemminger static void vmbus_free_dynids(struct hv_driver *drv)
739fc76936dSStephen Hemminger {
740fc76936dSStephen Hemminger struct vmbus_dynid *dynid, *n;
741fc76936dSStephen Hemminger
742fc76936dSStephen Hemminger spin_lock(&drv->dynids.lock);
743fc76936dSStephen Hemminger list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
744fc76936dSStephen Hemminger list_del(&dynid->node);
745fc76936dSStephen Hemminger kfree(dynid);
746fc76936dSStephen Hemminger }
747fc76936dSStephen Hemminger spin_unlock(&drv->dynids.lock);
748fc76936dSStephen Hemminger }
749fc76936dSStephen Hemminger
750fc76936dSStephen Hemminger /*
751fc76936dSStephen Hemminger * store_new_id - sysfs frontend to vmbus_add_dynid()
752fc76936dSStephen Hemminger *
753fc76936dSStephen Hemminger * Allow GUIDs to be added to an existing driver via sysfs.
754fc76936dSStephen Hemminger */
new_id_store(struct device_driver * driver,const char * buf,size_t count)755fc76936dSStephen Hemminger static ssize_t new_id_store(struct device_driver *driver, const char *buf,
756fc76936dSStephen Hemminger size_t count)
757fc76936dSStephen Hemminger {
758fc76936dSStephen Hemminger struct hv_driver *drv = drv_to_hv_drv(driver);
759593db803SAndy Shevchenko guid_t guid;
760fc76936dSStephen Hemminger ssize_t retval;
761fc76936dSStephen Hemminger
762593db803SAndy Shevchenko retval = guid_parse(buf, &guid);
76331100108SAndy Shevchenko if (retval)
76431100108SAndy Shevchenko return retval;
765fc76936dSStephen Hemminger
766d765edbbSStephen Hemminger if (hv_vmbus_dynid_match(drv, &guid))
767fc76936dSStephen Hemminger return -EEXIST;
768fc76936dSStephen Hemminger
769fc76936dSStephen Hemminger retval = vmbus_add_dynid(drv, &guid);
770fc76936dSStephen Hemminger if (retval)
771fc76936dSStephen Hemminger return retval;
772fc76936dSStephen Hemminger return count;
773fc76936dSStephen Hemminger }
774fc76936dSStephen Hemminger static DRIVER_ATTR_WO(new_id);
775fc76936dSStephen Hemminger
776fc76936dSStephen Hemminger /*
777fc76936dSStephen Hemminger * store_remove_id - remove a PCI device ID from this driver
778fc76936dSStephen Hemminger *
779fc76936dSStephen Hemminger * Removes a dynamic pci device ID to this driver.
780fc76936dSStephen Hemminger */
remove_id_store(struct device_driver * driver,const char * buf,size_t count)781fc76936dSStephen Hemminger static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
782fc76936dSStephen Hemminger size_t count)
783fc76936dSStephen Hemminger {
784fc76936dSStephen Hemminger struct hv_driver *drv = drv_to_hv_drv(driver);
785fc76936dSStephen Hemminger struct vmbus_dynid *dynid, *n;
786593db803SAndy Shevchenko guid_t guid;
78731100108SAndy Shevchenko ssize_t retval;
788fc76936dSStephen Hemminger
789593db803SAndy Shevchenko retval = guid_parse(buf, &guid);
79031100108SAndy Shevchenko if (retval)
79131100108SAndy Shevchenko return retval;
792fc76936dSStephen Hemminger
79331100108SAndy Shevchenko retval = -ENODEV;
794fc76936dSStephen Hemminger spin_lock(&drv->dynids.lock);
795fc76936dSStephen Hemminger list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
796fc76936dSStephen Hemminger struct hv_vmbus_device_id *id = &dynid->id;
797fc76936dSStephen Hemminger
798593db803SAndy Shevchenko if (guid_equal(&id->guid, &guid)) {
799fc76936dSStephen Hemminger list_del(&dynid->node);
800fc76936dSStephen Hemminger kfree(dynid);
801fc76936dSStephen Hemminger retval = count;
802fc76936dSStephen Hemminger break;
803fc76936dSStephen Hemminger }
804fc76936dSStephen Hemminger }
805fc76936dSStephen Hemminger spin_unlock(&drv->dynids.lock);
806fc76936dSStephen Hemminger
807fc76936dSStephen Hemminger return retval;
808fc76936dSStephen Hemminger }
809fc76936dSStephen Hemminger static DRIVER_ATTR_WO(remove_id);
810fc76936dSStephen Hemminger
/* Driver-level sysfs attributes: dynamic id management (new_id/remove_id). */
static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
81746a97191SGreg Kroah-Hartman
81846a97191SGreg Kroah-Hartman
81946a97191SGreg Kroah-Hartman /*
82046a97191SGreg Kroah-Hartman * vmbus_match - Attempt to match the specified device to the specified driver
82146a97191SGreg Kroah-Hartman */
vmbus_match(struct device * device,struct device_driver * driver)82246a97191SGreg Kroah-Hartman static int vmbus_match(struct device *device, struct device_driver *driver)
82346a97191SGreg Kroah-Hartman {
82446a97191SGreg Kroah-Hartman struct hv_driver *drv = drv_to_hv_drv(driver);
82546a97191SGreg Kroah-Hartman struct hv_device *hv_dev = device_to_hv_device(device);
82646a97191SGreg Kroah-Hartman
8278981da32SDexuan Cui /* The hv_sock driver handles all hv_sock offers. */
8288981da32SDexuan Cui if (is_hvsock_channel(hv_dev->channel))
8298981da32SDexuan Cui return drv->hvsock;
8308981da32SDexuan Cui
831d765edbbSStephen Hemminger if (hv_vmbus_get_id(drv, hv_dev))
83246a97191SGreg Kroah-Hartman return 1;
83346a97191SGreg Kroah-Hartman
83446a97191SGreg Kroah-Hartman return 0;
83546a97191SGreg Kroah-Hartman }
83646a97191SGreg Kroah-Hartman
83746a97191SGreg Kroah-Hartman /*
83846a97191SGreg Kroah-Hartman * vmbus_probe - Add the new vmbus's child device
83946a97191SGreg Kroah-Hartman */
vmbus_probe(struct device * child_device)84046a97191SGreg Kroah-Hartman static int vmbus_probe(struct device *child_device)
84146a97191SGreg Kroah-Hartman {
84246a97191SGreg Kroah-Hartman int ret = 0;
84346a97191SGreg Kroah-Hartman struct hv_driver *drv =
84446a97191SGreg Kroah-Hartman drv_to_hv_drv(child_device->driver);
84546a97191SGreg Kroah-Hartman struct hv_device *dev = device_to_hv_device(child_device);
84646a97191SGreg Kroah-Hartman const struct hv_vmbus_device_id *dev_id;
84746a97191SGreg Kroah-Hartman
848d765edbbSStephen Hemminger dev_id = hv_vmbus_get_id(drv, dev);
84946a97191SGreg Kroah-Hartman if (drv->probe) {
85046a97191SGreg Kroah-Hartman ret = drv->probe(dev, dev_id);
85146a97191SGreg Kroah-Hartman if (ret != 0)
85246a97191SGreg Kroah-Hartman pr_err("probe failed for device %s (%d)\n",
85346a97191SGreg Kroah-Hartman dev_name(child_device), ret);
85446a97191SGreg Kroah-Hartman
85546a97191SGreg Kroah-Hartman } else {
85646a97191SGreg Kroah-Hartman pr_err("probe not set for driver %s\n",
85746a97191SGreg Kroah-Hartman dev_name(child_device));
85846a97191SGreg Kroah-Hartman ret = -ENODEV;
85946a97191SGreg Kroah-Hartman }
86046a97191SGreg Kroah-Hartman return ret;
86146a97191SGreg Kroah-Hartman }
86246a97191SGreg Kroah-Hartman
86346a97191SGreg Kroah-Hartman /*
86437200078SMichael Kelley * vmbus_dma_configure -- Configure DMA coherence for VMbus device
86537200078SMichael Kelley */
static int vmbus_dma_configure(struct device *child_device)
{
	/*
	 * On ARM64, propagate the DMA coherence setting from the top level
	 * VMbus ACPI device to the child VMbus device being added here.
	 * On x86/x64 coherence is assumed and these calls have no effect.
	 *
	 * NOTE(review): hv_dev is presumably a file-scope pointer to the
	 * root VMbus device (it is not declared in this chunk) — confirm.
	 */
	hv_setup_dma_ops(child_device,
			 device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
	return 0;
}
87737200078SMichael Kelley
87837200078SMichael Kelley /*
87946a97191SGreg Kroah-Hartman * vmbus_remove - Remove a vmbus device
88046a97191SGreg Kroah-Hartman */
vmbus_remove(struct device * child_device)881fc7a6209SUwe Kleine-König static void vmbus_remove(struct device *child_device)
88246a97191SGreg Kroah-Hartman {
883d15a0301SK. Y. Srinivasan struct hv_driver *drv;
88446a97191SGreg Kroah-Hartman struct hv_device *dev = device_to_hv_device(child_device);
88546a97191SGreg Kroah-Hartman
886d15a0301SK. Y. Srinivasan if (child_device->driver) {
887d15a0301SK. Y. Srinivasan drv = drv_to_hv_drv(child_device->driver);
88846a97191SGreg Kroah-Hartman if (drv->remove)
88946a97191SGreg Kroah-Hartman drv->remove(dev);
890ed6cfcc5SK. Y. Srinivasan }
89146a97191SGreg Kroah-Hartman }
89246a97191SGreg Kroah-Hartman
89346a97191SGreg Kroah-Hartman /*
89446a97191SGreg Kroah-Hartman * vmbus_shutdown - Shutdown a vmbus device
89546a97191SGreg Kroah-Hartman */
vmbus_shutdown(struct device * child_device)89646a97191SGreg Kroah-Hartman static void vmbus_shutdown(struct device *child_device)
89746a97191SGreg Kroah-Hartman {
89846a97191SGreg Kroah-Hartman struct hv_driver *drv;
89946a97191SGreg Kroah-Hartman struct hv_device *dev = device_to_hv_device(child_device);
90046a97191SGreg Kroah-Hartman
90146a97191SGreg Kroah-Hartman
90246a97191SGreg Kroah-Hartman /* The device may not be attached yet */
90346a97191SGreg Kroah-Hartman if (!child_device->driver)
90446a97191SGreg Kroah-Hartman return;
90546a97191SGreg Kroah-Hartman
90646a97191SGreg Kroah-Hartman drv = drv_to_hv_drv(child_device->driver);
90746a97191SGreg Kroah-Hartman
90846a97191SGreg Kroah-Hartman if (drv->shutdown)
90946a97191SGreg Kroah-Hartman drv->shutdown(dev);
91046a97191SGreg Kroah-Hartman }
91146a97191SGreg Kroah-Hartman
91283b50f83SDexuan Cui #ifdef CONFIG_PM_SLEEP
913271b2224SDexuan Cui /*
914271b2224SDexuan Cui * vmbus_suspend - Suspend a vmbus device
915271b2224SDexuan Cui */
vmbus_suspend(struct device * child_device)916271b2224SDexuan Cui static int vmbus_suspend(struct device *child_device)
917271b2224SDexuan Cui {
918271b2224SDexuan Cui struct hv_driver *drv;
919271b2224SDexuan Cui struct hv_device *dev = device_to_hv_device(child_device);
920271b2224SDexuan Cui
921271b2224SDexuan Cui /* The device may not be attached yet */
922271b2224SDexuan Cui if (!child_device->driver)
923271b2224SDexuan Cui return 0;
924271b2224SDexuan Cui
925271b2224SDexuan Cui drv = drv_to_hv_drv(child_device->driver);
926271b2224SDexuan Cui if (!drv->suspend)
927271b2224SDexuan Cui return -EOPNOTSUPP;
928271b2224SDexuan Cui
929271b2224SDexuan Cui return drv->suspend(dev);
930271b2224SDexuan Cui }
931271b2224SDexuan Cui
932271b2224SDexuan Cui /*
933271b2224SDexuan Cui * vmbus_resume - Resume a vmbus device
934271b2224SDexuan Cui */
vmbus_resume(struct device * child_device)935271b2224SDexuan Cui static int vmbus_resume(struct device *child_device)
936271b2224SDexuan Cui {
937271b2224SDexuan Cui struct hv_driver *drv;
938271b2224SDexuan Cui struct hv_device *dev = device_to_hv_device(child_device);
939271b2224SDexuan Cui
940271b2224SDexuan Cui /* The device may not be attached yet */
941271b2224SDexuan Cui if (!child_device->driver)
942271b2224SDexuan Cui return 0;
943271b2224SDexuan Cui
944271b2224SDexuan Cui drv = drv_to_hv_drv(child_device->driver);
945271b2224SDexuan Cui if (!drv->resume)
946271b2224SDexuan Cui return -EOPNOTSUPP;
947271b2224SDexuan Cui
948271b2224SDexuan Cui return drv->resume(dev);
949271b2224SDexuan Cui }
9501a06d017SDexuan Cui #else
9511a06d017SDexuan Cui #define vmbus_suspend NULL
9521a06d017SDexuan Cui #define vmbus_resume NULL
95383b50f83SDexuan Cui #endif /* CONFIG_PM_SLEEP */
95446a97191SGreg Kroah-Hartman
95546a97191SGreg Kroah-Hartman /*
95646a97191SGreg Kroah-Hartman * vmbus_device_release - Final callback release of the vmbus child device
95746a97191SGreg Kroah-Hartman */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	/* Remove this device's debug directory before the device goes away. */
	hv_debug_rm_dev_dir(hv_dev);

	/*
	 * channel_mutex is held across the channel removal; presumably this
	 * serializes against concurrent offer/rescind processing — confirm
	 * against hv_process_channel_removal()'s locking requirements.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}
97046a97191SGreg Kroah-Hartman
971271b2224SDexuan Cui /*
9721a06d017SDexuan Cui * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
9731a06d017SDexuan Cui *
9741a06d017SDexuan Cui * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
9751a06d017SDexuan Cui * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
9761a06d017SDexuan Cui * is no way to wake up a Generation-2 VM.
9771a06d017SDexuan Cui *
9781a06d017SDexuan Cui * The other 4 ops are for hibernation.
979271b2224SDexuan Cui */
9801a06d017SDexuan Cui
static const struct dev_pm_ops vmbus_pm = {
	/* NULL suspend/resume_noirq: vmbus devices are not suspended on s2idle. */
	.suspend_noirq = NULL,
	.resume_noirq = NULL,
	/* Hibernation path reuses the suspend/resume callbacks. */
	.freeze_noirq = vmbus_suspend,
	.thaw_noirq = vmbus_resume,
	.poweroff_noirq = vmbus_suspend,
	.restore_noirq = vmbus_resume,
};
989271b2224SDexuan Cui
99046a97191SGreg Kroah-Hartman /* The one and only one */
/* Bus type for all VMbus child devices; wires up the callbacks above. */
static struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dma_configure =	vmbus_dma_configure,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
	.bus_groups =		vmbus_bus_groups,
	.pm =			&vmbus_pm,
};
100446a97191SGreg Kroah-Hartman
/* Deferred-work wrapper holding a private copy of one host message. */
struct onmessage_work_context {
	struct work_struct work;
	/* Copied message: fixed header followed by its variable payload. */
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};
101246a97191SGreg Kroah-Hartman
vmbus_onmessage_work(struct work_struct * work)101346a97191SGreg Kroah-Hartman static void vmbus_onmessage_work(struct work_struct *work)
101446a97191SGreg Kroah-Hartman {
101546a97191SGreg Kroah-Hartman struct onmessage_work_context *ctx;
101646a97191SGreg Kroah-Hartman
101709a19628SVitaly Kuznetsov /* Do not process messages if we're in DISCONNECTED state */
101809a19628SVitaly Kuznetsov if (vmbus_connection.conn_state == DISCONNECTED)
101909a19628SVitaly Kuznetsov return;
102009a19628SVitaly Kuznetsov
102146a97191SGreg Kroah-Hartman ctx = container_of(work, struct onmessage_work_context,
102246a97191SGreg Kroah-Hartman work);
10235cc41500SVitaly Kuznetsov vmbus_onmessage((struct vmbus_channel_message_header *)
10245cc41500SVitaly Kuznetsov &ctx->msg.payload);
102546a97191SGreg Kroah-Hartman kfree(ctx);
102646a97191SGreg Kroah-Hartman }
102746a97191SGreg Kroah-Hartman
vmbus_on_msg_dpc(unsigned long data)1028d81274aaSK. Y. Srinivasan void vmbus_on_msg_dpc(unsigned long data)
102946a97191SGreg Kroah-Hartman {
103037cdd991SStephen Hemminger struct hv_per_cpu_context *hv_cpu = (void *)data;
103137cdd991SStephen Hemminger void *page_addr = hv_cpu->synic_message_page;
1032fe8c1b18SAndrea Parri (Microsoft) struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
103346a97191SGreg Kroah-Hartman VMBUS_MESSAGE_SINT;
1034652594c7SDexuan Cui struct vmbus_channel_message_header *hdr;
10359c400d35SAndrea Parri (Microsoft) enum vmbus_channel_message_type msgtype;
1036e6242fa0SStephen Hemminger const struct vmbus_channel_message_table_entry *entry;
103746a97191SGreg Kroah-Hartman struct onmessage_work_context *ctx;
10389c400d35SAndrea Parri (Microsoft) __u8 payload_size;
1039fe8c1b18SAndrea Parri (Microsoft) u32 message_type;
104046a97191SGreg Kroah-Hartman
1041b0a284dcSVitaly Kuznetsov /*
1042b0a284dcSVitaly Kuznetsov * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
1043b0a284dcSVitaly Kuznetsov * it is being used in 'struct vmbus_channel_message_header' definition
1044b0a284dcSVitaly Kuznetsov * which is supposed to match hypervisor ABI.
1045b0a284dcSVitaly Kuznetsov */
1046b0a284dcSVitaly Kuznetsov BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
1047b0a284dcSVitaly Kuznetsov
1048fe8c1b18SAndrea Parri (Microsoft) /*
1049fe8c1b18SAndrea Parri (Microsoft) * Since the message is in memory shared with the host, an erroneous or
1050fe8c1b18SAndrea Parri (Microsoft) * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
1051fe8c1b18SAndrea Parri (Microsoft) * or individual message handlers are executing; to prevent this, copy
1052fe8c1b18SAndrea Parri (Microsoft) * the message into private memory.
1053fe8c1b18SAndrea Parri (Microsoft) */
1054fe8c1b18SAndrea Parri (Microsoft) memcpy(&msg_copy, msg, sizeof(struct hv_message));
1055fe8c1b18SAndrea Parri (Microsoft)
1056fe8c1b18SAndrea Parri (Microsoft) message_type = msg_copy.header.message_type;
1057cd95aad5SVitaly Kuznetsov if (message_type == HVMSG_NONE)
105846a97191SGreg Kroah-Hartman /* no msg */
10597be3e169SVitaly Kuznetsov return;
1060652594c7SDexuan Cui
1061fe8c1b18SAndrea Parri (Microsoft) hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
10629c400d35SAndrea Parri (Microsoft) msgtype = hdr->msgtype;
1063652594c7SDexuan Cui
1064c9fe0f8fSVitaly Kuznetsov trace_vmbus_on_msg_dpc(hdr);
1065c9fe0f8fSVitaly Kuznetsov
10669c400d35SAndrea Parri (Microsoft) if (msgtype >= CHANNELMSG_COUNT) {
10679c400d35SAndrea Parri (Microsoft) WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
1068652594c7SDexuan Cui goto msg_handled;
1069652594c7SDexuan Cui }
1070652594c7SDexuan Cui
1071fe8c1b18SAndrea Parri (Microsoft) payload_size = msg_copy.header.payload_size;
10729c400d35SAndrea Parri (Microsoft) if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
10739c400d35SAndrea Parri (Microsoft) WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
1074ac0f7d42SVitaly Kuznetsov goto msg_handled;
1075ac0f7d42SVitaly Kuznetsov }
1076ac0f7d42SVitaly Kuznetsov
10779c400d35SAndrea Parri (Microsoft) entry = &channel_message_table[msgtype];
1078ddc9d357SDexuan Cui
1079ddc9d357SDexuan Cui if (!entry->message_handler)
1080ddc9d357SDexuan Cui goto msg_handled;
1081ddc9d357SDexuan Cui
10829c400d35SAndrea Parri (Microsoft) if (payload_size < entry->min_payload_len) {
10839c400d35SAndrea Parri (Microsoft) WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
108452c7803fSVitaly Kuznetsov goto msg_handled;
108552c7803fSVitaly Kuznetsov }
108652c7803fSVitaly Kuznetsov
1087652594c7SDexuan Cui if (entry->handler_type == VMHT_BLOCKING) {
1088a70d298cSGustavo A. R. Silva ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
108946a97191SGreg Kroah-Hartman if (ctx == NULL)
10907be3e169SVitaly Kuznetsov return;
1091652594c7SDexuan Cui
109246a97191SGreg Kroah-Hartman INIT_WORK(&ctx->work, vmbus_onmessage_work);
1093fb2d14adSKees Cook ctx->msg.header = msg_copy.header;
1094fb2d14adSKees Cook memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);
109546a97191SGreg Kroah-Hartman
109654a66265SK. Y. Srinivasan /*
109754a66265SK. Y. Srinivasan * The host can generate a rescind message while we
109854a66265SK. Y. Srinivasan * may still be handling the original offer. We deal with
1099b9fa1b87SAndrea Parri (Microsoft) * this condition by relying on the synchronization provided
1100b9fa1b87SAndrea Parri (Microsoft) * by offer_in_progress and by channel_mutex. See also the
1101b9fa1b87SAndrea Parri (Microsoft) * inline comments in vmbus_onoffer_rescind().
110254a66265SK. Y. Srinivasan */
11039c400d35SAndrea Parri (Microsoft) switch (msgtype) {
110454a66265SK. Y. Srinivasan case CHANNELMSG_RESCIND_CHANNELOFFER:
110554a66265SK. Y. Srinivasan /*
110654a66265SK. Y. Srinivasan * If we are handling the rescind message;
110754a66265SK. Y. Srinivasan * schedule the work on the global work queue.
11088a857c55SAndrea Parri (Microsoft) *
11098a857c55SAndrea Parri (Microsoft) * The OFFER message and the RESCIND message should
11108a857c55SAndrea Parri (Microsoft) * not be handled by the same serialized work queue,
11118a857c55SAndrea Parri (Microsoft) * because the OFFER handler may call vmbus_open(),
11128a857c55SAndrea Parri (Microsoft) * which tries to open the channel by sending an
11138a857c55SAndrea Parri (Microsoft) * OPEN_CHANNEL message to the host and waits for
11148a857c55SAndrea Parri (Microsoft) * the host's response; however, if the host has
11158a857c55SAndrea Parri (Microsoft) * rescinded the channel before it receives the
11168a857c55SAndrea Parri (Microsoft) * OPEN_CHANNEL message, the host just silently
11178a857c55SAndrea Parri (Microsoft) * ignores the OPEN_CHANNEL message; as a result,
11188a857c55SAndrea Parri (Microsoft) * the guest's OFFER handler hangs for ever, if we
11198a857c55SAndrea Parri (Microsoft) * handle the RESCIND message in the same serialized
11208a857c55SAndrea Parri (Microsoft) * work queue: the RESCIND handler can not start to
11218a857c55SAndrea Parri (Microsoft) * run before the OFFER handler finishes.
112254a66265SK. Y. Srinivasan */
112352be9355SShradha Gupta if (vmbus_connection.ignore_any_offer_msg)
112452be9355SShradha Gupta break;
112552be9355SShradha Gupta queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
112654a66265SK. Y. Srinivasan break;
112754a66265SK. Y. Srinivasan
112854a66265SK. Y. Srinivasan case CHANNELMSG_OFFERCHANNEL:
1129b9fa1b87SAndrea Parri (Microsoft) /*
1130b9fa1b87SAndrea Parri (Microsoft) * The host sends the offer message of a given channel
1131b9fa1b87SAndrea Parri (Microsoft) * before sending the rescind message of the same
1132b9fa1b87SAndrea Parri (Microsoft) * channel. These messages are sent to the guest's
1133b9fa1b87SAndrea Parri (Microsoft) * connect CPU; the guest then starts processing them
1134b9fa1b87SAndrea Parri (Microsoft) * in the tasklet handler on this CPU:
1135b9fa1b87SAndrea Parri (Microsoft) *
1136b9fa1b87SAndrea Parri (Microsoft) * VMBUS_CONNECT_CPU
1137b9fa1b87SAndrea Parri (Microsoft) *
1138b9fa1b87SAndrea Parri (Microsoft) * [vmbus_on_msg_dpc()]
1139b9fa1b87SAndrea Parri (Microsoft) * atomic_inc() // CHANNELMSG_OFFERCHANNEL
1140b9fa1b87SAndrea Parri (Microsoft) * queue_work()
1141b9fa1b87SAndrea Parri (Microsoft) * ...
1142b9fa1b87SAndrea Parri (Microsoft) * [vmbus_on_msg_dpc()]
1143b9fa1b87SAndrea Parri (Microsoft) * schedule_work() // CHANNELMSG_RESCIND_CHANNELOFFER
1144b9fa1b87SAndrea Parri (Microsoft) *
1145b9fa1b87SAndrea Parri (Microsoft) * We rely on the memory-ordering properties of the
1146b9fa1b87SAndrea Parri (Microsoft) * queue_work() and schedule_work() primitives, which
1147b9fa1b87SAndrea Parri (Microsoft) * guarantee that the atomic increment will be visible
1148b9fa1b87SAndrea Parri (Microsoft) * to the CPUs which will execute the offer & rescind
1149b9fa1b87SAndrea Parri (Microsoft) * works by the time these works will start execution.
1150b9fa1b87SAndrea Parri (Microsoft) */
115152be9355SShradha Gupta if (vmbus_connection.ignore_any_offer_msg)
115252be9355SShradha Gupta break;
115354a66265SK. Y. Srinivasan atomic_inc(&vmbus_connection.offer_in_progress);
1154b9fa1b87SAndrea Parri (Microsoft) fallthrough;
115554a66265SK. Y. Srinivasan
115654a66265SK. Y. Srinivasan default:
1157652594c7SDexuan Cui queue_work(vmbus_connection.work_queue, &ctx->work);
115854a66265SK. Y. Srinivasan }
1159652594c7SDexuan Cui } else
1160652594c7SDexuan Cui entry->message_handler(hdr);
1161652594c7SDexuan Cui
1162652594c7SDexuan Cui msg_handled:
1163cd95aad5SVitaly Kuznetsov vmbus_signal_eom(msg, message_type);
116446a97191SGreg Kroah-Hartman }
116546a97191SGreg Kroah-Hartman
116683b50f83SDexuan Cui #ifdef CONFIG_PM_SLEEP
11671f48dcf1SDexuan Cui /*
11681f48dcf1SDexuan Cui * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
11691f48dcf1SDexuan Cui * hibernation, because hv_sock connections can not persist across hibernation.
11701f48dcf1SDexuan Cui */
vmbus_force_channel_rescinded(struct vmbus_channel * channel)11711f48dcf1SDexuan Cui static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
11721f48dcf1SDexuan Cui {
11731f48dcf1SDexuan Cui struct onmessage_work_context *ctx;
11741f48dcf1SDexuan Cui struct vmbus_channel_rescind_offer *rescind;
11751f48dcf1SDexuan Cui
11761f48dcf1SDexuan Cui WARN_ON(!is_hvsock_channel(channel));
11771f48dcf1SDexuan Cui
11781f48dcf1SDexuan Cui /*
1179a276463bSVitaly Kuznetsov * Allocation size is small and the allocation should really not fail,
11801f48dcf1SDexuan Cui * otherwise the state of the hv_sock connections ends up in limbo.
11811f48dcf1SDexuan Cui */
1182a276463bSVitaly Kuznetsov ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
1183a276463bSVitaly Kuznetsov GFP_KERNEL | __GFP_NOFAIL);
11841f48dcf1SDexuan Cui
11851f48dcf1SDexuan Cui /*
11861f48dcf1SDexuan Cui * So far, these are not really used by Linux. Just set them to the
11871f48dcf1SDexuan Cui * reasonable values conforming to the definitions of the fields.
11881f48dcf1SDexuan Cui */
11891f48dcf1SDexuan Cui ctx->msg.header.message_type = 1;
11901f48dcf1SDexuan Cui ctx->msg.header.payload_size = sizeof(*rescind);
11911f48dcf1SDexuan Cui
11921f48dcf1SDexuan Cui /* These values are actually used by Linux. */
1193a276463bSVitaly Kuznetsov rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
11941f48dcf1SDexuan Cui rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
11951f48dcf1SDexuan Cui rescind->child_relid = channel->offermsg.child_relid;
11961f48dcf1SDexuan Cui
11971f48dcf1SDexuan Cui INIT_WORK(&ctx->work, vmbus_onmessage_work);
11981f48dcf1SDexuan Cui
1199b9fa1b87SAndrea Parri (Microsoft) queue_work(vmbus_connection.work_queue, &ctx->work);
12001f48dcf1SDexuan Cui }
120183b50f83SDexuan Cui #endif /* CONFIG_PM_SLEEP */
1202631e63a9SStephen Hemminger
1203631e63a9SStephen Hemminger /*
1204631e63a9SStephen Hemminger * Schedule all channels with events pending
1205631e63a9SStephen Hemminger */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	/*
	 * The event page can be directly checked to get the id of
	 * the channel that has the interrupt pending.
	 */
	void *page_addr = hv_cpu->synic_event_page;
	union hv_synic_event_flags *event
		= (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;

	maxbits = HV_EVENT_FLAGS_COUNT;
	recv_int_page = event->flags;

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		/*
		 * Atomically claim-and-clear the pending bit; the host may
		 * set it again while the channel is being serviced.
		 */
		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		/* A rescinded channel no longer gets its events delivered. */
		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer.  Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			/* Run the callback directly in interrupt context. */
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			/* Defer the callback to the channel's tasklet. */
			tasklet_schedule(&channel->callback_event);
		}

	sched_unlock:
		spin_unlock(&channel->sched_lock);
	sched_unlock_rcu:
		rcu_read_unlock();
	}
}
1288631e63a9SStephen Hemminger
vmbus_isr(void)128976d388cdSThomas Gleixner static void vmbus_isr(void)
129046a97191SGreg Kroah-Hartman {
129137cdd991SStephen Hemminger struct hv_per_cpu_context *hv_cpu
129237cdd991SStephen Hemminger = this_cpu_ptr(hv_context.cpu_context);
1293a6b94c6bSMichael Kelley void *page_addr;
129446a97191SGreg Kroah-Hartman struct hv_message *msg;
129546a97191SGreg Kroah-Hartman
1296631e63a9SStephen Hemminger vmbus_chan_sched(hv_cpu);
129746a97191SGreg Kroah-Hartman
129837cdd991SStephen Hemminger page_addr = hv_cpu->synic_message_page;
129946a97191SGreg Kroah-Hartman msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
130046a97191SGreg Kroah-Hartman
130146a97191SGreg Kroah-Hartman /* Check if there are actual msgs to be processed */
13024061ed9eSK. Y. Srinivasan if (msg->header.message_type != HVMSG_NONE) {
1303fd1fea68SMichael Kelley if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
1304fd1fea68SMichael Kelley hv_stimer0_isr();
1305fd1fea68SMichael Kelley vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
1306fd1fea68SMichael Kelley } else
130737cdd991SStephen Hemminger tasklet_schedule(&hv_cpu->msg_dpc);
130846a97191SGreg Kroah-Hartman }
13094b44f2d1SStephan Mueller
1310703f7066SSebastian Andrzej Siewior add_interrupt_randomness(vmbus_interrupt);
1311d608715dSMichael Kelley }
1312d608715dSMichael Kelley
/*
 * IRQ handler used when VMbus interrupts are delivered through a Linux
 * per-cpu IRQ (see the vmbus_irq path in vmbus_bus_init()); it simply
 * forwards to the common vmbus_isr() logic.
 */
static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
	vmbus_isr();
	return IRQ_HANDLED;
}
131846a97191SGreg Kroah-Hartman
131981b18bceSSunil Muthuswamy /*
132046a97191SGreg Kroah-Hartman * vmbus_bus_init -Main vmbus driver initialization routine.
132146a97191SGreg Kroah-Hartman *
132246a97191SGreg Kroah-Hartman * Here, we
132346a97191SGreg Kroah-Hartman * - initialize the vmbus driver context
132446a97191SGreg Kroah-Hartman * - invoke the vmbus hv main init routine
132546a97191SGreg Kroah-Hartman * - retrieve the channel offers
132646a97191SGreg Kroah-Hartman */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	/*
	 * VMbus interrupts are best modeled as per-cpu interrupts. If
	 * on an architecture with support for per-cpu IRQs (e.g. ARM64),
	 * allocate a per-cpu IRQ using standard Linux kernel functionality.
	 * If not on such an architecture (e.g., x86/x64), then rely on
	 * code in the arch-specific portion of the code tree to connect
	 * the VMbus interrupt handler.
	 */

	if (vmbus_irq == -1) {
		hv_setup_vmbus_handler(vmbus_isr);
	} else {
		vmbus_evt = alloc_percpu(long);
		ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
				"Hyper-V VMbus", vmbus_evt);
		if (ret) {
			pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
					vmbus_irq, ret);
			free_percpu(vmbus_evt);
			goto err_setup;
		}
	}

	/* Per-cpu SynIC pages/state; freed by hv_synic_free() on unwind. */
	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_alloc;
	/* Save the dynamic cpuhp state so it can be removed on unwind. */
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Always register the vmbus unload panic notifier because we
	 * need to shut the VMbus channel connection on panic.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_vmbus_unload_block);

	/* Ask the host for the channel offers; devices enumerate from here. */
	vmbus_request_offers();

	return 0;

	/* Error unwind: tear down in strict reverse order of setup. */
err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
	hv_synic_free();
	if (vmbus_irq == -1) {
		hv_remove_vmbus_handler();
	} else {
		free_percpu_irq(vmbus_irq, vmbus_evt);
		free_percpu(vmbus_evt);
	}
err_setup:
	bus_unregister(&hv_bus);
	return ret;
}
140746a97191SGreg Kroah-Hartman
140846a97191SGreg Kroah-Hartman /**
1409e1a863cdSJiapeng Chong * __vmbus_driver_register() - Register a vmbus's driver
141035464483SJake Oshins * @hv_driver: Pointer to driver structure you want to register
141146a97191SGreg Kroah-Hartman * @owner: owner module of the drv
141246a97191SGreg Kroah-Hartman * @mod_name: module name string
141346a97191SGreg Kroah-Hartman *
141446a97191SGreg Kroah-Hartman * Registers the given driver with Linux through the 'driver_register()' call
141546a97191SGreg Kroah-Hartman * and sets up the hyper-v vmbus handling for this driver.
141646a97191SGreg Kroah-Hartman * It will return the state of the 'driver_register()' call.
141746a97191SGreg Kroah-Hartman *
141846a97191SGreg Kroah-Hartman */
__vmbus_driver_register(struct hv_driver * hv_driver,struct module * owner,const char * mod_name)141946a97191SGreg Kroah-Hartman int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
142046a97191SGreg Kroah-Hartman {
142146a97191SGreg Kroah-Hartman int ret;
142246a97191SGreg Kroah-Hartman
142346a97191SGreg Kroah-Hartman pr_info("registering driver %s\n", hv_driver->name);
142446a97191SGreg Kroah-Hartman
1425cf6a2eacSK. Y. Srinivasan ret = vmbus_exists();
1426cf6a2eacSK. Y. Srinivasan if (ret < 0)
1427cf6a2eacSK. Y. Srinivasan return ret;
1428cf6a2eacSK. Y. Srinivasan
142946a97191SGreg Kroah-Hartman hv_driver->driver.name = hv_driver->name;
143046a97191SGreg Kroah-Hartman hv_driver->driver.owner = owner;
143146a97191SGreg Kroah-Hartman hv_driver->driver.mod_name = mod_name;
143246a97191SGreg Kroah-Hartman hv_driver->driver.bus = &hv_bus;
143346a97191SGreg Kroah-Hartman
1434fc76936dSStephen Hemminger spin_lock_init(&hv_driver->dynids.lock);
1435fc76936dSStephen Hemminger INIT_LIST_HEAD(&hv_driver->dynids.list);
1436fc76936dSStephen Hemminger
143746a97191SGreg Kroah-Hartman ret = driver_register(&hv_driver->driver);
143846a97191SGreg Kroah-Hartman
143946a97191SGreg Kroah-Hartman return ret;
144046a97191SGreg Kroah-Hartman }
144146a97191SGreg Kroah-Hartman EXPORT_SYMBOL_GPL(__vmbus_driver_register);
144246a97191SGreg Kroah-Hartman
144346a97191SGreg Kroah-Hartman /**
144446a97191SGreg Kroah-Hartman * vmbus_driver_unregister() - Unregister a vmbus's driver
144535464483SJake Oshins * @hv_driver: Pointer to driver structure you want to
144635464483SJake Oshins * un-register
144746a97191SGreg Kroah-Hartman *
144846a97191SGreg Kroah-Hartman * Un-register the given driver that was previous registered with a call to
144946a97191SGreg Kroah-Hartman * vmbus_driver_register()
145046a97191SGreg Kroah-Hartman */
vmbus_driver_unregister(struct hv_driver * hv_driver)145146a97191SGreg Kroah-Hartman void vmbus_driver_unregister(struct hv_driver *hv_driver)
145246a97191SGreg Kroah-Hartman {
145346a97191SGreg Kroah-Hartman pr_info("unregistering driver %s\n", hv_driver->name);
145446a97191SGreg Kroah-Hartman
1455fc76936dSStephen Hemminger if (!vmbus_exists()) {
145646a97191SGreg Kroah-Hartman driver_unregister(&hv_driver->driver);
1457fc76936dSStephen Hemminger vmbus_free_dynids(hv_driver);
1458fc76936dSStephen Hemminger }
145946a97191SGreg Kroah-Hartman }
146046a97191SGreg Kroah-Hartman EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
146146a97191SGreg Kroah-Hartman
1462c2e5df61SStephen Hemminger
1463c2e5df61SStephen Hemminger /*
1464c2e5df61SStephen Hemminger * Called when last reference to channel is gone.
1465c2e5df61SStephen Hemminger */
vmbus_chan_release(struct kobject * kobj)1466c2e5df61SStephen Hemminger static void vmbus_chan_release(struct kobject *kobj)
1467c2e5df61SStephen Hemminger {
1468c2e5df61SStephen Hemminger struct vmbus_channel *channel
1469c2e5df61SStephen Hemminger = container_of(kobj, struct vmbus_channel, kobj);
1470c2e5df61SStephen Hemminger
1471c2e5df61SStephen Hemminger kfree_rcu(channel, rcu);
1472c2e5df61SStephen Hemminger }
1473c2e5df61SStephen Hemminger
/*
 * Per-channel sysfs attribute: show/store operate directly on the
 * vmbus_channel that embeds the kobject (see vmbus_chan_attr_show/store),
 * rather than on a struct device.
 */
struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
/* Declare a channel attribute with explicit mode and callbacks. */
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
/* Convenience wrappers: derive callbacks from _name##_show/_name##_store. */
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
1489c2e5df61SStephen Hemminger
vmbus_chan_attr_show(struct kobject * kobj,struct attribute * attr,char * buf)1490c2e5df61SStephen Hemminger static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1491c2e5df61SStephen Hemminger struct attribute *attr, char *buf)
1492c2e5df61SStephen Hemminger {
1493c2e5df61SStephen Hemminger const struct vmbus_chan_attribute *attribute
1494c2e5df61SStephen Hemminger = container_of(attr, struct vmbus_chan_attribute, attr);
149514948e39SKimberly Brown struct vmbus_channel *chan
1496c2e5df61SStephen Hemminger = container_of(kobj, struct vmbus_channel, kobj);
1497c2e5df61SStephen Hemminger
1498c2e5df61SStephen Hemminger if (!attribute->show)
1499c2e5df61SStephen Hemminger return -EIO;
1500c2e5df61SStephen Hemminger
1501c2e5df61SStephen Hemminger return attribute->show(chan, buf);
1502c2e5df61SStephen Hemminger }
1503c2e5df61SStephen Hemminger
vmbus_chan_attr_store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t count)150475278105SAndrea Parri (Microsoft) static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
150575278105SAndrea Parri (Microsoft) struct attribute *attr, const char *buf,
150675278105SAndrea Parri (Microsoft) size_t count)
150775278105SAndrea Parri (Microsoft) {
150875278105SAndrea Parri (Microsoft) const struct vmbus_chan_attribute *attribute
150975278105SAndrea Parri (Microsoft) = container_of(attr, struct vmbus_chan_attribute, attr);
151075278105SAndrea Parri (Microsoft) struct vmbus_channel *chan
151175278105SAndrea Parri (Microsoft) = container_of(kobj, struct vmbus_channel, kobj);
151275278105SAndrea Parri (Microsoft)
151375278105SAndrea Parri (Microsoft) if (!attribute->store)
151475278105SAndrea Parri (Microsoft) return -EIO;
151575278105SAndrea Parri (Microsoft)
151675278105SAndrea Parri (Microsoft) return attribute->store(chan, buf, count);
151775278105SAndrea Parri (Microsoft) }
151875278105SAndrea Parri (Microsoft)
/* sysfs ops that dispatch to the vmbus_chan_attribute show/store hooks. */
static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};
1523c2e5df61SStephen Hemminger
out_mask_show(struct vmbus_channel * channel,char * buf)152414948e39SKimberly Brown static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
1525c2e5df61SStephen Hemminger {
152614948e39SKimberly Brown struct hv_ring_buffer_info *rbi = &channel->outbound;
152714948e39SKimberly Brown ssize_t ret;
1528c2e5df61SStephen Hemminger
152914948e39SKimberly Brown mutex_lock(&rbi->ring_buffer_mutex);
153014948e39SKimberly Brown if (!rbi->ring_buffer) {
153114948e39SKimberly Brown mutex_unlock(&rbi->ring_buffer_mutex);
1532fcedbb29SKimberly Brown return -EINVAL;
153314948e39SKimberly Brown }
1534fcedbb29SKimberly Brown
153514948e39SKimberly Brown ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
153614948e39SKimberly Brown mutex_unlock(&rbi->ring_buffer_mutex);
153714948e39SKimberly Brown return ret;
1538c2e5df61SStephen Hemminger }
1539875c362bSStephen Hemminger static VMBUS_CHAN_ATTR_RO(out_mask);
1540c2e5df61SStephen Hemminger
in_mask_show(struct vmbus_channel * channel,char * buf)154114948e39SKimberly Brown static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
1542c2e5df61SStephen Hemminger {
154314948e39SKimberly Brown struct hv_ring_buffer_info *rbi = &channel->inbound;
154414948e39SKimberly Brown ssize_t ret;
1545c2e5df61SStephen Hemminger
154614948e39SKimberly Brown mutex_lock(&rbi->ring_buffer_mutex);
154714948e39SKimberly Brown if (!rbi->ring_buffer) {
154814948e39SKimberly Brown mutex_unlock(&rbi->ring_buffer_mutex);
1549fcedbb29SKimberly Brown return -EINVAL;
155014948e39SKimberly Brown }
1551fcedbb29SKimberly Brown
155214948e39SKimberly Brown ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
155314948e39SKimberly Brown mutex_unlock(&rbi->ring_buffer_mutex);
155414948e39SKimberly Brown return ret;
1555c2e5df61SStephen Hemminger }
1556875c362bSStephen Hemminger static VMBUS_CHAN_ATTR_RO(in_mask);
1557c2e5df61SStephen Hemminger
read_avail_show(struct vmbus_channel * channel,char * buf)155814948e39SKimberly Brown static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
1559c2e5df61SStephen Hemminger {
156014948e39SKimberly Brown struct hv_ring_buffer_info *rbi = &channel->inbound;
156114948e39SKimberly Brown ssize_t ret;
1562c2e5df61SStephen Hemminger
156314948e39SKimberly Brown mutex_lock(&rbi->ring_buffer_mutex);
156414948e39SKimberly Brown if (!rbi->ring_buffer) {
156514948e39SKimberly Brown mutex_unlock(&rbi->ring_buffer_mutex);
1566fcedbb29SKimberly Brown return -EINVAL;
156714948e39SKimberly Brown }
1568fcedbb29SKimberly Brown
156914948e39SKimberly Brown ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
157014948e39SKimberly Brown mutex_unlock(&rbi->ring_buffer_mutex);
157114948e39SKimberly Brown return ret;
1572c2e5df61SStephen Hemminger }
1573875c362bSStephen Hemminger static VMBUS_CHAN_ATTR_RO(read_avail);
1574c2e5df61SStephen Hemminger
write_avail_show(struct vmbus_channel * channel,char * buf)157514948e39SKimberly Brown static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
1576c2e5df61SStephen Hemminger {
157714948e39SKimberly Brown struct hv_ring_buffer_info *rbi = &channel->outbound;
157814948e39SKimberly Brown ssize_t ret;
1579c2e5df61SStephen Hemminger
158014948e39SKimberly Brown mutex_lock(&rbi->ring_buffer_mutex);
158114948e39SKimberly Brown if (!rbi->ring_buffer) {
158214948e39SKimberly Brown mutex_unlock(&rbi->ring_buffer_mutex);
1583fcedbb29SKimberly Brown return -EINVAL;
158414948e39SKimberly Brown }
1585fcedbb29SKimberly Brown
158614948e39SKimberly Brown ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
158714948e39SKimberly Brown mutex_unlock(&rbi->ring_buffer_mutex);
158814948e39SKimberly Brown return ret;
1589c2e5df61SStephen Hemminger }
1590875c362bSStephen Hemminger static VMBUS_CHAN_ATTR_RO(write_avail);
1591c2e5df61SStephen Hemminger
target_cpu_show(struct vmbus_channel * channel,char * buf)159275278105SAndrea Parri (Microsoft) static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
1593c2e5df61SStephen Hemminger {
1594c2e5df61SStephen Hemminger return sprintf(buf, "%u\n", channel->target_cpu);
1595c2e5df61SStephen Hemminger }
target_cpu_store(struct vmbus_channel * channel,const char * buf,size_t count)159675278105SAndrea Parri (Microsoft) static ssize_t target_cpu_store(struct vmbus_channel *channel,
159775278105SAndrea Parri (Microsoft) const char *buf, size_t count)
159875278105SAndrea Parri (Microsoft) {
1599afaa33daSAndrea Parri (Microsoft) u32 target_cpu, origin_cpu;
160075278105SAndrea Parri (Microsoft) ssize_t ret = count;
160175278105SAndrea Parri (Microsoft)
160275278105SAndrea Parri (Microsoft) if (vmbus_proto_version < VERSION_WIN10_V4_1)
160375278105SAndrea Parri (Microsoft) return -EIO;
160475278105SAndrea Parri (Microsoft)
160575278105SAndrea Parri (Microsoft) if (sscanf(buf, "%uu", &target_cpu) != 1)
160675278105SAndrea Parri (Microsoft) return -EIO;
160775278105SAndrea Parri (Microsoft)
160875278105SAndrea Parri (Microsoft) /* Validate target_cpu for the cpumask_test_cpu() operation below. */
160975278105SAndrea Parri (Microsoft) if (target_cpu >= nr_cpumask_bits)
161075278105SAndrea Parri (Microsoft) return -EINVAL;
161175278105SAndrea Parri (Microsoft)
16126640b5dfSSaurabh Sengar if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
16136640b5dfSSaurabh Sengar return -EINVAL;
16146640b5dfSSaurabh Sengar
161575278105SAndrea Parri (Microsoft) /* No CPUs should come up or down during this. */
161675278105SAndrea Parri (Microsoft) cpus_read_lock();
161775278105SAndrea Parri (Microsoft)
16180a968209SAndrea Parri (Microsoft) if (!cpu_online(target_cpu)) {
161975278105SAndrea Parri (Microsoft) cpus_read_unlock();
162075278105SAndrea Parri (Microsoft) return -EINVAL;
162175278105SAndrea Parri (Microsoft) }
162275278105SAndrea Parri (Microsoft)
162375278105SAndrea Parri (Microsoft) /*
162475278105SAndrea Parri (Microsoft) * Synchronizes target_cpu_store() and channel closure:
162575278105SAndrea Parri (Microsoft) *
162675278105SAndrea Parri (Microsoft) * { Initially: state = CHANNEL_OPENED }
162775278105SAndrea Parri (Microsoft) *
162875278105SAndrea Parri (Microsoft) * CPU1 CPU2
162975278105SAndrea Parri (Microsoft) *
163075278105SAndrea Parri (Microsoft) * [target_cpu_store()] [vmbus_disconnect_ring()]
163175278105SAndrea Parri (Microsoft) *
163275278105SAndrea Parri (Microsoft) * LOCK channel_mutex LOCK channel_mutex
163375278105SAndrea Parri (Microsoft) * LOAD r1 = state LOAD r2 = state
163475278105SAndrea Parri (Microsoft) * IF (r1 == CHANNEL_OPENED) IF (r2 == CHANNEL_OPENED)
163575278105SAndrea Parri (Microsoft) * SEND MODIFYCHANNEL STORE state = CHANNEL_OPEN
163675278105SAndrea Parri (Microsoft) * [...] SEND CLOSECHANNEL
163775278105SAndrea Parri (Microsoft) * UNLOCK channel_mutex UNLOCK channel_mutex
163875278105SAndrea Parri (Microsoft) *
163975278105SAndrea Parri (Microsoft) * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
164075278105SAndrea Parri (Microsoft) * CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
164175278105SAndrea Parri (Microsoft) *
164275278105SAndrea Parri (Microsoft) * Note. The host processes the channel messages "sequentially", in
164375278105SAndrea Parri (Microsoft) * the order in which they are received on a per-partition basis.
164475278105SAndrea Parri (Microsoft) */
164575278105SAndrea Parri (Microsoft) mutex_lock(&vmbus_connection.channel_mutex);
164675278105SAndrea Parri (Microsoft)
164775278105SAndrea Parri (Microsoft) /*
164875278105SAndrea Parri (Microsoft) * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
164975278105SAndrea Parri (Microsoft) * avoid sending the message and fail here for such channels.
165075278105SAndrea Parri (Microsoft) */
165175278105SAndrea Parri (Microsoft) if (channel->state != CHANNEL_OPENED_STATE) {
165275278105SAndrea Parri (Microsoft) ret = -EIO;
165375278105SAndrea Parri (Microsoft) goto cpu_store_unlock;
165475278105SAndrea Parri (Microsoft) }
165575278105SAndrea Parri (Microsoft)
1656afaa33daSAndrea Parri (Microsoft) origin_cpu = channel->target_cpu;
1657afaa33daSAndrea Parri (Microsoft) if (target_cpu == origin_cpu)
165875278105SAndrea Parri (Microsoft) goto cpu_store_unlock;
165975278105SAndrea Parri (Microsoft)
1660870ced05SAndrea Parri (Microsoft) if (vmbus_send_modifychannel(channel,
166175278105SAndrea Parri (Microsoft) hv_cpu_number_to_vp_number(target_cpu))) {
166275278105SAndrea Parri (Microsoft) ret = -EIO;
166375278105SAndrea Parri (Microsoft) goto cpu_store_unlock;
166475278105SAndrea Parri (Microsoft) }
166575278105SAndrea Parri (Microsoft)
166675278105SAndrea Parri (Microsoft) /*
1667870ced05SAndrea Parri (Microsoft) * For version before VERSION_WIN10_V5_3, the following warning holds:
1668870ced05SAndrea Parri (Microsoft) *
166975278105SAndrea Parri (Microsoft) * Warning. At this point, there is *no* guarantee that the host will
167075278105SAndrea Parri (Microsoft) * have successfully processed the vmbus_send_modifychannel() request.
167175278105SAndrea Parri (Microsoft) * See the header comment of vmbus_send_modifychannel() for more info.
167275278105SAndrea Parri (Microsoft) *
167375278105SAndrea Parri (Microsoft) * Lags in the processing of the above vmbus_send_modifychannel() can
167475278105SAndrea Parri (Microsoft) * result in missed interrupts if the "old" target CPU is taken offline
167575278105SAndrea Parri (Microsoft) * before Hyper-V starts sending interrupts to the "new" target CPU.
167675278105SAndrea Parri (Microsoft) * But apart from this offlining scenario, the code tolerates such
167775278105SAndrea Parri (Microsoft) * lags. It will function correctly even if a channel interrupt comes
167875278105SAndrea Parri (Microsoft) * in on a CPU that is different from the channel target_cpu value.
167975278105SAndrea Parri (Microsoft) */
168075278105SAndrea Parri (Microsoft)
168175278105SAndrea Parri (Microsoft) channel->target_cpu = target_cpu;
168275278105SAndrea Parri (Microsoft)
1683afaa33daSAndrea Parri (Microsoft) /* See init_vp_index(). */
1684afaa33daSAndrea Parri (Microsoft) if (hv_is_perf_channel(channel))
1685de96e8a0SVitaly Kuznetsov hv_update_allocated_cpus(origin_cpu, target_cpu);
1686afaa33daSAndrea Parri (Microsoft)
1687afaa33daSAndrea Parri (Microsoft) /* Currently set only for storvsc channels. */
1688afaa33daSAndrea Parri (Microsoft) if (channel->change_target_cpu_callback) {
1689afaa33daSAndrea Parri (Microsoft) (*channel->change_target_cpu_callback)(channel,
1690afaa33daSAndrea Parri (Microsoft) origin_cpu, target_cpu);
1691afaa33daSAndrea Parri (Microsoft) }
1692afaa33daSAndrea Parri (Microsoft)
169375278105SAndrea Parri (Microsoft) cpu_store_unlock:
169475278105SAndrea Parri (Microsoft) mutex_unlock(&vmbus_connection.channel_mutex);
169575278105SAndrea Parri (Microsoft) cpus_read_unlock();
169675278105SAndrea Parri (Microsoft) return ret;
169775278105SAndrea Parri (Microsoft) }
169875278105SAndrea Parri (Microsoft) static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
1699c2e5df61SStephen Hemminger
channel_pending_show(struct vmbus_channel * channel,char * buf)170014948e39SKimberly Brown static ssize_t channel_pending_show(struct vmbus_channel *channel,
1701c2e5df61SStephen Hemminger char *buf)
1702c2e5df61SStephen Hemminger {
1703c2e5df61SStephen Hemminger return sprintf(buf, "%d\n",
1704c2e5df61SStephen Hemminger channel_pending(channel,
1705c2e5df61SStephen Hemminger vmbus_connection.monitor_pages[1]));
1706c2e5df61SStephen Hemminger }
1707f0434de4SMatheus Castello static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
1708c2e5df61SStephen Hemminger
channel_latency_show(struct vmbus_channel * channel,char * buf)170914948e39SKimberly Brown static ssize_t channel_latency_show(struct vmbus_channel *channel,
1710c2e5df61SStephen Hemminger char *buf)
1711c2e5df61SStephen Hemminger {
1712c2e5df61SStephen Hemminger return sprintf(buf, "%d\n",
1713c2e5df61SStephen Hemminger channel_latency(channel,
1714c2e5df61SStephen Hemminger vmbus_connection.monitor_pages[1]));
1715c2e5df61SStephen Hemminger }
1716f0434de4SMatheus Castello static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
1717c2e5df61SStephen Hemminger
channel_interrupts_show(struct vmbus_channel * channel,char * buf)171814948e39SKimberly Brown static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
17196981fbf3SStephen Hemminger {
17206981fbf3SStephen Hemminger return sprintf(buf, "%llu\n", channel->interrupts);
17216981fbf3SStephen Hemminger }
1722f0434de4SMatheus Castello static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
17236981fbf3SStephen Hemminger
channel_events_show(struct vmbus_channel * channel,char * buf)172414948e39SKimberly Brown static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
17256981fbf3SStephen Hemminger {
17266981fbf3SStephen Hemminger return sprintf(buf, "%llu\n", channel->sig_events);
17276981fbf3SStephen Hemminger }
1728f0434de4SMatheus Castello static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
17296981fbf3SStephen Hemminger
channel_intr_in_full_show(struct vmbus_channel * channel,char * buf)173014948e39SKimberly Brown static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
1731396ae57eSKimberly Brown char *buf)
1732396ae57eSKimberly Brown {
1733396ae57eSKimberly Brown return sprintf(buf, "%llu\n",
1734396ae57eSKimberly Brown (unsigned long long)channel->intr_in_full);
1735396ae57eSKimberly Brown }
1736396ae57eSKimberly Brown static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
1737396ae57eSKimberly Brown
channel_intr_out_empty_show(struct vmbus_channel * channel,char * buf)173814948e39SKimberly Brown static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
1739396ae57eSKimberly Brown char *buf)
1740396ae57eSKimberly Brown {
1741396ae57eSKimberly Brown return sprintf(buf, "%llu\n",
1742396ae57eSKimberly Brown (unsigned long long)channel->intr_out_empty);
1743396ae57eSKimberly Brown }
1744396ae57eSKimberly Brown static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
1745396ae57eSKimberly Brown
channel_out_full_first_show(struct vmbus_channel * channel,char * buf)174614948e39SKimberly Brown static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
1747396ae57eSKimberly Brown char *buf)
1748396ae57eSKimberly Brown {
1749396ae57eSKimberly Brown return sprintf(buf, "%llu\n",
1750396ae57eSKimberly Brown (unsigned long long)channel->out_full_first);
1751396ae57eSKimberly Brown }
1752396ae57eSKimberly Brown static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
1753396ae57eSKimberly Brown
channel_out_full_total_show(struct vmbus_channel * channel,char * buf)175414948e39SKimberly Brown static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
1755396ae57eSKimberly Brown char *buf)
1756396ae57eSKimberly Brown {
1757396ae57eSKimberly Brown return sprintf(buf, "%llu\n",
1758396ae57eSKimberly Brown (unsigned long long)channel->out_full_total);
1759396ae57eSKimberly Brown }
1760396ae57eSKimberly Brown static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
1761396ae57eSKimberly Brown
subchannel_monitor_id_show(struct vmbus_channel * channel,char * buf)176214948e39SKimberly Brown static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
1763f0fa2974SStephen Hemminger char *buf)
1764f0fa2974SStephen Hemminger {
1765f0fa2974SStephen Hemminger return sprintf(buf, "%u\n", channel->offermsg.monitorid);
1766f0fa2974SStephen Hemminger }
1767f0434de4SMatheus Castello static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
1768f0fa2974SStephen Hemminger
subchannel_id_show(struct vmbus_channel * channel,char * buf)176914948e39SKimberly Brown static ssize_t subchannel_id_show(struct vmbus_channel *channel,
1770f0fa2974SStephen Hemminger char *buf)
1771f0fa2974SStephen Hemminger {
1772f0fa2974SStephen Hemminger return sprintf(buf, "%u\n",
1773f0fa2974SStephen Hemminger channel->offermsg.offer.sub_channel_index);
1774f0fa2974SStephen Hemminger }
1775f0fa2974SStephen Hemminger static VMBUS_CHAN_ATTR_RO(subchannel_id);
1776f0fa2974SStephen Hemminger
1777c2e5df61SStephen Hemminger static struct attribute *vmbus_chan_attrs[] = {
1778c2e5df61SStephen Hemminger &chan_attr_out_mask.attr,
1779c2e5df61SStephen Hemminger &chan_attr_in_mask.attr,
1780c2e5df61SStephen Hemminger &chan_attr_read_avail.attr,
1781c2e5df61SStephen Hemminger &chan_attr_write_avail.attr,
1782c2e5df61SStephen Hemminger &chan_attr_cpu.attr,
1783c2e5df61SStephen Hemminger &chan_attr_pending.attr,
1784c2e5df61SStephen Hemminger &chan_attr_latency.attr,
17856981fbf3SStephen Hemminger &chan_attr_interrupts.attr,
17866981fbf3SStephen Hemminger &chan_attr_events.attr,
1787396ae57eSKimberly Brown &chan_attr_intr_in_full.attr,
1788396ae57eSKimberly Brown &chan_attr_intr_out_empty.attr,
1789396ae57eSKimberly Brown &chan_attr_out_full_first.attr,
1790396ae57eSKimberly Brown &chan_attr_out_full_total.attr,
1791f0fa2974SStephen Hemminger &chan_attr_monitor_id.attr,
1792f0fa2974SStephen Hemminger &chan_attr_subchannel_id.attr,
1793c2e5df61SStephen Hemminger NULL
1794c2e5df61SStephen Hemminger };
1795c2e5df61SStephen Hemminger
179646fc1548SKimberly Brown /*
179746fc1548SKimberly Brown * Channel-level attribute_group callback function. Returns the permission for
179846fc1548SKimberly Brown * each attribute, and returns 0 if an attribute is not visible.
179946fc1548SKimberly Brown */
vmbus_chan_attr_is_visible(struct kobject * kobj,struct attribute * attr,int idx)180046fc1548SKimberly Brown static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
180146fc1548SKimberly Brown struct attribute *attr, int idx)
180246fc1548SKimberly Brown {
180346fc1548SKimberly Brown const struct vmbus_channel *channel =
180446fc1548SKimberly Brown container_of(kobj, struct vmbus_channel, kobj);
180546fc1548SKimberly Brown
180646fc1548SKimberly Brown /* Hide the monitor attributes if the monitor mechanism is not used. */
180746fc1548SKimberly Brown if (!channel->offermsg.monitor_allocated &&
180846fc1548SKimberly Brown (attr == &chan_attr_pending.attr ||
180946fc1548SKimberly Brown attr == &chan_attr_latency.attr ||
181046fc1548SKimberly Brown attr == &chan_attr_monitor_id.attr))
181146fc1548SKimberly Brown return 0;
181246fc1548SKimberly Brown
181346fc1548SKimberly Brown return attr->mode;
181446fc1548SKimberly Brown }
181546fc1548SKimberly Brown
181646fc1548SKimberly Brown static struct attribute_group vmbus_chan_group = {
181746fc1548SKimberly Brown .attrs = vmbus_chan_attrs,
181846fc1548SKimberly Brown .is_visible = vmbus_chan_attr_is_visible
181946fc1548SKimberly Brown };
182046fc1548SKimberly Brown
1821c2e5df61SStephen Hemminger static struct kobj_type vmbus_chan_ktype = {
1822c2e5df61SStephen Hemminger .sysfs_ops = &vmbus_chan_sysfs_ops,
1823c2e5df61SStephen Hemminger .release = vmbus_chan_release,
1824c2e5df61SStephen Hemminger };
1825c2e5df61SStephen Hemminger
1826c2e5df61SStephen Hemminger /*
1827c2e5df61SStephen Hemminger * vmbus_add_channel_kobj - setup a sub-directory under device/channels
1828c2e5df61SStephen Hemminger */
vmbus_add_channel_kobj(struct hv_device * dev,struct vmbus_channel * channel)1829c2e5df61SStephen Hemminger int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
1830c2e5df61SStephen Hemminger {
183146fc1548SKimberly Brown const struct device *device = &dev->device;
1832c2e5df61SStephen Hemminger struct kobject *kobj = &channel->kobj;
1833c2e5df61SStephen Hemminger u32 relid = channel->offermsg.child_relid;
1834c2e5df61SStephen Hemminger int ret;
1835c2e5df61SStephen Hemminger
1836c2e5df61SStephen Hemminger kobj->kset = dev->channels_kset;
1837c2e5df61SStephen Hemminger ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
1838c2e5df61SStephen Hemminger "%u", relid);
18398bc69f86SMiaoqian Lin if (ret) {
18408bc69f86SMiaoqian Lin kobject_put(kobj);
1841c2e5df61SStephen Hemminger return ret;
18428bc69f86SMiaoqian Lin }
1843c2e5df61SStephen Hemminger
184446fc1548SKimberly Brown ret = sysfs_create_group(kobj, &vmbus_chan_group);
184546fc1548SKimberly Brown
184646fc1548SKimberly Brown if (ret) {
184746fc1548SKimberly Brown /*
184846fc1548SKimberly Brown * The calling functions' error handling paths will cleanup the
184946fc1548SKimberly Brown * empty channel directory.
185046fc1548SKimberly Brown */
18518bc69f86SMiaoqian Lin kobject_put(kobj);
185246fc1548SKimberly Brown dev_err(device, "Unable to set up channel sysfs files\n");
185346fc1548SKimberly Brown return ret;
185446fc1548SKimberly Brown }
185546fc1548SKimberly Brown
1856c2e5df61SStephen Hemminger kobject_uevent(kobj, KOBJ_ADD);
1857c2e5df61SStephen Hemminger
1858c2e5df61SStephen Hemminger return 0;
1859c2e5df61SStephen Hemminger }
1860c2e5df61SStephen Hemminger
186146a97191SGreg Kroah-Hartman /*
186246fc1548SKimberly Brown * vmbus_remove_channel_attr_group - remove the channel's attribute group
186346fc1548SKimberly Brown */
vmbus_remove_channel_attr_group(struct vmbus_channel * channel)186446fc1548SKimberly Brown void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
186546fc1548SKimberly Brown {
186646fc1548SKimberly Brown sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
186746fc1548SKimberly Brown }
186846fc1548SKimberly Brown
186946fc1548SKimberly Brown /*
187046a97191SGreg Kroah-Hartman * vmbus_device_create - Creates and registers a new child device
187146a97191SGreg Kroah-Hartman * on the vmbus.
187246a97191SGreg Kroah-Hartman */
vmbus_device_create(const guid_t * type,const guid_t * instance,struct vmbus_channel * channel)1873593db803SAndy Shevchenko struct hv_device *vmbus_device_create(const guid_t *type,
1874593db803SAndy Shevchenko const guid_t *instance,
187546a97191SGreg Kroah-Hartman struct vmbus_channel *channel)
187646a97191SGreg Kroah-Hartman {
187746a97191SGreg Kroah-Hartman struct hv_device *child_device_obj;
187846a97191SGreg Kroah-Hartman
187946a97191SGreg Kroah-Hartman child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
188046a97191SGreg Kroah-Hartman if (!child_device_obj) {
188146a97191SGreg Kroah-Hartman pr_err("Unable to allocate device object for child device\n");
188246a97191SGreg Kroah-Hartman return NULL;
188346a97191SGreg Kroah-Hartman }
188446a97191SGreg Kroah-Hartman
188546a97191SGreg Kroah-Hartman child_device_obj->channel = channel;
1886593db803SAndy Shevchenko guid_copy(&child_device_obj->dev_type, type);
1887593db803SAndy Shevchenko guid_copy(&child_device_obj->dev_instance, instance);
1888a99aaf2eSEaswar Hariharan child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;
188946a97191SGreg Kroah-Hartman
189046a97191SGreg Kroah-Hartman return child_device_obj;
189146a97191SGreg Kroah-Hartman }
189246a97191SGreg Kroah-Hartman
189346a97191SGreg Kroah-Hartman /*
189446a97191SGreg Kroah-Hartman * vmbus_device_register - Register the child device
189546a97191SGreg Kroah-Hartman */
vmbus_device_register(struct hv_device * child_device_obj)189646a97191SGreg Kroah-Hartman int vmbus_device_register(struct hv_device *child_device_obj)
189746a97191SGreg Kroah-Hartman {
1898c2e5df61SStephen Hemminger struct kobject *kobj = &child_device_obj->device.kobj;
1899c2e5df61SStephen Hemminger int ret;
190046a97191SGreg Kroah-Hartman
1901f6b2db08SStephen Hemminger dev_set_name(&child_device_obj->device, "%pUl",
1902458c4475SAndy Shevchenko &child_device_obj->channel->offermsg.offer.if_instance);
190346a97191SGreg Kroah-Hartman
190446a97191SGreg Kroah-Hartman child_device_obj->device.bus = &hv_bus;
19059c843423SSaurabh Sengar child_device_obj->device.parent = hv_dev;
190646a97191SGreg Kroah-Hartman child_device_obj->device.release = vmbus_device_release;
190746a97191SGreg Kroah-Hartman
19083a546958SAndrea Parri (Microsoft) child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
19093a546958SAndrea Parri (Microsoft) child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
19103a546958SAndrea Parri (Microsoft) dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
19113a546958SAndrea Parri (Microsoft)
191246a97191SGreg Kroah-Hartman /*
191346a97191SGreg Kroah-Hartman * Register with the LDM. This will kick off the driver/device
191446a97191SGreg Kroah-Hartman * binding...which will eventually call vmbus_match() and vmbus_probe()
191546a97191SGreg Kroah-Hartman */
191646a97191SGreg Kroah-Hartman ret = device_register(&child_device_obj->device);
1917c2e5df61SStephen Hemminger if (ret) {
191846a97191SGreg Kroah-Hartman pr_err("Unable to register child device\n");
191925c94b05SYang Yingliang put_device(&child_device_obj->device);
1920c2e5df61SStephen Hemminger return ret;
1921c2e5df61SStephen Hemminger }
192246a97191SGreg Kroah-Hartman
1923c2e5df61SStephen Hemminger child_device_obj->channels_kset = kset_create_and_add("channels",
1924c2e5df61SStephen Hemminger NULL, kobj);
1925c2e5df61SStephen Hemminger if (!child_device_obj->channels_kset) {
1926c2e5df61SStephen Hemminger ret = -ENOMEM;
1927c2e5df61SStephen Hemminger goto err_dev_unregister;
1928c2e5df61SStephen Hemminger }
1929c2e5df61SStephen Hemminger
1930c2e5df61SStephen Hemminger ret = vmbus_add_channel_kobj(child_device_obj,
1931c2e5df61SStephen Hemminger child_device_obj->channel);
1932c2e5df61SStephen Hemminger if (ret) {
1933c2e5df61SStephen Hemminger pr_err("Unable to register primary channeln");
1934c2e5df61SStephen Hemminger goto err_kset_unregister;
1935c2e5df61SStephen Hemminger }
1936af9ca6f9SBranden Bonaby hv_debug_add_dev_dir(child_device_obj);
1937c2e5df61SStephen Hemminger
1938c2e5df61SStephen Hemminger return 0;
1939c2e5df61SStephen Hemminger
1940c2e5df61SStephen Hemminger err_kset_unregister:
1941c2e5df61SStephen Hemminger kset_unregister(child_device_obj->channels_kset);
1942c2e5df61SStephen Hemminger
1943c2e5df61SStephen Hemminger err_dev_unregister:
1944c2e5df61SStephen Hemminger device_unregister(&child_device_obj->device);
194546a97191SGreg Kroah-Hartman return ret;
194646a97191SGreg Kroah-Hartman }
194746a97191SGreg Kroah-Hartman
194846a97191SGreg Kroah-Hartman /*
194946a97191SGreg Kroah-Hartman * vmbus_device_unregister - Remove the specified child device
195046a97191SGreg Kroah-Hartman * from the vmbus.
195146a97191SGreg Kroah-Hartman */
vmbus_device_unregister(struct hv_device * device_obj)195246a97191SGreg Kroah-Hartman void vmbus_device_unregister(struct hv_device *device_obj)
195346a97191SGreg Kroah-Hartman {
195484672369SFernando Soto pr_debug("child device %s unregistered\n",
195584672369SFernando Soto dev_name(&device_obj->device));
195684672369SFernando Soto
1957869b5567SDexuan Cui kset_unregister(device_obj->channels_kset);
1958869b5567SDexuan Cui
195946a97191SGreg Kroah-Hartman /*
196046a97191SGreg Kroah-Hartman * Kick off the process of unregistering the device.
196146a97191SGreg Kroah-Hartman * This will call vmbus_remove() and eventually vmbus_device_release()
196246a97191SGreg Kroah-Hartman */
196346a97191SGreg Kroah-Hartman device_unregister(&device_obj->device);
196446a97191SGreg Kroah-Hartman }
19656ed45748SNaman Jain EXPORT_SYMBOL_GPL(vmbus_device_unregister);
196646a97191SGreg Kroah-Hartman
1967f83705a5SSaurabh Sengar #ifdef CONFIG_ACPI
196846a97191SGreg Kroah-Hartman /*
19697f163a6fSJake Oshins * VMBUS is an acpi enumerated device. Get the information we
197090f34535SK. Y. Srinivasan * need from DSDT.
197146a97191SGreg Kroah-Hartman */
vmbus_walk_resources(struct acpi_resource * res,void * ctx)197290f34535SK. Y. Srinivasan static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
197346a97191SGreg Kroah-Hartman {
19747f163a6fSJake Oshins resource_size_t start = 0;
19757f163a6fSJake Oshins resource_size_t end = 0;
19767f163a6fSJake Oshins struct resource *new_res;
19777f163a6fSJake Oshins struct resource **old_res = &hyperv_mmio;
19787f163a6fSJake Oshins struct resource **prev_res = NULL;
1979626b901fSMichael Kelley struct resource r;
19807f163a6fSJake Oshins
198190f34535SK. Y. Srinivasan switch (res->type) {
19827f163a6fSJake Oshins
19837f163a6fSJake Oshins /*
19847f163a6fSJake Oshins * "Address" descriptors are for bus windows. Ignore
19857f163a6fSJake Oshins * "memory" descriptors, which are for registers on
19867f163a6fSJake Oshins * devices.
19877f163a6fSJake Oshins */
19887f163a6fSJake Oshins case ACPI_RESOURCE_TYPE_ADDRESS32:
19897f163a6fSJake Oshins start = res->data.address32.address.minimum;
19907f163a6fSJake Oshins end = res->data.address32.address.maximum;
19914eb923f8SGerd Hoffmann break;
199246a97191SGreg Kroah-Hartman
199390f34535SK. Y. Srinivasan case ACPI_RESOURCE_TYPE_ADDRESS64:
19947f163a6fSJake Oshins start = res->data.address64.address.minimum;
19957f163a6fSJake Oshins end = res->data.address64.address.maximum;
19967f163a6fSJake Oshins break;
19977f163a6fSJake Oshins
1998626b901fSMichael Kelley /*
1999626b901fSMichael Kelley * The IRQ information is needed only on ARM64, which Hyper-V
2000626b901fSMichael Kelley * sets up in the extended format. IRQ information is present
2001626b901fSMichael Kelley * on x86/x64 in the non-extended format but it is not used by
2002626b901fSMichael Kelley * Linux. So don't bother checking for the non-extended format.
2003626b901fSMichael Kelley */
2004626b901fSMichael Kelley case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
2005626b901fSMichael Kelley if (!acpi_dev_resource_interrupt(res, 0, &r)) {
2006626b901fSMichael Kelley pr_err("Unable to parse Hyper-V ACPI interrupt\n");
2007626b901fSMichael Kelley return AE_ERROR;
2008626b901fSMichael Kelley }
2009626b901fSMichael Kelley /* ARM64 INTID for VMbus */
2010626b901fSMichael Kelley vmbus_interrupt = res->data.extended_irq.interrupts[0];
2011626b901fSMichael Kelley /* Linux IRQ number */
2012626b901fSMichael Kelley vmbus_irq = r.start;
2013626b901fSMichael Kelley return AE_OK;
2014626b901fSMichael Kelley
20157f163a6fSJake Oshins default:
20167f163a6fSJake Oshins /* Unused resource type */
20177f163a6fSJake Oshins return AE_OK;
20187f163a6fSJake Oshins
20197f163a6fSJake Oshins }
20207f163a6fSJake Oshins /*
20217f163a6fSJake Oshins * Ignore ranges that are below 1MB, as they're not
20227f163a6fSJake Oshins * necessary or useful here.
20237f163a6fSJake Oshins */
20247f163a6fSJake Oshins if (end < 0x100000)
20257f163a6fSJake Oshins return AE_OK;
20267f163a6fSJake Oshins
20277f163a6fSJake Oshins new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
20287f163a6fSJake Oshins if (!new_res)
20297f163a6fSJake Oshins return AE_NO_MEMORY;
20307f163a6fSJake Oshins
20317f163a6fSJake Oshins /* If this range overlaps the virtual TPM, truncate it. */
20327f163a6fSJake Oshins if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
20337f163a6fSJake Oshins end = VTPM_BASE_ADDRESS;
20347f163a6fSJake Oshins
20357f163a6fSJake Oshins new_res->name = "hyperv mmio";
20367f163a6fSJake Oshins new_res->flags = IORESOURCE_MEM;
20377f163a6fSJake Oshins new_res->start = start;
20387f163a6fSJake Oshins new_res->end = end;
20397f163a6fSJake Oshins
204040f26f31SJake Oshins /*
204140f26f31SJake Oshins * If two ranges are adjacent, merge them.
204240f26f31SJake Oshins */
20437f163a6fSJake Oshins do {
20447f163a6fSJake Oshins if (!*old_res) {
20457f163a6fSJake Oshins *old_res = new_res;
20464eb923f8SGerd Hoffmann break;
204746a97191SGreg Kroah-Hartman }
204846a97191SGreg Kroah-Hartman
204940f26f31SJake Oshins if (((*old_res)->end + 1) == new_res->start) {
205040f26f31SJake Oshins (*old_res)->end = new_res->end;
205140f26f31SJake Oshins kfree(new_res);
205240f26f31SJake Oshins break;
205340f26f31SJake Oshins }
205440f26f31SJake Oshins
205540f26f31SJake Oshins if ((*old_res)->start == new_res->end + 1) {
205640f26f31SJake Oshins (*old_res)->start = new_res->start;
205740f26f31SJake Oshins kfree(new_res);
205840f26f31SJake Oshins break;
205940f26f31SJake Oshins }
206040f26f31SJake Oshins
206123a06831SJake Oshins if ((*old_res)->start > new_res->end) {
20627f163a6fSJake Oshins new_res->sibling = *old_res;
20637f163a6fSJake Oshins if (prev_res)
20647f163a6fSJake Oshins (*prev_res)->sibling = new_res;
20657f163a6fSJake Oshins *old_res = new_res;
20667f163a6fSJake Oshins break;
20677f163a6fSJake Oshins }
20687f163a6fSJake Oshins
20697f163a6fSJake Oshins prev_res = old_res;
20707f163a6fSJake Oshins old_res = &(*old_res)->sibling;
20717f163a6fSJake Oshins
20727f163a6fSJake Oshins } while (1);
20737f163a6fSJake Oshins
207446a97191SGreg Kroah-Hartman return AE_OK;
207546a97191SGreg Kroah-Hartman }
2076f83705a5SSaurabh Sengar #endif
207746a97191SGreg Kroah-Hartman
vmbus_mmio_remove(void)20789c843423SSaurabh Sengar static void vmbus_mmio_remove(void)
20797f163a6fSJake Oshins {
20807f163a6fSJake Oshins struct resource *cur_res;
20817f163a6fSJake Oshins struct resource *next_res;
20827f163a6fSJake Oshins
20837f163a6fSJake Oshins if (hyperv_mmio) {
20846d146aefSJake Oshins if (fb_mmio) {
20856d146aefSJake Oshins __release_region(hyperv_mmio, fb_mmio->start,
20866d146aefSJake Oshins resource_size(fb_mmio));
20876d146aefSJake Oshins fb_mmio = NULL;
20886d146aefSJake Oshins }
20896d146aefSJake Oshins
20907f163a6fSJake Oshins for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
20917f163a6fSJake Oshins next_res = cur_res->sibling;
20927f163a6fSJake Oshins kfree(cur_res);
20937f163a6fSJake Oshins }
20947f163a6fSJake Oshins }
20957f163a6fSJake Oshins }
20967f163a6fSJake Oshins
vmbus_reserve_fb(void)2097f83705a5SSaurabh Sengar static void __maybe_unused vmbus_reserve_fb(void)
20986d146aefSJake Oshins {
20992a8a8afbSVitaly Kuznetsov resource_size_t start = 0, size;
21002a8a8afbSVitaly Kuznetsov struct pci_dev *pdev;
21012a8a8afbSVitaly Kuznetsov
21022a8a8afbSVitaly Kuznetsov if (efi_enabled(EFI_BOOT)) {
21032a8a8afbSVitaly Kuznetsov /* Gen2 VM: get FB base from EFI framebuffer */
21042a8a8afbSVitaly Kuznetsov start = screen_info.lfb_base;
21052a8a8afbSVitaly Kuznetsov size = max_t(__u32, screen_info.lfb_size, 0x800000);
21062a8a8afbSVitaly Kuznetsov } else {
21072a8a8afbSVitaly Kuznetsov /* Gen1 VM: get FB base from PCI */
21082a8a8afbSVitaly Kuznetsov pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
21092a8a8afbSVitaly Kuznetsov PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
21102a8a8afbSVitaly Kuznetsov if (!pdev)
21112a8a8afbSVitaly Kuznetsov return;
21122a8a8afbSVitaly Kuznetsov
21132a8a8afbSVitaly Kuznetsov if (pdev->resource[0].flags & IORESOURCE_MEM) {
21142a8a8afbSVitaly Kuznetsov start = pci_resource_start(pdev, 0);
21152a8a8afbSVitaly Kuznetsov size = pci_resource_len(pdev, 0);
21162a8a8afbSVitaly Kuznetsov }
21172a8a8afbSVitaly Kuznetsov
21182a8a8afbSVitaly Kuznetsov /*
21192a8a8afbSVitaly Kuznetsov * Release the PCI device so hyperv_drm or hyperv_fb driver can
21202a8a8afbSVitaly Kuznetsov * grab it later.
21212a8a8afbSVitaly Kuznetsov */
21222a8a8afbSVitaly Kuznetsov pci_dev_put(pdev);
21232a8a8afbSVitaly Kuznetsov }
21242a8a8afbSVitaly Kuznetsov
21252a8a8afbSVitaly Kuznetsov if (!start)
21262a8a8afbSVitaly Kuznetsov return;
21272a8a8afbSVitaly Kuznetsov
21286d146aefSJake Oshins /*
21296d146aefSJake Oshins * Make a claim for the frame buffer in the resource tree under the
21306d146aefSJake Oshins * first node, which will be the one below 4GB. The length seems to
21316d146aefSJake Oshins * be underreported, particularly in a Generation 1 VM. So start out
21326d146aefSJake Oshins * reserving a larger area and make it smaller until it succeeds.
21336d146aefSJake Oshins */
21342a8a8afbSVitaly Kuznetsov for (; !fb_mmio && (size >= 0x100000); size >>= 1)
21352a8a8afbSVitaly Kuznetsov fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
21366d146aefSJake Oshins }
21376d146aefSJake Oshins
213835464483SJake Oshins /**
213935464483SJake Oshins * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
214035464483SJake Oshins * @new: If successful, supplied a pointer to the
214135464483SJake Oshins * allocated MMIO space.
214235464483SJake Oshins * @device_obj: Identifies the caller
214335464483SJake Oshins * @min: Minimum guest physical address of the
214435464483SJake Oshins * allocation
214535464483SJake Oshins * @max: Maximum guest physical address
214635464483SJake Oshins * @size: Size of the range to be allocated
214735464483SJake Oshins * @align: Alignment of the range to be allocated
214835464483SJake Oshins * @fb_overlap_ok: Whether this allocation can be allowed
214935464483SJake Oshins * to overlap the video frame buffer.
215035464483SJake Oshins *
215135464483SJake Oshins * This function walks the resources granted to VMBus by the
215235464483SJake Oshins * _CRS object in the ACPI namespace underneath the parent
215335464483SJake Oshins * "bridge" whether that's a root PCI bus in the Generation 1
215435464483SJake Oshins * case or a Module Device in the Generation 2 case. It then
215535464483SJake Oshins * attempts to allocate from the global MMIO pool in a way that
215635464483SJake Oshins * matches the constraints supplied in these parameters and by
215735464483SJake Oshins * that _CRS.
215835464483SJake Oshins *
215935464483SJake Oshins * Return: 0 on success, -errno on failure
216035464483SJake Oshins */
vmbus_allocate_mmio(struct resource ** new,struct hv_device * device_obj,resource_size_t min,resource_size_t max,resource_size_t size,resource_size_t align,bool fb_overlap_ok)216135464483SJake Oshins int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
216235464483SJake Oshins resource_size_t min, resource_size_t max,
216335464483SJake Oshins resource_size_t size, resource_size_t align,
216435464483SJake Oshins bool fb_overlap_ok)
216535464483SJake Oshins {
2166be000f93SJake Oshins struct resource *iter, *shadow;
2167f0880e2cSVitaly Kuznetsov resource_size_t range_min, range_max, start, end;
216835464483SJake Oshins const char *dev_n = dev_name(&device_obj->device);
2169ea37a6b8SJake Oshins int retval;
2170e16dad6bSJake Oshins
2171e16dad6bSJake Oshins retval = -ENXIO;
21728aea7f82SDavidlohr Bueso mutex_lock(&hyperv_mmio_lock);
217335464483SJake Oshins
2174ea37a6b8SJake Oshins /*
2175ea37a6b8SJake Oshins * If overlaps with frame buffers are allowed, then first attempt to
2176ea37a6b8SJake Oshins * make the allocation from within the reserved region. Because it
2177ea37a6b8SJake Oshins * is already reserved, no shadow allocation is necessary.
2178ea37a6b8SJake Oshins */
2179ea37a6b8SJake Oshins if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
2180ea37a6b8SJake Oshins !(max < fb_mmio->start)) {
2181ea37a6b8SJake Oshins
2182ea37a6b8SJake Oshins range_min = fb_mmio->start;
2183ea37a6b8SJake Oshins range_max = fb_mmio->end;
2184ea37a6b8SJake Oshins start = (range_min + align - 1) & ~(align - 1);
2185ea37a6b8SJake Oshins for (; start + size - 1 <= range_max; start += align) {
2186ea37a6b8SJake Oshins *new = request_mem_region_exclusive(start, size, dev_n);
2187ea37a6b8SJake Oshins if (*new) {
2188ea37a6b8SJake Oshins retval = 0;
2189ea37a6b8SJake Oshins goto exit;
2190ea37a6b8SJake Oshins }
2191ea37a6b8SJake Oshins }
2192ea37a6b8SJake Oshins }
2193ea37a6b8SJake Oshins
219435464483SJake Oshins for (iter = hyperv_mmio; iter; iter = iter->sibling) {
219535464483SJake Oshins if ((iter->start >= max) || (iter->end <= min))
219635464483SJake Oshins continue;
219735464483SJake Oshins
219835464483SJake Oshins range_min = iter->start;
219935464483SJake Oshins range_max = iter->end;
2200ea37a6b8SJake Oshins start = (range_min + align - 1) & ~(align - 1);
2201ea37a6b8SJake Oshins for (; start + size - 1 <= range_max; start += align) {
2202f0880e2cSVitaly Kuznetsov end = start + size - 1;
2203f0880e2cSVitaly Kuznetsov
2204f0880e2cSVitaly Kuznetsov /* Skip the whole fb_mmio region if not fb_overlap_ok */
2205f0880e2cSVitaly Kuznetsov if (!fb_overlap_ok && fb_mmio &&
2206f0880e2cSVitaly Kuznetsov (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
2207f0880e2cSVitaly Kuznetsov ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
2208f0880e2cSVitaly Kuznetsov continue;
2209f0880e2cSVitaly Kuznetsov
2210ea37a6b8SJake Oshins shadow = __request_region(iter, start, size, NULL,
2211be000f93SJake Oshins IORESOURCE_BUSY);
2212be000f93SJake Oshins if (!shadow)
2213be000f93SJake Oshins continue;
2214be000f93SJake Oshins
2215ea37a6b8SJake Oshins *new = request_mem_region_exclusive(start, size, dev_n);
2216e16dad6bSJake Oshins if (*new) {
2217be000f93SJake Oshins shadow->name = (char *)*new;
2218e16dad6bSJake Oshins retval = 0;
2219e16dad6bSJake Oshins goto exit;
2220e16dad6bSJake Oshins }
2221be000f93SJake Oshins
2222be000f93SJake Oshins __release_region(iter, start, size);
222335464483SJake Oshins }
222435464483SJake Oshins }
222535464483SJake Oshins
2226e16dad6bSJake Oshins exit:
22278aea7f82SDavidlohr Bueso mutex_unlock(&hyperv_mmio_lock);
2228e16dad6bSJake Oshins return retval;
222935464483SJake Oshins }
223035464483SJake Oshins EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
223135464483SJake Oshins
2232619848bdSJake Oshins /**
223397fb77dcSJake Oshins * vmbus_free_mmio() - Free a memory-mapped I/O range.
223497fb77dcSJake Oshins * @start: Base address of region to release.
223597fb77dcSJake Oshins * @size: Size of the range to be allocated
223697fb77dcSJake Oshins *
223797fb77dcSJake Oshins * This function releases anything requested by
223897fb77dcSJake Oshins * vmbus_mmio_allocate().
223997fb77dcSJake Oshins */
vmbus_free_mmio(resource_size_t start,resource_size_t size)224097fb77dcSJake Oshins void vmbus_free_mmio(resource_size_t start, resource_size_t size)
224197fb77dcSJake Oshins {
2242be000f93SJake Oshins struct resource *iter;
2243be000f93SJake Oshins
22448aea7f82SDavidlohr Bueso mutex_lock(&hyperv_mmio_lock);
2245*466ae740SMichael Kelley
2246*466ae740SMichael Kelley /*
2247*466ae740SMichael Kelley * If all bytes of the MMIO range to be released are within the
2248*466ae740SMichael Kelley * special case fb_mmio shadow region, skip releasing the shadow
2249*466ae740SMichael Kelley * region since no corresponding __request_region() was done
2250*466ae740SMichael Kelley * in vmbus_allocate_mmio().
2251*466ae740SMichael Kelley */
2252*466ae740SMichael Kelley if (fb_mmio && start >= fb_mmio->start &&
2253*466ae740SMichael Kelley (start + size - 1 <= fb_mmio->end))
2254*466ae740SMichael Kelley goto skip_shadow_release;
2255*466ae740SMichael Kelley
2256be000f93SJake Oshins for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2257be000f93SJake Oshins if ((iter->start >= start + size) || (iter->end <= start))
2258be000f93SJake Oshins continue;
2259be000f93SJake Oshins
2260be000f93SJake Oshins __release_region(iter, start, size);
2261be000f93SJake Oshins }
2262*466ae740SMichael Kelley
2263*466ae740SMichael Kelley skip_shadow_release:
226497fb77dcSJake Oshins release_mem_region(start, size);
22658aea7f82SDavidlohr Bueso mutex_unlock(&hyperv_mmio_lock);
226697fb77dcSJake Oshins
226797fb77dcSJake Oshins }
226897fb77dcSJake Oshins EXPORT_SYMBOL_GPL(vmbus_free_mmio);
226997fb77dcSJake Oshins
#ifdef CONFIG_ACPI
/*
 * ACPI-boot probe path: walk the _CRS resources of the VMBus ACPI
 * device and of its ancestors to discover hypervisor MMIO ranges,
 * reserving the framebuffer range once any range has been found.
 */
static int vmbus_acpi_add(struct platform_device *pdev)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;
	struct acpi_device *device = ACPI_COMPANION(&pdev->dev);

	hv_dev = &device->dev;

	/*
	 * Older versions of Hyper-V for ARM64 fail to include the _CCA
	 * method on the top level VMbus device in the DSDT. But devices
	 * are hardware coherent in all current Hyper-V use cases, so fix
	 * up the ACPI device to behave as if _CCA is present and indicates
	 * hardware coherence.
	 */
	ACPI_COMPANION_SET(&device->dev, device);
	if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
	    device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
		pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
		device->flags.cca_seen = true;
		device->flags.coherent_dma = true;
	}

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);
	if (ACPI_FAILURE(result))
		goto acpi_walk_err;

	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = acpi_dev_parent(device);
	     ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
	     ancestor = acpi_dev_parent(ancestor)) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);
		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	if (ret_val)
		vmbus_mmio_remove();
	return ret_val;
}
#else
static int vmbus_acpi_add(struct platform_device *pdev)
{
	return 0;
}
#endif
2330f83705a5SSaurabh Sengar
2331f83705a5SSaurabh Sengar static int vmbus_device_add(struct platform_device *pdev)
2332f83705a5SSaurabh Sengar {
2333f83705a5SSaurabh Sengar struct resource **cur_res = &hyperv_mmio;
2334f83705a5SSaurabh Sengar struct of_range range;
2335f83705a5SSaurabh Sengar struct of_range_parser parser;
2336f83705a5SSaurabh Sengar struct device_node *np = pdev->dev.of_node;
2337f83705a5SSaurabh Sengar int ret;
2338f83705a5SSaurabh Sengar
2339f83705a5SSaurabh Sengar hv_dev = &pdev->dev;
2340f83705a5SSaurabh Sengar
2341f83705a5SSaurabh Sengar ret = of_range_parser_init(&parser, np);
2342f83705a5SSaurabh Sengar if (ret)
2343f83705a5SSaurabh Sengar return ret;
2344f83705a5SSaurabh Sengar
2345f83705a5SSaurabh Sengar for_each_of_range(&parser, &range) {
2346f83705a5SSaurabh Sengar struct resource *res;
2347f83705a5SSaurabh Sengar
2348f83705a5SSaurabh Sengar res = kzalloc(sizeof(*res), GFP_KERNEL);
2349f83705a5SSaurabh Sengar if (!res) {
2350f83705a5SSaurabh Sengar vmbus_mmio_remove();
2351f83705a5SSaurabh Sengar return -ENOMEM;
2352f83705a5SSaurabh Sengar }
vmbus_platform_driver_probe(struct platform_device * pdev)2353f83705a5SSaurabh Sengar
2354f83705a5SSaurabh Sengar res->name = "hyperv mmio";
2355f83705a5SSaurabh Sengar res->flags = range.flags;
2356f83705a5SSaurabh Sengar res->start = range.cpu_addr;
2357f83705a5SSaurabh Sengar res->end = range.cpu_addr + range.size;
2358f83705a5SSaurabh Sengar
2359f83705a5SSaurabh Sengar *cur_res = res;
2360f83705a5SSaurabh Sengar cur_res = &res->sibling;
vmbus_platform_driver_remove(struct platform_device * pdev)2361f83705a5SSaurabh Sengar }
2362f83705a5SSaurabh Sengar
2363f83705a5SSaurabh Sengar return ret;
2364f83705a5SSaurabh Sengar }
236546a97191SGreg Kroah-Hartman
23669c843423SSaurabh Sengar static int vmbus_platform_driver_probe(struct platform_device *pdev)
23679c843423SSaurabh Sengar {
vmbus_bus_suspend(struct device * dev)2368f83705a5SSaurabh Sengar if (acpi_disabled)
2369f83705a5SSaurabh Sengar return vmbus_device_add(pdev);
2370f83705a5SSaurabh Sengar else
23719c843423SSaurabh Sengar return vmbus_acpi_add(pdev);
23729c843423SSaurabh Sengar }
23739c843423SSaurabh Sengar
/* Tear down the MMIO resource list built at probe time. */
static int vmbus_platform_driver_remove(struct platform_device *pdev)
{
	vmbus_mmio_remove();
	return 0;
}
237946a97191SGreg Kroah-Hartman
238083b50f83SDexuan Cui #ifdef CONFIG_PM_SLEEP
2381f53335e3SDexuan Cui static int vmbus_bus_suspend(struct device *dev)
2382f53335e3SDexuan Cui {
238352be9355SShradha Gupta struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
238452be9355SShradha Gupta hv_context.cpu_context, VMBUS_CONNECT_CPU);
2385b307b389SDexuan Cui struct vmbus_channel *channel, *sc;
23861f48dcf1SDexuan Cui
238752be9355SShradha Gupta tasklet_disable(&hv_cpu->msg_dpc);
238852be9355SShradha Gupta vmbus_connection.ignore_any_offer_msg = true;
238952be9355SShradha Gupta /* The tasklet_enable() takes care of providing a memory barrier */
239052be9355SShradha Gupta tasklet_enable(&hv_cpu->msg_dpc);
239152be9355SShradha Gupta
239252be9355SShradha Gupta /* Drain all the workqueues as we are in suspend */
239352be9355SShradha Gupta drain_workqueue(vmbus_connection.rescind_work_queue);
239452be9355SShradha Gupta drain_workqueue(vmbus_connection.work_queue);
239552be9355SShradha Gupta drain_workqueue(vmbus_connection.handle_primary_chan_wq);
239652be9355SShradha Gupta drain_workqueue(vmbus_connection.handle_sub_chan_wq);
23971f48dcf1SDexuan Cui
23981f48dcf1SDexuan Cui mutex_lock(&vmbus_connection.channel_mutex);
23991f48dcf1SDexuan Cui list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
24001f48dcf1SDexuan Cui if (!is_hvsock_channel(channel))
24011f48dcf1SDexuan Cui continue;
24021f48dcf1SDexuan Cui
24031f48dcf1SDexuan Cui vmbus_force_channel_rescinded(channel);
24041f48dcf1SDexuan Cui }
24051f48dcf1SDexuan Cui mutex_unlock(&vmbus_connection.channel_mutex);
24061f48dcf1SDexuan Cui
2407b307b389SDexuan Cui /*
2408b307b389SDexuan Cui * Wait until all the sub-channels and hv_sock channels have been
2409b307b389SDexuan Cui * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
2410b307b389SDexuan Cui * they would conflict with the new sub-channels that will be created
2411b307b389SDexuan Cui * in the resume path. hv_sock channels should also be destroyed, but
2412b307b389SDexuan Cui * a hv_sock channel of an established hv_sock connection can not be
2413b307b389SDexuan Cui * really destroyed since it may still be referenced by the userspace
2414b307b389SDexuan Cui * application, so we just force the hv_sock channel to be rescinded
2415b307b389SDexuan Cui * by vmbus_force_channel_rescinded(), and the userspace application
2416b307b389SDexuan Cui * will thoroughly destroy the channel after hibernation.
2417b307b389SDexuan Cui *
2418b307b389SDexuan Cui * Note: the counter nr_chan_close_on_suspend may never go above 0 if
2419b307b389SDexuan Cui * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
2420b307b389SDexuan Cui */
2421b307b389SDexuan Cui if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
2422b307b389SDexuan Cui wait_for_completion(&vmbus_connection.ready_for_suspend_event);
2423b307b389SDexuan Cui
242419873eecSDexuan Cui if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
242519873eecSDexuan Cui pr_err("Can not suspend due to a previous failed resuming\n");
242619873eecSDexuan Cui return -EBUSY;
242719873eecSDexuan Cui }
2428d8bd2d44SDexuan Cui
2429b307b389SDexuan Cui mutex_lock(&vmbus_connection.channel_mutex);
2430b307b389SDexuan Cui
2431b307b389SDexuan Cui list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2432d8bd2d44SDexuan Cui /*
24338b6a877cSAndrea Parri (Microsoft) * Remove the channel from the array of channels and invalidate
24348b6a877cSAndrea Parri (Microsoft) * the channel's relid. Upon resume, vmbus_onoffer() will fix
24358b6a877cSAndrea Parri (Microsoft) * up the relid (and other fields, if necessary) and add the
24368b6a877cSAndrea Parri (Microsoft) * channel back to the array.
2437d8bd2d44SDexuan Cui */
24388b6a877cSAndrea Parri (Microsoft) vmbus_channel_unmap_relid(channel);
2439d8bd2d44SDexuan Cui channel->offermsg.child_relid = INVALID_RELID;
2440d8bd2d44SDexuan Cui
2441b307b389SDexuan Cui if (is_hvsock_channel(channel)) {
2442b307b389SDexuan Cui if (!channel->rescind) {
2443b307b389SDexuan Cui pr_err("hv_sock channel not rescinded!\n");
2444b307b389SDexuan Cui WARN_ON_ONCE(1);
2445b307b389SDexuan Cui }
2446b307b389SDexuan Cui continue;
2447b307b389SDexuan Cui }
2448b307b389SDexuan Cui
2449b307b389SDexuan Cui list_for_each_entry(sc, &channel->sc_list, sc_list) {
2450b307b389SDexuan Cui pr_err("Sub-channel not deleted!\n");
2451b307b389SDexuan Cui WARN_ON_ONCE(1);
2452b307b389SDexuan Cui }
2453d8bd2d44SDexuan Cui
vmbus_bus_resume(struct device * dev)2454d8bd2d44SDexuan Cui atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
2455b307b389SDexuan Cui }
2456b307b389SDexuan Cui
2457b307b389SDexuan Cui mutex_unlock(&vmbus_connection.channel_mutex);
2458b307b389SDexuan Cui
2459f53335e3SDexuan Cui vmbus_initiate_unload(false);
2460f53335e3SDexuan Cui
2461d8bd2d44SDexuan Cui /* Reset the event for the next resume. */
2462d8bd2d44SDexuan Cui reinit_completion(&vmbus_connection.ready_for_resume_event);
2463d8bd2d44SDexuan Cui
2464f53335e3SDexuan Cui return 0;
2465f53335e3SDexuan Cui }
2466f53335e3SDexuan Cui
2467f53335e3SDexuan Cui static int vmbus_bus_resume(struct device *dev)
2468f53335e3SDexuan Cui {
2469f53335e3SDexuan Cui struct vmbus_channel_msginfo *msginfo;
2470f53335e3SDexuan Cui size_t msgsize;
2471f53335e3SDexuan Cui int ret;
2472f53335e3SDexuan Cui
247352be9355SShradha Gupta vmbus_connection.ignore_any_offer_msg = false;
247452be9355SShradha Gupta
2475f53335e3SDexuan Cui /*
2476f53335e3SDexuan Cui * We only use the 'vmbus_proto_version', which was in use before
2477f53335e3SDexuan Cui * hibernation, to re-negotiate with the host.
2478f53335e3SDexuan Cui */
2479bedc61a9SAndrea Parri if (!vmbus_proto_version) {
2480f53335e3SDexuan Cui pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
2481f53335e3SDexuan Cui return -EINVAL;
2482f53335e3SDexuan Cui }
2483f53335e3SDexuan Cui
2484f53335e3SDexuan Cui msgsize = sizeof(*msginfo) +
2485f53335e3SDexuan Cui sizeof(struct vmbus_channel_initiate_contact);
2486f53335e3SDexuan Cui
2487f53335e3SDexuan Cui msginfo = kzalloc(msgsize, GFP_KERNEL);
2488f53335e3SDexuan Cui
2489f53335e3SDexuan Cui if (msginfo == NULL)
2490f53335e3SDexuan Cui return -ENOMEM;
2491f53335e3SDexuan Cui
2492f53335e3SDexuan Cui ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
2493f53335e3SDexuan Cui
2494f53335e3SDexuan Cui kfree(msginfo);
2495f53335e3SDexuan Cui
2496f53335e3SDexuan Cui if (ret != 0)
2497f53335e3SDexuan Cui return ret;
2498f53335e3SDexuan Cui
2499d8bd2d44SDexuan Cui WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
2500d8bd2d44SDexuan Cui
2501f53335e3SDexuan Cui vmbus_request_offers();
2502f53335e3SDexuan Cui
250319873eecSDexuan Cui if (wait_for_completion_timeout(
250419873eecSDexuan Cui &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
250519873eecSDexuan Cui pr_err("Some vmbus device is missing after suspending?\n");
2506d8bd2d44SDexuan Cui
2507b307b389SDexuan Cui /* Reset the event for the next suspend. */
2508b307b389SDexuan Cui reinit_completion(&vmbus_connection.ready_for_suspend_event);
2509b307b389SDexuan Cui
2510f53335e3SDexuan Cui return 0;
2511f53335e3SDexuan Cui }
25121a06d017SDexuan Cui #else
25131a06d017SDexuan Cui #define vmbus_bus_suspend NULL
25141a06d017SDexuan Cui #define vmbus_bus_resume NULL
251583b50f83SDexuan Cui #endif /* CONFIG_PM_SLEEP */
2516f53335e3SDexuan Cui
2517f83705a5SSaurabh Sengar static const __maybe_unused struct of_device_id vmbus_of_match[] = {
2518f83705a5SSaurabh Sengar {
2519f83705a5SSaurabh Sengar .compatible = "microsoft,vmbus",
2520f83705a5SSaurabh Sengar },
2521f83705a5SSaurabh Sengar {
2522f83705a5SSaurabh Sengar /* sentinel */
2523f83705a5SSaurabh Sengar },
2524f83705a5SSaurabh Sengar };
2525f83705a5SSaurabh Sengar MODULE_DEVICE_TABLE(of, vmbus_of_match);
2526f83705a5SSaurabh Sengar
2527f83705a5SSaurabh Sengar static const __maybe_unused struct acpi_device_id vmbus_acpi_device_ids[] = {
252846a97191SGreg Kroah-Hartman {"VMBUS", 0},
252946a97191SGreg Kroah-Hartman {"VMBus", 0},
253046a97191SGreg Kroah-Hartman {"", 0},
253146a97191SGreg Kroah-Hartman };
253246a97191SGreg Kroah-Hartman MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
253346a97191SGreg Kroah-Hartman
2534f53335e3SDexuan Cui /*
25351a06d017SDexuan Cui * Note: we must use the "no_irq" ops, otherwise hibernation can not work with
25361a06d017SDexuan Cui * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
25371a06d017SDexuan Cui * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
2538f53335e3SDexuan Cui * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
2539f53335e3SDexuan Cui * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
25401a06d017SDexuan Cui * resume callback must also run via the "noirq" ops.
25411a06d017SDexuan Cui *
25421a06d017SDexuan Cui * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
25431a06d017SDexuan Cui * earlier in this file before vmbus_pm.
2544f53335e3SDexuan Cui */
25451a06d017SDexuan Cui
2546f53335e3SDexuan Cui static const struct dev_pm_ops vmbus_bus_pm = {
25471a06d017SDexuan Cui .suspend_noirq = NULL,
25481a06d017SDexuan Cui .resume_noirq = NULL,
25491a06d017SDexuan Cui .freeze_noirq = vmbus_bus_suspend,
25501a06d017SDexuan Cui .thaw_noirq = vmbus_bus_resume,
25511a06d017SDexuan Cui .poweroff_noirq = vmbus_bus_suspend,
25521a06d017SDexuan Cui .restore_noirq = vmbus_bus_resume
2553f53335e3SDexuan Cui };
hv_kexec_handler(void)2554f53335e3SDexuan Cui
25559c843423SSaurabh Sengar static struct platform_driver vmbus_platform_driver = {
25569c843423SSaurabh Sengar .probe = vmbus_platform_driver_probe,
25579c843423SSaurabh Sengar .remove = vmbus_platform_driver_remove,
25589c843423SSaurabh Sengar .driver = {
255946a97191SGreg Kroah-Hartman .name = "vmbus",
25609c843423SSaurabh Sengar .acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
2561f83705a5SSaurabh Sengar .of_match_table = of_match_ptr(vmbus_of_match),
25629c843423SSaurabh Sengar .pm = &vmbus_bus_pm,
25639c843423SSaurabh Sengar .probe_type = PROBE_FORCE_SYNCHRONOUS,
25649c843423SSaurabh Sengar }
256546a97191SGreg Kroah-Hartman };
256646a97191SGreg Kroah-Hartman
25672517281dSVitaly Kuznetsov static void hv_kexec_handler(void)
25682517281dSVitaly Kuznetsov {
2569fd1fea68SMichael Kelley hv_stimer_global_cleanup();
257075ff3a8aSVitaly Kuznetsov vmbus_initiate_unload(false);
2571523b9408SVitaly Kuznetsov /* Make sure conn_state is set as hv_synic_cleanup checks for it */
2572523b9408SVitaly Kuznetsov mb();
257376d36ab7SVitaly Kuznetsov cpuhp_remove_state(hyperv_cpuhp_online);
25742517281dSVitaly Kuznetsov };
25752517281dSVitaly Kuznetsov
2576b4370df2SVitaly Kuznetsov static void hv_crash_handler(struct pt_regs *regs)
2577b4370df2SVitaly Kuznetsov {
hv_synic_suspend(void)2578fd1fea68SMichael Kelley int cpu;
2579fd1fea68SMichael Kelley
258075ff3a8aSVitaly Kuznetsov vmbus_initiate_unload(true);
2581b4370df2SVitaly Kuznetsov /*
2582b4370df2SVitaly Kuznetsov * In crash handler we can't schedule synic cleanup for all CPUs,
2583b4370df2SVitaly Kuznetsov * doing the cleanup for current CPU only. This should be sufficient
2584b4370df2SVitaly Kuznetsov * for kdump.
2585b4370df2SVitaly Kuznetsov */
2586fd1fea68SMichael Kelley cpu = smp_processor_id();
2587fd1fea68SMichael Kelley hv_stimer_cleanup(cpu);
25887a1323b5SMichael Kelley hv_synic_disable_regs(cpu);
2589b4370df2SVitaly Kuznetsov };
2590b4370df2SVitaly Kuznetsov
static int hv_synic_suspend(void)
{
	/*
	 * When we reach here, all the non-boot CPUs have been offlined.
	 * If we're in a legacy configuration where stimer Direct Mode is
	 * not enabled, the stimers on the non-boot CPUs have been unbound
	 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
	 * hv_stimer_cleanup() -> clockevents_unbind_device().
	 *
	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
	 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
	 * 1) it's unnecessary as interrupts remain disabled between
	 * syscore_suspend() and syscore_resume(): see create_image() and
	 * resume_target_kernel()
	 * 2) the stimer on CPU0 is automatically disabled later by
	 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
	 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
	 * 3) a warning would be triggered if we call
	 * clockevents_unbind_device(), which may sleep, in an
	 * interrupts-disabled context.
	 */

	hv_synic_disable_regs(0);

	return 0;
}
261763ecc6d2SDexuan Cui
static void hv_synic_resume(void)
{
	hv_synic_enable_regs(0);

	/*
	 * Note: we don't need to call hv_stimer_init(0), because the timer
	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
	 * automatically re-enabled in timekeeping_resume().
	 */
}
262863ecc6d2SDexuan Cui
262963ecc6d2SDexuan Cui /* The callbacks run only on CPU0, with irqs_disabled. */
263063ecc6d2SDexuan Cui static struct syscore_ops hv_synic_syscore_ops = {
263163ecc6d2SDexuan Cui .suspend = hv_synic_suspend,
263263ecc6d2SDexuan Cui .resume = hv_synic_resume,
263363ecc6d2SDexuan Cui };
263463ecc6d2SDexuan Cui
263546a97191SGreg Kroah-Hartman static int __init hv_acpi_init(void)
263646a97191SGreg Kroah-Hartman {
2637f7ac541eSStanislav Kinsburskiy int ret;
263846a97191SGreg Kroah-Hartman
26394a5f3cdeSMichael Kelley if (!hv_is_hyperv_initialized())
26400592969eSJason Wang return -ENODEV;
26410592969eSJason Wang
26428536290fSJinank Jain if (hv_root_partition && !hv_nested)
26437e279d78SWei Liu return 0;
26447e279d78SWei Liu
264546a97191SGreg Kroah-Hartman /*
2646efc26722SK. Y. Srinivasan * Get ACPI resources first.
264746a97191SGreg Kroah-Hartman */
26489c843423SSaurabh Sengar ret = platform_driver_register(&vmbus_platform_driver);
264946a97191SGreg Kroah-Hartman if (ret)
265046a97191SGreg Kroah-Hartman return ret;
265146a97191SGreg Kroah-Hartman
26529c843423SSaurabh Sengar if (!hv_dev) {
2653f7ac541eSStanislav Kinsburskiy ret = -ENODEV;
265446a97191SGreg Kroah-Hartman goto cleanup;
265546a97191SGreg Kroah-Hartman }
2656d608715dSMichael Kelley
2657d608715dSMichael Kelley /*
2658d608715dSMichael Kelley * If we're on an architecture with a hardcoded hypervisor
2659d608715dSMichael Kelley * vector (i.e. x86/x64), override the VMbus interrupt found
2660d608715dSMichael Kelley * in the ACPI tables. Ensure vmbus_irq is not set since the
2661d608715dSMichael Kelley * normal Linux IRQ mechanism is not used in this case.
2662d608715dSMichael Kelley */
2663d608715dSMichael Kelley #ifdef HYPERVISOR_CALLBACK_VECTOR
2664d608715dSMichael Kelley vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
2665d608715dSMichael Kelley vmbus_irq = -1;
2666d608715dSMichael Kelley #endif
2667d608715dSMichael Kelley
2668af9ca6f9SBranden Bonaby hv_debug_init();
266946a97191SGreg Kroah-Hartman
2670efc26722SK. Y. Srinivasan ret = vmbus_bus_init();
267146a97191SGreg Kroah-Hartman if (ret)
267246a97191SGreg Kroah-Hartman goto cleanup;
267346a97191SGreg Kroah-Hartman
vmbus_exit(void)26742517281dSVitaly Kuznetsov hv_setup_kexec_handler(hv_kexec_handler);
2675b4370df2SVitaly Kuznetsov hv_setup_crash_handler(hv_crash_handler);
26762517281dSVitaly Kuznetsov
267763ecc6d2SDexuan Cui register_syscore_ops(&hv_synic_syscore_ops);
267863ecc6d2SDexuan Cui
267946a97191SGreg Kroah-Hartman return 0;
268046a97191SGreg Kroah-Hartman
268146a97191SGreg Kroah-Hartman cleanup:
26829c843423SSaurabh Sengar platform_driver_unregister(&vmbus_platform_driver);
26839c843423SSaurabh Sengar hv_dev = NULL;
268446a97191SGreg Kroah-Hartman return ret;
268546a97191SGreg Kroah-Hartman }
268646a97191SGreg Kroah-Hartman
268793e5bd06SK. Y. Srinivasan static void __exit vmbus_exit(void)
268893e5bd06SK. Y. Srinivasan {
2689e72e7ac5SVitaly Kuznetsov int cpu;
2690e72e7ac5SVitaly Kuznetsov
269163ecc6d2SDexuan Cui unregister_syscore_ops(&hv_synic_syscore_ops);
269263ecc6d2SDexuan Cui
26932517281dSVitaly Kuznetsov hv_remove_kexec_handler();
2694b4370df2SVitaly Kuznetsov hv_remove_crash_handler();
269509a19628SVitaly Kuznetsov vmbus_connection.conn_state = DISCONNECTED;
2696fd1fea68SMichael Kelley hv_stimer_global_cleanup();
26972db84effSK. Y. Srinivasan vmbus_disconnect();
2698d608715dSMichael Kelley if (vmbus_irq == -1) {
2699d608715dSMichael Kelley hv_remove_vmbus_handler();
2700d608715dSMichael Kelley } else {
2701d608715dSMichael Kelley free_percpu_irq(vmbus_irq, vmbus_evt);
2702d608715dSMichael Kelley free_percpu(vmbus_evt);
2703d608715dSMichael Kelley }
270437cdd991SStephen Hemminger for_each_online_cpu(cpu) {
270537cdd991SStephen Hemminger struct hv_per_cpu_context *hv_cpu
270637cdd991SStephen Hemminger = per_cpu_ptr(hv_context.cpu_context, cpu);
270737cdd991SStephen Hemminger
270837cdd991SStephen Hemminger tasklet_kill(&hv_cpu->msg_dpc);
270937cdd991SStephen Hemminger }
2710af9ca6f9SBranden Bonaby hv_debug_rm_all_dir();
2711af9ca6f9SBranden Bonaby
271293e5bd06SK. Y. Srinivasan vmbus_free_channels();
27138b6a877cSAndrea Parri (Microsoft) kfree(vmbus_connection.channels);
271437cdd991SStephen Hemminger
2715792f232dSGuilherme G. Piccoli /*
2716d786e00dSGuilherme G. Piccoli * The vmbus panic notifier is always registered, hence we should
2717792f232dSGuilherme G. Piccoli * also unconditionally unregister it here as well.
2718792f232dSGuilherme G. Piccoli */
2719096c605fSVitaly Kuznetsov atomic_notifier_chain_unregister(&panic_notifier_list,
2720d786e00dSGuilherme G. Piccoli &hyperv_panic_vmbus_unload_block);
272181b18bceSSunil Muthuswamy
272293e5bd06SK. Y. Srinivasan bus_unregister(&hv_bus);
272337cdd991SStephen Hemminger
272476d36ab7SVitaly Kuznetsov cpuhp_remove_state(hyperv_cpuhp_online);
272506210b42SVitaly Kuznetsov hv_synic_free();
27269c843423SSaurabh Sengar platform_driver_unregister(&vmbus_platform_driver);
272793e5bd06SK. Y. Srinivasan }
272893e5bd06SK. Y. Srinivasan
272946a97191SGreg Kroah-Hartman
273046a97191SGreg Kroah-Hartman MODULE_LICENSE("GPL");
2731674eecb3SJoseph Salisbury MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
273246a97191SGreg Kroah-Hartman
273343d4e119SK. Y. Srinivasan subsys_initcall(hv_acpi_init);
273493e5bd06SK. Y. Srinivasan module_exit(vmbus_exit);
2735