// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN_HSM: Handle I/O requests
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Jason Chen CJ <jason.cj.chen@intel.com>
 *	Fengwei Yin <fengwei.yin@intel.com>
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/acrn.h>

#include "acrn_drv.h"

static void ioreq_pause(void);
static void ioreq_resume(void);

static void ioreq_dispatcher(struct work_struct *work);
static struct workqueue_struct *ioreq_wq;
static DECLARE_WORK(ioreq_work, ioreq_dispatcher);

static inline bool has_pending_request(struct acrn_ioreq_client *client)
{
	return !bitmap_empty(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
}

static inline bool is_destroying(struct acrn_ioreq_client *client)
{
	return test_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
}

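/*
 * Mark an I/O request as complete and, unless the hypervisor polls for
 * completion, notify the hypervisor that the request has been handled.
 */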
static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
				  struct acrn_io_request *acrn_req)
{
	bool polling_mode;
	int ret = 0;

	polling_mode = acrn_req->completion_polling;
	/* Add barrier() to make sure the writes are done before completion */
	smp_store_release(&acrn_req->processed, ACRN_IOREQ_STATE_COMPLETE);

	/*
	 * To fulfill the real-time requirements of several industry
	 * scenarios, such as automotive, ACRN can run in partition mode,
	 * in which User VMs and the Service VM are bound to dedicated CPU
	 * cores. Polling mode of handling the I/O request is introduced to
	 * achieve faster I/O request handling. In polling mode, the
	 * hypervisor polls for the I/O request's completion. Once an I/O
	 * request is marked as ACRN_IOREQ_STATE_COMPLETE, the hypervisor
	 * resumes from the polling point to continue the I/O request flow.
	 * Thus, the completion notification of the I/O request from HSM is
	 * not needed. Please note that completion_polling needs to be read
	 * before the I/O request is marked as ACRN_IOREQ_STATE_COMPLETE to
	 * avoid racing with the hypervisor.
	 */
	if (!polling_mode) {
		ret = hcall_notify_req_finish(vm->vmid, vcpu);
		if (ret < 0)
			dev_err(acrn_dev.this_device,
				"Notify I/O request finished failed!\n");
	}

	return ret;
}

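/*
 * Complete the I/O request of the given vCPU on behalf of an ioreq client:
 * clear the pending bit and complete either the passed-in request or that
 * vCPU's request slot in the shared I/O request buffer.
 */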
static int acrn_ioreq_complete_request(struct acrn_ioreq_client *client,
				       u16 vcpu,
				       struct acrn_io_request *acrn_req)
{
	int ret;

	if (vcpu >= client->vm->vcpu_num)
		return -EINVAL;

	clear_bit(vcpu, client->ioreqs_map);
	if (!acrn_req) {
		acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
		acrn_req += vcpu;
	}

	ret = ioreq_complete_request(client->vm, vcpu, acrn_req);

	return ret;
}

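/* Complete an I/O request of a vCPU through the default client, if any. */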
int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
{
	int ret = 0;

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (vm->default_client)
		ret = acrn_ioreq_complete_request(vm->default_client,
						  vcpu, NULL);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	return ret;
}

/**
 * acrn_ioreq_range_add() - Add an iorange monitored by an ioreq client
 * @client: The ioreq client
 * @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
 * @start: Start address of iorange
 * @end: End address of iorange
 *
 * Return: 0 on success, <0 on error
 */
int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
			 u32 type, u64 start, u64 end)
{
	struct acrn_ioreq_range *range;

	if (end < start) {
		dev_err(acrn_dev.this_device,
			"Invalid IO range [0x%llx,0x%llx]\n", start, end);
		return -EINVAL;
	}

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;

	range->type = type;
	range->start = start;
	range->end = end;

	write_lock_bh(&client->range_lock);
	list_add(&range->list, &client->range_list);
	write_unlock_bh(&client->range_lock);

	return 0;
}

/**
 * acrn_ioreq_range_del() - Del an iorange monitored by an ioreq client
 * @client: The ioreq client
 * @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
 * @start: Start address of iorange
 * @end: End address of iorange
 */
void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
			  u32 type, u64 start, u64 end)
{
	struct acrn_ioreq_range *range;

	write_lock_bh(&client->range_lock);
	list_for_each_entry(range, &client->range_list, list) {
		if (type == range->type &&
		    start == range->start &&
		    end == range->end) {
			list_del(&range->list);
			kfree(range);
			break;
		}
	}
	write_unlock_bh(&client->range_lock);
}

/*
 * ioreq_task() is the execution entity of the handler thread of an I/O
 * client. The handler callback of the I/O client is called within the
 * handler thread.
 */
static int ioreq_task(void *data)
{
	struct acrn_ioreq_client *client = data;
	struct acrn_io_request *req;
	unsigned long *ioreqs_map;
	int vcpu, ret;

	/*
	 * Lockless access to ioreqs_map is safe, because
	 * 1) set_bit() and clear_bit() are atomic operations.
	 * 2) I/O requests arrive serialized. The access flow of ioreqs_map is:
	 *	set_bit() - in ioreq_work handler
	 *	Handler callback handles corresponding I/O request
	 *	clear_bit() - in handler thread (including ACRN userspace)
	 *	Mark corresponding I/O request completed
	 *	Loop again if a new I/O request occurs
	 */
	ioreqs_map = client->ioreqs_map;
	while (!kthread_should_stop()) {
		acrn_ioreq_client_wait(client);
		while (has_pending_request(client)) {
			vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
			req = client->vm->ioreq_buf->req_slot + vcpu;
			ret = client->handler(client, req);
			if (ret < 0) {
				dev_err(acrn_dev.this_device,
					"IO handle failure: %d\n", ret);
				break;
			}
			acrn_ioreq_complete_request(client, vcpu, req);
		}
	}

	return 0;
}

/*
 * For the non-default I/O clients, give them a chance to complete the
 * current I/O requests if there are any. For the default I/O client, it is
 * safe to clear all pending I/O requests because the clearing request comes
 * from ACRN userspace.
 */
void acrn_ioreq_request_clear(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	bool has_pending = false;
	unsigned long vcpu;
	int retry = 10;

	/*
	 * IO requests of this VM will be completed directly in
	 * acrn_ioreq_dispatch if ACRN_VM_FLAG_CLEARING_IOREQ flag is set.
	 */
	set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);

	/*
	 * acrn_ioreq_request_clear is only called in the VM reset case.
	 * Simply wait 100ms in total for the IO requests' completion.
	 */
	do {
		spin_lock_bh(&vm->ioreq_clients_lock);
		list_for_each_entry(client, &vm->ioreq_clients, list) {
			has_pending = has_pending_request(client);
			if (has_pending)
				break;
		}
		spin_unlock_bh(&vm->ioreq_clients_lock);

		if (has_pending)
			schedule_timeout_interruptible(HZ / 100);
	} while (has_pending && --retry > 0);
	if (retry == 0)
		dev_warn(acrn_dev.this_device,
			 "%s cannot flush pending request!\n", client->name);

	/* Clear all ioreqs belonging to the default client */
	spin_lock_bh(&vm->ioreq_clients_lock);
	client = vm->default_client;
	if (client) {
		for_each_set_bit(vcpu, client->ioreqs_map, ACRN_IO_REQUEST_MAX)
			acrn_ioreq_complete_request(client, vcpu, NULL);
	}
	spin_unlock_bh(&vm->ioreq_clients_lock);

	/* Clear ACRN_VM_FLAG_CLEARING_IOREQ flag after the clearing */
	clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
}

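/*
 * Wait until the client has pending I/O requests or is being torn down.
 * The default client (waited on by userspace) additionally returns -ENODEV
 * when it is being destroyed; other clients wake up when their handler
 * kthread is asked to stop.
 */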
int acrn_ioreq_client_wait(struct acrn_ioreq_client *client)
{
	if (client->is_default) {
		/*
		 * In the default client, a user space thread waits on the
		 * waitqueue. The is_destroying() check is used to notify
		 * user space that the client is going to be destroyed.
		 */
		wait_event_interruptible(client->wq,
					 has_pending_request(client) ||
					 is_destroying(client));
		if (is_destroying(client))
			return -ENODEV;
	} else {
		wait_event_interruptible(client->wq,
					 has_pending_request(client) ||
					 kthread_should_stop());
	}

	return 0;
}

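/* Does the I/O request target the PCI configuration address port 0xCF8? */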
static bool is_cfg_addr(struct acrn_io_request *req)
{
	return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
		(req->reqs.pio_request.address == 0xcf8));
}

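/* Does the I/O request target the PCI configuration data ports 0xCFC-0xCFF? */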
static bool is_cfg_data(struct acrn_io_request *req)
{
	return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
		((req->reqs.pio_request.address >= 0xcfc) &&
		 (req->reqs.pio_request.address < (0xcfc + 4))));
}

/* The low 8 bits of a supported pci_reg address */
#define PCI_LOWREG_MASK		0xFC
/* The high 4 bits of a supported pci_reg address */
#define PCI_HIGHREG_MASK	0xF00
/* Max number of supported functions */
#define PCI_FUNCMAX		7
/* Max number of supported slots */
#define PCI_SLOTMAX		31
/* Max number of supported buses */
#define PCI_BUSMAX		255
#define CONF1_ENABLE		0x80000000UL
/*
 * A PCI configuration space access via PIO 0xCF8 and 0xCFC normally consists
 * of the following two steps:
 *   1) write the address into port 0xCF8
 *   2) access the data in/from port 0xCFC
 * This function combines such paired PCI configuration space I/O requests
 * into one ACRN_IOREQ_TYPE_PCICFG type I/O request and continues the
 * processing.
 */
static bool handle_cf8cfc(struct acrn_vm *vm,
			  struct acrn_io_request *req, u16 vcpu)
{
	int offset, pci_cfg_addr, pci_reg;
	bool is_handled = false;

	if (is_cfg_addr(req)) {
		WARN_ON(req->reqs.pio_request.size != 4);
		if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_WRITE)
			vm->pci_conf_addr = req->reqs.pio_request.value;
		else
			req->reqs.pio_request.value = vm->pci_conf_addr;
		is_handled = true;
	} else if (is_cfg_data(req)) {
		if (!(vm->pci_conf_addr & CONF1_ENABLE)) {
			if (req->reqs.pio_request.direction ==
					ACRN_IOREQ_DIR_READ)
				req->reqs.pio_request.value = 0xffffffff;
			is_handled = true;
		} else {
			offset = req->reqs.pio_request.address - 0xcfc;

			req->type = ACRN_IOREQ_TYPE_PCICFG;
			pci_cfg_addr = vm->pci_conf_addr;
			req->reqs.pci_request.bus =
					(pci_cfg_addr >> 16) & PCI_BUSMAX;
			req->reqs.pci_request.dev =
					(pci_cfg_addr >> 11) & PCI_SLOTMAX;
			req->reqs.pci_request.func =
					(pci_cfg_addr >> 8) & PCI_FUNCMAX;
			pci_reg = (pci_cfg_addr & PCI_LOWREG_MASK) +
				  ((pci_cfg_addr >> 16) & PCI_HIGHREG_MASK);
			req->reqs.pci_request.reg = pci_reg + offset;
		}
	}

	if (is_handled)
		ioreq_complete_request(vm, vcpu, req);

	return is_handled;
}

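/* Check whether an MMIO or port I/O request falls within the given iorange. */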
static bool acrn_in_range(struct acrn_ioreq_range *range,
			  struct acrn_io_request *req)
{
	bool ret = false;

	if (range->type == req->type) {
		switch (req->type) {
		case ACRN_IOREQ_TYPE_MMIO:
			if (req->reqs.mmio_request.address >= range->start &&
			    (req->reqs.mmio_request.address +
			     req->reqs.mmio_request.size - 1) <= range->end)
				ret = true;
			break;
		case ACRN_IOREQ_TYPE_PORTIO:
			if (req->reqs.pio_request.address >= range->start &&
			    (req->reqs.pio_request.address +
			     req->reqs.pio_request.size - 1) <= range->end)
				ret = true;
			break;
		default:
			break;
		}
	}

	return ret;
}

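/*
 * Find the ioreq client whose registered iorange covers the I/O request.
 * Fall back to the default client when no registered range matches.
 */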
static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm,
						   struct acrn_io_request *req)
{
	struct acrn_ioreq_client *client, *found = NULL;
	struct acrn_ioreq_range *range;

	lockdep_assert_held(&vm->ioreq_clients_lock);

	list_for_each_entry(client, &vm->ioreq_clients, list) {
		read_lock_bh(&client->range_lock);
		list_for_each_entry(range, &client->range_list, list) {
			if (acrn_in_range(range, req)) {
				found = client;
				break;
			}
		}
		read_unlock_bh(&client->range_lock);
		if (found)
			break;
	}
	return found ? found : vm->default_client;
}

/**
 * acrn_ioreq_client_create() - Create an ioreq client
 * @vm: The VM that this client belongs to
 * @handler: The ioreq_handler of the ioreq client. acrn_hsm will create a
 *	     kernel thread and call the handler to handle I/O requests.
 * @priv: Private data for the handler
 * @is_default: If it is the default client
 * @name: The name of ioreq client
 *
 * Return: acrn_ioreq_client pointer on success, NULL on error
 */
struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
						   ioreq_handler_t handler,
						   void *priv, bool is_default,
						   const char *name)
{
	struct acrn_ioreq_client *client;

	if (!handler && !is_default) {
		dev_dbg(acrn_dev.this_device,
			"Cannot create non-default client w/o handler!\n");
		return NULL;
	}
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->handler = handler;
	client->vm = vm;
	client->priv = priv;
	client->is_default = is_default;
	if (name)
		strncpy(client->name, name, sizeof(client->name) - 1);
	rwlock_init(&client->range_lock);
	INIT_LIST_HEAD(&client->range_list);
	init_waitqueue_head(&client->wq);

	if (client->handler) {
		client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
					     client->vm->vmid, client->name);
		if (IS_ERR(client->thread)) {
			kfree(client);
			return NULL;
		}
	}

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (is_default)
		vm->default_client = client;
	else
		list_add(&client->list, &vm->ioreq_clients);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	dev_dbg(acrn_dev.this_device, "Created ioreq client %s.\n", name);
	return client;
}
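
/*
 * Illustrative sketch of a caller of this API: an in-kernel emulator that
 * traps a 4-byte MMIO window could register itself roughly as follows.
 * my_mmio_handler(), priv and base are placeholders supplied by the caller,
 * not names defined here:
 *
 *	client = acrn_ioreq_client_create(vm, my_mmio_handler, priv,
 *					  false, "my-mmio");
 *	if (client)
 *		acrn_ioreq_range_add(client, ACRN_IOREQ_TYPE_MMIO,
 *				     base, base + 3);
 *
 * The matching teardown is acrn_ioreq_range_del() followed by
 * acrn_ioreq_client_destroy().
 */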

/**
 * acrn_ioreq_client_destroy() - Destroy an ioreq client
 * @client: The ioreq client
 */
void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client)
{
	struct acrn_ioreq_range *range, *next;
	struct acrn_vm *vm = client->vm;

	dev_dbg(acrn_dev.this_device,
		"Destroy ioreq client %s.\n", client->name);
	ioreq_pause();
	set_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
	if (client->is_default)
		wake_up_interruptible(&client->wq);
	else
		kthread_stop(client->thread);

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (client->is_default)
		vm->default_client = NULL;
	else
		list_del(&client->list);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	write_lock_bh(&client->range_lock);
	list_for_each_entry_safe(range, next, &client->range_list, list) {
		list_del(&range->list);
		kfree(range);
	}
	write_unlock_bh(&client->range_lock);
	kfree(client);

	ioreq_resume();
}

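/*
 * Walk all vCPU request slots of a VM and hand each pending I/O request to
 * the matching ioreq client (or complete it directly while the VM is
 * clearing its I/O requests).
 */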
static int acrn_ioreq_dispatch(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	struct acrn_io_request *req;
	int i;

	for (i = 0; i < vm->vcpu_num; i++) {
		req = vm->ioreq_buf->req_slot + i;

		/* barrier the read of processed of acrn_io_request */
		if (smp_load_acquire(&req->processed) ==
				     ACRN_IOREQ_STATE_PENDING) {
			/* Complete the IO request directly in clearing stage */
			if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
				ioreq_complete_request(vm, i, req);
				continue;
			}
			if (handle_cf8cfc(vm, req, i))
				continue;

			spin_lock_bh(&vm->ioreq_clients_lock);
			client = find_ioreq_client(vm, req);
			if (!client) {
				dev_err(acrn_dev.this_device,
					"Failed to find ioreq client!\n");
				spin_unlock_bh(&vm->ioreq_clients_lock);
				return -EINVAL;
			}
			if (!client->is_default)
				req->kernel_handled = 1;
			else
				req->kernel_handled = 0;
			/*
			 * Add barrier() to make sure the writes are done
			 * before setting ACRN_IOREQ_STATE_PROCESSING
			 */
			smp_store_release(&req->processed,
					  ACRN_IOREQ_STATE_PROCESSING);
			set_bit(i, client->ioreqs_map);
			wake_up_interruptible(&client->wq);
			spin_unlock_bh(&vm->ioreq_clients_lock);
		}
	}

	return 0;
}

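/* Work handler: dispatch I/O requests for the VMs on acrn_vm_list. */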
static void ioreq_dispatcher(struct work_struct *work)
{
	struct acrn_vm *vm;

	read_lock(&acrn_vm_list_lock);
	list_for_each_entry(vm, &acrn_vm_list, list) {
		if (!vm->ioreq_buf)
			break;
		acrn_ioreq_dispatch(vm);
	}
	read_unlock(&acrn_vm_list_lock);
}

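/*
 * Interrupt callback registered via acrn_setup_intr_handler(): defer the
 * I/O request dispatching to the workqueue.
 */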
static void ioreq_intr_handler(void)
{
	queue_work(ioreq_wq, &ioreq_work);
}

static void ioreq_pause(void)
{
	/* Flush and unarm the handler to ensure no I/O requests pending */
	acrn_remove_intr_handler();
	drain_workqueue(ioreq_wq);
}

static void ioreq_resume(void)
{
	/* Schedule after enabling in case other clients miss interrupt */
	acrn_setup_intr_handler(ioreq_intr_handler);
	queue_work(ioreq_wq, &ioreq_work);
}

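/*
 * Register the interrupt callback and create the ordered workqueue used to
 * dispatch I/O requests.
 */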
int acrn_ioreq_intr_setup(void)
{
	acrn_setup_intr_handler(ioreq_intr_handler);
	ioreq_wq = alloc_ordered_workqueue("ioreq_wq",
					   WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!ioreq_wq) {
		dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
		acrn_remove_intr_handler();
		return -ENOMEM;
	}
	return 0;
}

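/* Tear down the I/O request workqueue and remove the interrupt callback. */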
void acrn_ioreq_intr_remove(void)
{
	if (ioreq_wq)
		destroy_workqueue(ioreq_wq);
	acrn_remove_intr_handler();
}

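/*
 * Pin the userspace page at buf_vma as the shared I/O request buffer of the
 * VM and tell the hypervisor about it via hcall_set_ioreq_buffer().
 */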
int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma)
{
	struct acrn_ioreq_buffer *set_buffer;
	struct page *page;
	int ret;

	if (vm->ioreq_buf)
		return -EEXIST;

	set_buffer = kzalloc(sizeof(*set_buffer), GFP_KERNEL);
	if (!set_buffer)
		return -ENOMEM;

	ret = pin_user_pages_fast(buf_vma, 1,
				  FOLL_WRITE | FOLL_LONGTERM, &page);
	if (unlikely(ret != 1) || !page) {
		dev_err(acrn_dev.this_device, "Failed to pin ioreq page!\n");
		ret = -EFAULT;
		goto free_buf;
	}

	vm->ioreq_buf = page_address(page);
	vm->ioreq_page = page;
	set_buffer->ioreq_buf = page_to_phys(page);
	ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
	if (ret < 0) {
		dev_err(acrn_dev.this_device, "Failed to init ioreq buffer!\n");
		unpin_user_page(page);
		vm->ioreq_buf = NULL;
		goto free_buf;
	}

	dev_dbg(acrn_dev.this_device,
		"Init ioreq buffer %pK!\n", vm->ioreq_buf);
	ret = 0;
free_buf:
	kfree(set_buffer);
	return ret;
}

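/*
 * Destroy all ioreq clients of the VM and unpin the shared I/O request
 * buffer page.
 */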
void acrn_ioreq_deinit(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client, *next;

	dev_dbg(acrn_dev.this_device,
		"Deinit ioreq buffer %pK!\n", vm->ioreq_buf);
	/* Destroy all clients belonging to this VM */
	list_for_each_entry_safe(client, next, &vm->ioreq_clients, list)
		acrn_ioreq_client_destroy(client);
	if (vm->default_client)
		acrn_ioreq_client_destroy(vm->default_client);

	if (vm->ioreq_buf && vm->ioreq_page) {
		unpin_user_page(vm->ioreq_page);
		vm->ioreq_buf = NULL;
	}
}