// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	/* TODO: To be added later. */
	return -EOPNOTSUPP;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int r;

	r = kvm_riscv_stage2_alloc_pgd(kvm);
	if (r)
		return r;

	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
	atomic_set(&kvm->online_vcpus, 0);
}

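/*
 * Report KVM capability support to userspace. Each capability is probed
 * individually via the KVM_CHECK_EXTENSION ioctl: zero means unsupported,
 * while positive values mean supported (or carry a count, as for
 * KVM_CAP_NR_VCPUS and KVM_CAP_NR_MEMSLOTS).
 */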
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

/*
 * No RISC-V specific VM ioctls are implemented yet, so reject anything
 * the generic kvm_vm_ioctl() handler passes down to the architecture.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}