/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <core/notify.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>

/* The event core invokes these when the first notifier on a buffer is
 * armed, and when the last one is disarmed, so a fault buffer is only
 * active in hardware while someone is listening.
 */
static void
nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.fini(fault->buffer[index]);
}

static void
nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.init(fault->buffer[index]);
}

/* Notify requests carry no payload; each buffer exposes a single event
 * type, with the event index selecting the buffer.
 */
static int
nvkm_fault_ntfy_ctor(struct nvkm_object *object, void *argv, u32 argc,
		     struct nvkm_notify *notify)
{
	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
	if (argc == 0) {
		notify->size = 0;
		notify->types = 1;
		notify->index = buffer->id;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fault_ntfy = {
	.ctor = nvkm_fault_ntfy_ctor,
	.init = nvkm_fault_ntfy_init,
	.fini = nvkm_fault_ntfy_fini,
};

static void
nvkm_fault_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	return fault->func->intr(fault);
}

static int
nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->fini)
		fault->func->fini(fault);
	return 0;
}

static int
nvkm_fault_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->init)
		fault->func->init(fault);
	return 0;
}

/* Allocate one fault buffer: instance memory sized for the number of
 * entries the implementation reports, mapped through BAR2 so the host
 * can read it.
 */
static int
nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(device);
	struct nvkm_fault_buffer *buffer;
	int ret;

	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
		return -ENOMEM;
	buffer->fault = fault;
	buffer->id = id;
	buffer->entries = fault->func->buffer.entries(buffer);
	fault->buffer[id] = buffer;

	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
			      fault->func->buffer.entry_size, 0x1000, true,
			      &buffer->mem);
	if (ret)
		return ret;

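	/* Allocate a 4KiB-page (shift 12) virtual range in the BAR2 VMM
	 * and map the buffer into it.
	 */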
	ret = nvkm_vmm_get(bar2, 12, nvkm_memory_size(buffer->mem),
			   &buffer->vma);
	if (ret)
		return ret;

	return nvkm_memory_map(buffer->mem, 0, bar2, buffer->vma, NULL, 0);
}

static int
nvkm_fault_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
		if (i < fault->func->buffer.nr) {
			ret = nvkm_fault_oneinit_buffer(fault, i);
			if (ret)
				return ret;
			fault->buffer_nr = i + 1;
		}
	}

	return nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
			       &fault->event);
}

static void *
nvkm_fault_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(subdev->device);
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int i;

	nvkm_event_fini(&fault->event);

	for (i = 0; i < fault->buffer_nr; i++) {
		if (fault->buffer[i]) {
			nvkm_vmm_put(bar2, &fault->buffer[i]->vma);
			nvkm_memory_unref(&fault->buffer[i]->mem);
			kfree(fault->buffer[i]);
		}
	}

	return fault;
}

static const struct nvkm_subdev_func
nvkm_fault = {
	.dtor = nvkm_fault_dtor,
	.oneinit = nvkm_fault_oneinit,
	.init = nvkm_fault_init,
	.fini = nvkm_fault_fini,
	.intr = nvkm_fault_intr,
};

int
nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
		int index, struct nvkm_fault **pfault)
{
	struct nvkm_fault *fault;
	if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
	fault->func = func;
	return 0;
}