/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <core/notify.h>

/* Notify event backend: a fault buffer's interrupt is enabled while at least
 * one notifier is armed on it, and disabled again when the last one goes away.
 */
static void
nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.intr(fault->buffer[index], false);
}

static void
nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.intr(fault->buffer[index], true);
}

static int
nvkm_fault_ntfy_ctor(struct nvkm_object *object, void *argv, u32 argc,
		     struct nvkm_notify *notify)
{
	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
	if (argc == 0) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = buffer->id;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fault_ntfy = {
	.ctor = nvkm_fault_ntfy_ctor,
	.init = nvkm_fault_ntfy_init,
	.fini = nvkm_fault_ntfy_fini,
};

static void
nvkm_fault_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	return fault->func->intr(fault);
}

static int
nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->fini)
		fault->func->fini(fault);
	return 0;
}

static int
nvkm_fault_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->init)
		fault->func->init(fault);
	return 0;
}

/* Allocate and map the memory backing one hardware fault buffer. */
static int
nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_buffer *buffer;
	int ret;

	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
		return -ENOMEM;
	buffer->fault = fault;
	buffer->id = id;
	fault->func->buffer.info(buffer);
	fault->buffer[id] = buffer;

	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
			      fault->func->buffer.entry_size, 0x1000, true,
			      &buffer->mem);
	if (ret)
		return ret;

	/* Pin fault buffer in BAR2. */
	buffer->addr = fault->func->buffer.pin(buffer);
	if (buffer->addr == ~0ULL)
		return -EFAULT;

	return 0;
}

static int
nvkm_fault_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
		if (i < fault->func->buffer.nr) {
			ret = nvkm_fault_oneinit_buffer(fault, i);
			if (ret)
				return ret;
			fault->buffer_nr = i + 1;
		}
	}

	ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
			      &fault->event);
	if (ret)
		return ret;

	if (fault->func->oneinit)
		ret = fault->func->oneinit(fault);
	return ret;
}

static void *
nvkm_fault_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int i;

	nvkm_notify_fini(&fault->nrpfb);
	nvkm_event_fini(&fault->event);

	for (i = 0; i < fault->buffer_nr; i++) {
		if (fault->buffer[i]) {
			nvkm_memory_unref(&fault->buffer[i]->mem);
			kfree(fault->buffer[i]);
		}
	}

	return fault;
}

static const struct nvkm_subdev_func
nvkm_fault = {
	.dtor = nvkm_fault_dtor,
	.oneinit = nvkm_fault_oneinit,
	.init = nvkm_fault_init,
	.fini = nvkm_fault_fini,
	.intr = nvkm_fault_intr,
};

/* Common constructor used by the chip-specific implementations. */
int
nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
		int index, struct nvkm_fault **pfault)
{
	struct nvkm_fault *fault;
	if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
	fault->func = func;
	fault->user.ctor = nvkm_ufault_new;
	fault->user.base = func->user.base;
	return 0;
}
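
/*
 * Illustrative sketch only (not part of the original file): roughly how a
 * chip-specific backend might fill in nvkm_fault_func, based solely on the
 * callbacks and fields dereferenced above.  Every gxxx_fault_* name and the
 * buffer.nr/entry_size values are hypothetical placeholders, the handler
 * signatures are inferred from the call sites in this file, and the handlers
 * are assumed to be implemented elsewhere.
 */
void gxxx_fault_intr(struct nvkm_fault *);
void gxxx_fault_init(struct nvkm_fault *);
void gxxx_fault_fini(struct nvkm_fault *);
void gxxx_fault_buffer_info(struct nvkm_fault_buffer *);
u64  gxxx_fault_buffer_pin(struct nvkm_fault_buffer *);
void gxxx_fault_buffer_intr(struct nvkm_fault_buffer *, bool enable);

static const struct nvkm_fault_func
gxxx_fault = {
	.intr = gxxx_fault_intr,	/* top-level fault interrupt handler */
	.init = gxxx_fault_init,	/* optional, called from nvkm_fault_init() */
	.fini = gxxx_fault_fini,	/* optional, called from nvkm_fault_fini() */
	.buffer.nr = 1,			/* number of hardware fault buffers */
	.buffer.entry_size = 32,	/* bytes per fault entry (assumed) */
	.buffer.info = gxxx_fault_buffer_info,
	.buffer.pin  = gxxx_fault_buffer_pin,	/* returns ~0ULL on failure */
	.buffer.intr = gxxx_fault_buffer_intr,
	/* .user.base would also name the user-visible class copied by
	 * nvkm_fault_new_() from func->user.base above. */
};

int
gxxx_fault_new(struct nvkm_device *device, int index,
	       struct nvkm_fault **pfault)
{
	return nvkm_fault_new_(&gxxx_fault, device, index, pfault);
}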