/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>

static void
nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.intr(fault->buffer[index], false);
}

static void
nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
	fault->func->buffer.intr(fault->buffer[index], true);
}

static const struct nvkm_event_func
nvkm_fault_ntfy = {
	.init = nvkm_fault_ntfy_init,
	.fini = nvkm_fault_ntfy_fini,
};

static void
nvkm_fault_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	return fault->func->intr(fault);
}

static int
nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->fini)
		fault->func->fini(fault);
	return 0;
}

static int
nvkm_fault_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	if (fault->func->init)
		fault->func->init(fault);
	return 0;
}

static int
nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_buffer *buffer;
	int ret;

	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
		return -ENOMEM;
	buffer->fault = fault;
	buffer->id = id;
	fault->func->buffer.info(buffer);
	fault->buffer[id] = buffer;

	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
			      fault->func->buffer.entry_size, 0x1000, true,
			      &buffer->mem);
	if (ret)
		return ret;

	/* Pin fault buffer in BAR2. */
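	/*
	 * buffer.pin() is expected to return the BAR2 address of the buffer
	 * memory, or ~0ULL if the mapping failed (translated to -EFAULT
	 * below).
	 */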
	buffer->addr = fault->func->buffer.pin(buffer);
	if (buffer->addr == ~0ULL)
		return -EFAULT;

	return 0;
}

static int
nvkm_fault_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
		if (i < fault->func->buffer.nr) {
			ret = nvkm_fault_oneinit_buffer(fault, i);
			if (ret)
				return ret;
			fault->buffer_nr = i + 1;
		}
	}

	ret = nvkm_event_init(&nvkm_fault_ntfy, subdev, 1, fault->buffer_nr, &fault->event);
	if (ret)
		return ret;

	if (fault->func->oneinit)
		ret = fault->func->oneinit(fault);
	return ret;
}

static void *
nvkm_fault_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_fault *fault = nvkm_fault(subdev);
	int i;

	nvkm_event_ntfy_del(&fault->nrpfb);
	nvkm_event_fini(&fault->event);

	for (i = 0; i < fault->buffer_nr; i++) {
		if (fault->buffer[i]) {
			nvkm_memory_unref(&fault->buffer[i]->mem);
			kfree(fault->buffer[i]);
		}
	}

	return fault;
}

static const struct nvkm_subdev_func
nvkm_fault = {
	.dtor = nvkm_fault_dtor,
	.oneinit = nvkm_fault_oneinit,
	.init = nvkm_fault_init,
	.fini = nvkm_fault_fini,
	.intr = nvkm_fault_intr,
};

int
nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
		enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault)
{
	struct nvkm_fault *fault;
	if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_fault, device, type, inst, &fault->subdev);
	fault->func = func;
	fault->user.ctor = nvkm_ufault_new;
	fault->user.base = func->user.base;
	return 0;
}
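
/*
 * Illustrative sketch only (not part of the driver): a chip-specific
 * implementation is expected to supply an nvkm_fault_func providing the
 * hooks used above -- intr, the optional init/fini/oneinit hooks, the
 * buffer callbacks (nr, entry_size, info, pin, intr) and user.base -- and
 * to construct the subdev through nvkm_fault_new_().  The gxxx_* names and
 * values below are hypothetical placeholders; the real structure layout is
 * defined in priv.h and the per-chip sources.
 *
 *	static const struct nvkm_fault_func
 *	gxxx_fault = {
 *		.intr = gxxx_fault_intr,
 *		.buffer.nr = 1,
 *		.buffer.entry_size = 32,
 *		.buffer.info = gxxx_fault_buffer_info,
 *		.buffer.pin = gxxx_fault_buffer_pin,
 *		.buffer.intr = gxxx_fault_buffer_intr,
 *	};
 *
 *	int
 *	gxxx_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type,
 *		       int inst, struct nvkm_fault **pfault)
 *	{
 *		return nvkm_fault_new_(&gxxx_fault, device, type, inst, pfault);
 *	}
 */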