/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"

/*
 * GPU scratch register helper functions.
 */
/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	i = ffs(adev->gfx.scratch.free_mask);
	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
		i--;
		adev->gfx.scratch.free_mask &= ~(1u << i);
		*reg = adev->gfx.scratch.reg_base + i;
		return 0;
	}
	return -EINVAL;
}

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics).
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}
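
/*
 * Illustrative usage sketch, not part of the driver: a scratch register
 * round-trip in the style of the gfx ring tests - allocate, write a known
 * pattern over MMIO, read it back, free. The helper name is hypothetical
 * and nothing calls it.
 */
static int __maybe_unused example_scratch_roundtrip(struct amdgpu_device *adev)
{
	uint32_t scratch;
	uint32_t tmp;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);	/* write a known pattern via MMIO */
	tmp = RREG32(scratch);		/* and read it back */
	amdgpu_gfx_scratch_free(adev, scratch);

	return (tmp == 0xCAFEDEAD) ? 0 : -EINVAL;
}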

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe, mec;

	/* policy for amdgpu compute queue ownership */
	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		queue = i % adev->gfx.mec.num_queue_per_pipe;
		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
			% adev->gfx.mec.num_pipe_per_mec;
		mec = (i / adev->gfx.mec.num_queue_per_pipe)
			/ adev->gfx.mec.num_pipe_per_mec;

		/* we've run out of HW */
		if (mec >= adev->gfx.mec.num_mec)
			break;

		/* FIXME: spreading the queues across pipes causes perf regressions */
		if (0) {
			/* policy: amdgpu owns the first two queues of the first MEC */
			if (mec == 0 && queue < 2)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		} else {
			/* policy: amdgpu owns all queues in the first pipe */
			if (mec == 0 && pipe == 0)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		}
	}

	/* update the number of active compute rings */
	adev->gfx.num_compute_rings =
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* If you hit this case and edited the policy, you probably just
	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
}

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	/* walk the queue bits from the last one down; pre-decrement so
	 * test_bit() is never called with an out-of-range bit */
	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);

		/* Using pipes 2/3 from MEC 2 seems to cause problems */
		if (mec == 1 && pipe > 1)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	mutex_init(&kiq->ring_mutex);

	r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
	if (r)
		return r;

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024,
			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}
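
/*
 * Illustrative sketch, not part of the driver: how a linear queue bit
 * decomposes into (mec, pipe, queue). This mirrors the arithmetic in
 * amdgpu_gfx_compute_queue_acquire() above; the driver itself relies on
 * amdgpu_gfx_bit_to_queue() for this mapping. The helper name is
 * hypothetical.
 */
static void __maybe_unused example_bit_to_mec_pipe_queue(struct amdgpu_device *adev,
							 int bit, int *mec,
							 int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;
}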

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
			      struct amdgpu_irq_src *irq)
{
	amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute queue */
int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
				   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)\n", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)\n", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}
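
/*
 * Illustrative sketch, not part of the driver: the MQD backup indexing
 * convention used by amdgpu_gfx_compute_mqd_sw_init()/_fini() above.
 * KCQ i maps to mqd_backup[i]; the KIQ uses the extra trailing slot
 * mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]. The helper name is hypothetical.
 */
static void * __maybe_unused example_mqd_backup_slot(struct amdgpu_device *adev,
						     struct amdgpu_ring *ring)
{
	if (ring == &adev->gfx.kiq.ring)
		return adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS];

	/* compute rings live in a contiguous array, so a ring's index
	 * is its offset from the start of that array */
	return adev->gfx.mec.mqd_backup[ring - adev->gfx.compute_ring];
}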