// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthrough DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include "ptdma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}

static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct pt_dma_desc, vd);
}

static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_free_chan_resources(&chan->vc);
}

static void pt_synchronize(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_synchronize(&chan->vc);
}

static void pt_do_cleanup(struct virt_dma_desc *vd)
{
	struct pt_dma_desc *desc = to_pt_desc(vd);
	struct pt_device *pt = desc->pt;

	kmem_cache_free(pt->dma_desc_cache, desc);
}

static int pt_dma_start_desc(struct pt_dma_desc *desc)
{
	struct pt_passthru_engine *pt_engine;
	struct pt_device *pt;
	struct pt_cmd *pt_cmd;
	struct pt_cmd_queue *cmd_q;

	desc->issued_to_hw = 1;

	pt_cmd = &desc->pt_cmd;
	pt = pt_cmd->pt;
	cmd_q = &pt->cmd_q;
	pt_engine = &pt_cmd->passthru;

	pt->tdata.cmd = pt_cmd;

	/* Execute the command */
	pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

	return 0;
}

static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
	/* Get the next DMA descriptor on the active list */
	struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);

	return vd ? to_pt_desc(vd) : NULL;
}

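/*
 * Complete the descriptor that has just finished on the hardware (if any),
 * run its callback, and return the next descriptor that still needs to be
 * issued, or NULL once the active list is empty.
 */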
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
						 struct pt_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			if (!desc->issued_to_hw) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;
			}

			tx_desc = &desc->vd.tx;
			vd = &desc->vd;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->vc.lock, flags);

		if (desc) {
			if (desc->status != DMA_COMPLETE) {
				if (desc->status != DMA_ERROR)
					desc->status = DMA_COMPLETE;

				dma_cookie_complete(tx_desc);
				dma_descriptor_unmap(tx_desc);
				list_del(&desc->vd.node);
			} else {
				/* Don't handle it twice */
				tx_desc = NULL;
			}
		}

		desc = pt_next_dma_desc(chan);

		spin_unlock_irqrestore(&chan->vc.lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
			dma_run_dependencies(tx_desc);
			vchan_vdesc_fini(vd);
		}
	} while (desc);

	return NULL;
}

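/*
 * Callback run when a passthru command completes. It is also called with
 * err == 0 from pt_issue_pending() and pt_resume() to kick off processing.
 * The finished descriptor is completed and the next descriptor on the
 * active list is submitted; a descriptor that fails to start is marked
 * DMA_ERROR and the loop moves on to the next one.
 */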
static void pt_cmd_callback(void *data, int err)
{
	struct pt_dma_desc *desc = data;
	struct dma_chan *dma_chan;
	struct pt_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	dma_chan = desc->vd.tx.chan;
	chan = to_pt_chan(dma_chan);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = pt_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc)
			break;

		ret = pt_dma_start_desc(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}
}

static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
					     unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vc, &desc->vd, flags);

	desc->pt = chan->pt;
	desc->issued_to_hw = 0;
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
					  dma_addr_t dst,
					  dma_addr_t src,
					  unsigned int len,
					  unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_passthru_engine *pt_engine;
	struct pt_dma_desc *desc;
	struct pt_cmd *pt_cmd;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	pt_cmd = &desc->pt_cmd;
	pt_cmd->pt = chan->pt;
	pt_engine = &pt_cmd->passthru;
	pt_cmd->engine = PT_ENGINE_PASSTHRU;
	pt_engine->src_dma = src;
	pt_engine->dst_dma = dst;
	pt_engine->src_len = len;
	pt_cmd->pt_cmd_callback = pt_cmd_callback;
	pt_cmd->data = desc;

	desc->len = len;

	return desc;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
		   dma_addr_t src, size_t len, unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = pt_create_desc(dma_chan, dst, src, len, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}

static void pt_issue_pending(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;
	unsigned long flags;
	bool engine_is_idle = true;

	spin_lock_irqsave(&chan->vc.lock, flags);

	desc = pt_next_dma_desc(chan);
	if (desc)
		engine_is_idle = false;

	vchan_issue_pending(&chan->vc);

	desc = pt_next_dma_desc(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/*
	 * If there was nothing active, start processing; desc may be NULL
	 * when nothing has been issued, in which case there is nothing to do.
	 */
	if (engine_is_idle && desc)
		pt_cmd_callback(desc, 0);
}

static int pt_pause(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_stop_queue(&chan->pt->cmd_q);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int pt_resume(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	pt_start_queue(&chan->pt->cmd_q);
	desc = pt_next_dma_desc(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was something active, re-start */
	if (desc)
		pt_cmd_callback(desc, 0);

	return 0;
}

static int pt_terminate_all(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	vchan_free_chan_resources(&chan->vc);

	return 0;
}

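/**
 * pt_dmaengine_register - register the passthru engine with the dmaengine core
 * @pt: passthrough DMA device to expose as a DMA channel
 *
 * Allocates the single channel and its descriptor cache, advertises the
 * device capabilities (private memcpy/interrupt, 64-byte bus width,
 * mem-to-mem only), wires up the channel callbacks and registers the
 * dma_device with the dmaengine core.
 *
 * Return: 0 on success, a negative errno otherwise.
 */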
int pt_dmaengine_register(struct pt_device *pt)
{
	struct pt_dma_chan *chan;
	struct dma_device *dma_dev = &pt->dma_dev;
	char *cmd_cache_name;
	char *desc_cache_name;
	int ret;

	pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
				       GFP_KERNEL);
	if (!pt->pt_dma_chan)
		return -ENOMEM;

	cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					"%s-dmaengine-cmd-cache",
					dev_name(pt->dev));
	if (!cmd_cache_name)
		return -ENOMEM;

	desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					 "%s-dmaengine-desc-cache",
					 dev_name(pt->dev));
	if (!desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
					       sizeof(struct pt_dma_desc), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = pt->dev;
	dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/*
	 * PTDMA is intended to be used with the AMD NTB devices, hence
	 * marking it as DMA_PRIVATE.
	 */
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);

	chan = pt->pt_dma_chan;
	chan->pt = pt;

	/* Set base and prep routines */
	dma_dev->device_free_chan_resources = pt_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
	dma_dev->device_issue_pending = pt_issue_pending;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_pause = pt_pause;
	dma_dev->device_resume = pt_resume;
	dma_dev->device_terminate_all = pt_terminate_all;
	dma_dev->device_synchronize = pt_synchronize;

	chan->vc.desc_free = pt_do_cleanup;
	vchan_init(&chan->vc, dma_dev);

	dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(pt->dma_desc_cache);

err_cache:
	kmem_cache_destroy(pt->dma_cmd_cache);

	return ret;
}

void pt_dmaengine_unregister(struct pt_device *pt)
{
	struct dma_device *dma_dev = &pt->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(pt->dma_desc_cache);
	kmem_cache_destroy(pt->dma_cmd_cache);
}
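
/*
 * Illustrative sketch (not part of this driver): because the device is
 * registered with DMA_PRIVATE, a client such as the AMD NTB transport
 * requests a dedicated channel and drives it through the generic
 * dmaengine memcpy API. The filter function, callback and DMA addresses
 * below are hypothetical placeholders.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_ptdma_filter, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx) {
 *		dma_release_channel(chan);
 *		return -EIO;
 *	}
 *
 *	tx->callback = my_done_callback;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */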