job.c: comparison of e5855aa3e681bc417165604212c061e1c4b7cbda (old) and 47f89c10ddc439638bc0ea51a7f9872e1b7734ce (new)

--- a/job.c
+++ b/job.c
 /*
  * Tegra host1x Job
  *
  * Copyright (c) 2010-2015, NVIDIA Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.

--- 123 unchanged lines hidden ---

 }

 /*
  * Check driver supplied waitchk structs for syncpt thresholds
  * that have already been satisfied and NULL the comparison (to
  * avoid a wrap condition in the HW).
  */
 static int do_waitchks(struct host1x_job *job, struct host1x *host,
-                       struct host1x_bo *patch)
+                       struct host1x_job_gather *g)
 {
+        struct host1x_bo *patch = g->bo;
         int i;

         /* compare syncpt vs wait threshold */
         for (i = 0; i < job->num_waitchk; i++) {
                 struct host1x_waitchk *wait = &job->waitchk[i];
                 struct host1x_syncpt *sp =
                         host1x_syncpt_get(host, wait->syncpt_id);

--- 10 unchanged lines hidden ---

                         host1x_syncpt_read_min(sp));

                 if (host1x_syncpt_is_expired(sp, wait->thresh)) {
                         dev_dbg(host->dev,
                                 "drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
                                 wait->syncpt_id, sp->name, wait->thresh,
                                 host1x_syncpt_read_min(sp));

-                        host1x_syncpt_patch_offset(sp, patch, wait->offset);
+                        host1x_syncpt_patch_offset(sp, patch,
+                                                   g->offset + wait->offset);
                 }

                 wait->bo = NULL;
         }

         return 0;
 }

--- 87 unchanged lines hidden ---
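The two hunks above switch do_waitchks() from taking a bare buffer object to taking the whole gather, so the wait patch location can include the gather's own byte offset (g->offset + wait->offset). For orientation, here is a rough sketch of the gather descriptor with the field set and types inferred purely from how this diff uses g->bo, g->offset, g->base and g->handled; it is not copied from the driver's headers (the real definition lives in the driver's job.h and may have more members):

#include <linux/types.h>

struct host1x_bo;                       /* buffer object, opaque here */

/* Illustrative sketch only, inferred from usage in this diff. */
struct host1x_job_gather {
        struct host1x_bo *bo;           /* BO holding the gather words (g->bo) */
        unsigned int offset;            /* byte offset of the gather data (g->offset) */
        dma_addr_t base;                /* address the channel fetches from (g->base) */
        bool handled;                   /* set once a duplicate BO has been patched */
};

Handing the helpers the gather instead of just g->bo keeps bo and offset together, which the relocation and gather-patching hunks below rely on.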
         return 0;

 unpin:
         host1x_job_unpin(job);
         return err;
 }

-static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
 {
         int i = 0;
         u32 last_page = ~0;
         void *cmdbuf_page_addr = NULL;
+        struct host1x_bo *cmdbuf = g->bo;

         /* pin & patch the relocs for one gather */
         for (i = 0; i < job->num_relocs; i++) {
                 struct host1x_reloc *reloc = &job->relocarray[i];
                 u32 reloc_addr = (job->reloc_addr_phys[i] +
                                   reloc->target.offset) >> reloc->shift;
                 u32 *target;

                 /* skip all other gathers */
                 if (cmdbuf != reloc->cmdbuf.bo)
                         continue;

+                if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
+                        target = (u32 *)job->gather_copy_mapped +
+                                        reloc->cmdbuf.offset / sizeof(u32) +
+                                        g->offset / sizeof(u32);
+                        goto patch_reloc;
+                }
+
                 if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
                         if (cmdbuf_page_addr)
                                 host1x_bo_kunmap(cmdbuf, last_page,
                                                  cmdbuf_page_addr);

                         cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
                                         reloc->cmdbuf.offset >> PAGE_SHIFT);
                         last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

                         if (unlikely(!cmdbuf_page_addr)) {
                                 pr_err("Could not map cmdbuf for relocation\n");
                                 return -ENOMEM;
                         }
                 }

                 target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
+patch_reloc:
                 *target = reloc_addr;
         }

         if (cmdbuf_page_addr)
                 host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

         return 0;
 }

--- 255 unchanged lines hidden ---
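With the firewall enabled, the relocation is now patched straight into the kernel's gather copy (job->gather_copy_mapped) rather than into a kmapped page of the user command buffer, and the new patch_reloc label lets both paths share the final *target = reloc_addr store. Because gather_copy_mapped is used as a pointer to 32-bit words, the byte offsets are scaled down by sizeof(u32) before the pointer addition. The short standalone snippet below (ordinary user-space C with made-up values, not driver code) only demonstrates that word-index arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t copy[8] = { 0 };                 /* stands in for gather_copy_mapped */
        size_t gather_off = 4 * sizeof(uint32_t); /* plays the role of g->offset */
        size_t reloc_off = 2 * sizeof(uint32_t);  /* plays the role of reloc->cmdbuf.offset */

        /* word indexing, as in the firewall branch of do_relocs() */
        uint32_t *target = copy + reloc_off / sizeof(uint32_t) +
                           gather_off / sizeof(uint32_t);

        /* byte indexing reaches the same word when the offsets are word aligned */
        uint32_t *same = (uint32_t *)((uint8_t *)copy + gather_off + reloc_off);

        assert(target == same);
        *target = 0xdeadbeef;                     /* the "patched" address word */

        printf("patched word index %td\n", target - copy);
        return 0;
}

Compiled on its own, the assert passes and the program reports word index 6, i.e. the word at byte offset gather_off + reloc_off.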
         for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
                 host1x_syncpt_load(host->syncpt + i);

         /* pin memory */
         err = pin_job(host, job);
         if (err)
                 goto out;

+        if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
+                err = copy_gathers(job, dev);
+                if (err)
+                        goto out;
+        }
+
         /* patch gathers */
         for (i = 0; i < job->num_gathers; i++) {
                 struct host1x_job_gather *g = &job->gathers[i];

                 /* process each gather mem only once */
                 if (g->handled)
                         continue;

-                g->base = job->gather_addr_phys[i];
+                /* copy_gathers() sets gathers base if firewall is enabled */
+                if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+                        g->base = job->gather_addr_phys[i];

                 for (j = i + 1; j < job->num_gathers; j++) {
                         if (job->gathers[j].bo == g->bo) {
                                 job->gathers[j].handled = true;
                                 job->gathers[j].base = g->base;
                         }
                 }

-                err = do_relocs(job, g->bo);
+                err = do_relocs(job, g);
                 if (err)
-                        goto out;
+                        break;

-                err = do_waitchks(job, host, g->bo);
+                err = do_waitchks(job, host, g);
                 if (err)
-                        goto out;
+                        break;
         }

-        if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
-                goto out;
-
-        err = copy_gathers(job, dev);
 out:
         if (err)
                 host1x_job_unpin(job);
         wmb();

         return err;
 }
 EXPORT_SYMBOL(host1x_job_pin);

--- 40 unchanged lines hidden ---
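In host1x_job_pin(), copy_gathers() now runs before the patching loop when the firewall is enabled (and, per the new comment, takes over setting the gather base), so the loop assigns g->base from gather_addr_phys[] only in the non-firewall case, and a failure in do_relocs() or do_waitchks() breaks out of the loop into the common out: cleanup. The de-duplication of gathers that share a buffer object is unchanged; the standalone model below (plain C with made-up BO ids and addresses, not driver code) just shows that each shared buffer is patched once and later duplicates inherit the first gather's base:

#include <stdbool.h>
#include <stdio.h>

struct gather {
        int bo;                 /* stands in for struct host1x_bo * */
        unsigned long base;     /* stands in for g->base */
        bool handled;
};

int main(void)
{
        struct gather gathers[] = {
                { .bo = 1 }, { .bo = 2 }, { .bo = 1 }, { .bo = 2 },
        };
        unsigned long addr_phys[] = { 0x1000, 0x2000, 0x3000, 0x4000 };
        unsigned int num = sizeof(gathers) / sizeof(gathers[0]);
        unsigned int i, j;

        for (i = 0; i < num; i++) {
                struct gather *g = &gathers[i];

                /* process each gather mem only once */
                if (g->handled)
                        continue;

                g->base = addr_phys[i]; /* non-firewall case in the diff */

                /* later gathers with the same BO inherit the base */
                for (j = i + 1; j < num; j++) {
                        if (gathers[j].bo == g->bo) {
                                gathers[j].handled = true;
                                gathers[j].base = g->base;
                        }
                }

                /* this is where do_relocs()/do_waitchks() run, once per BO */
                printf("patched gather %u (bo %d, base 0x%lx)\n", i, g->bo, g->base);
        }

        return 0;
}

Its output lists gathers 0 and 1 only, since gathers 2 and 3 reuse BOs 1 and 2 and are marked handled.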