Lines matching +full:dclk +full:- +full:div in drivers/gpu/drm/radeon/radeon_uvd.c (Linux kernel, radeon UVD driver)
72 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); in radeon_uvd_init()
74 switch (rdev->family) { in radeon_uvd_init()
134 return -EINVAL; in radeon_uvd_init()
137 rdev->uvd.fw_header_present = false; in radeon_uvd_init()
138 rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES; in radeon_uvd_init()
141 r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); in radeon_uvd_init()
143 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", in radeon_uvd_init()
146 struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data; in radeon_uvd_init()
149 r = radeon_ucode_validate(rdev->uvd_fw); in radeon_uvd_init()
153 rdev->uvd.fw_header_present = true; in radeon_uvd_init()
155 family_id = (__force u32)(hdr->ucode_version) & 0xff; in radeon_uvd_init()
156 version_major = (le32_to_cpu((__force __le32)(hdr->ucode_version)) in radeon_uvd_init()
158 version_minor = (le32_to_cpu((__force __le32)(hdr->ucode_version)) in radeon_uvd_init()
168 rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES; in radeon_uvd_init()
178 r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev); in radeon_uvd_init()
180 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", in radeon_uvd_init()
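The driver first tries the explicit-header firmware image (fw_name), validates it with radeon_ucode_validate(), and unpacks the family id and major/minor version from hdr->ucode_version (the truncated lines above); only if that fails does it fall back to the legacy image (legacy_fw_name). A small standalone C illustration of the unpacking, with the shift positions assumed from the radeon ucode header layout (verify against your tree) and a made-up version word:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical ucode_version word, already converted from LE */
		uint32_t ucode_version = 0x01000940;
		unsigned family = ucode_version & 0xff;          /* low byte   */
		unsigned minor  = (ucode_version >> 8) & 0xff;   /* bits 8-15  */
		unsigned major  = (ucode_version >> 24) & 0xff;  /* bits 24-31 */

		printf("fw %u.%u, family 0x%02x\n", major, minor, family);
		return 0;
	}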
186 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + in radeon_uvd_init()
188 RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles; in radeon_uvd_init()
191 NULL, &rdev->uvd.vcpu_bo); in radeon_uvd_init()
193 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); in radeon_uvd_init()
197 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); in radeon_uvd_init()
199 radeon_bo_unref(&rdev->uvd.vcpu_bo); in radeon_uvd_init()
200 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); in radeon_uvd_init()
204 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, in radeon_uvd_init()
205 &rdev->uvd.gpu_addr); in radeon_uvd_init()
207 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_init()
208 radeon_bo_unref(&rdev->uvd.vcpu_bo); in radeon_uvd_init()
209 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); in radeon_uvd_init()
213 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); in radeon_uvd_init()
215 dev_err(rdev->dev, "(%d) UVD map failed\n", r); in radeon_uvd_init()
219 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_init()
221 for (i = 0; i < rdev->uvd.max_handles; ++i) { in radeon_uvd_init()
222 atomic_set(&rdev->uvd.handles[i], 0); in radeon_uvd_init()
223 rdev->uvd.filp[i] = NULL; in radeon_uvd_init()
224 rdev->uvd.img_size[i] = 0; in radeon_uvd_init()
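Buffer setup above follows the usual radeon BO discipline: reserve, pin into VRAM, kmap for CPU access, then unreserve, with each failure path releasing exactly what was acquired before it. A condensed sketch of that acquire/release ordering (kernel-style C using only the calls visible above; the goto labels are illustrative, not the driver's exact error paths):

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r)
		goto err_unref;

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);	/* fills the GPU address */
	if (r)
		goto err_unreserve;

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r)
		goto err_unpin;

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);	/* pin and mapping stay */
	return 0;

	err_unpin:
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
	err_unreserve:
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	err_unref:
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		return r;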
234 if (rdev->uvd.vcpu_bo == NULL) in radeon_uvd_fini()
237 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); in radeon_uvd_fini()
239 radeon_bo_kunmap(rdev->uvd.vcpu_bo); in radeon_uvd_fini()
240 radeon_bo_unpin(rdev->uvd.vcpu_bo); in radeon_uvd_fini()
241 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_fini()
244 radeon_bo_unref(&rdev->uvd.vcpu_bo); in radeon_uvd_fini()
246 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); in radeon_uvd_fini()
248 release_firmware(rdev->uvd_fw); in radeon_uvd_fini()
255 if (rdev->uvd.vcpu_bo == NULL) in radeon_uvd_suspend()
258 for (i = 0; i < rdev->uvd.max_handles; ++i) { in radeon_uvd_suspend()
259 uint32_t handle = atomic_read(&rdev->uvd.handles[i]); in radeon_uvd_suspend()
275 rdev->uvd.filp[i] = NULL; in radeon_uvd_suspend()
276 atomic_set(&rdev->uvd.handles[i], 0); in radeon_uvd_suspend()
288 if (rdev->uvd.vcpu_bo == NULL) in radeon_uvd_resume()
289 return -EINVAL; in radeon_uvd_resume()
291 memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); in radeon_uvd_resume()
293 size = radeon_bo_size(rdev->uvd.vcpu_bo); in radeon_uvd_resume()
294 size -= rdev->uvd_fw->size; in radeon_uvd_resume()
296 ptr = rdev->uvd.cpu_addr; in radeon_uvd_resume()
297 ptr += rdev->uvd_fw->size; in radeon_uvd_resume()
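The resume fragment stops just short of the trailing clear: after memcpy_toio() copies the firmware to the start of the mapped buffer, the remainder (heap, stack and per-session storage) is zeroed. A minimal sketch of that step, assuming the memset_io() call that follows the lines above in the upstream file:

	size = radeon_bo_size(rdev->uvd.vcpu_bo) - rdev->uvd_fw->size;
	ptr  = rdev->uvd.cpu_addr + rdev->uvd_fw->size;  /* past the firmware */
	memset_io((void __iomem *)ptr, 0, size);         /* zero the rest */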
309 for (i = 0; i < rbo->placement.num_placement; ++i) { in radeon_uvd_force_into_uvd_segment()
310 rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; in radeon_uvd_force_into_uvd_segment()
311 rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; in radeon_uvd_force_into_uvd_segment()
319 if (rbo->placement.num_placement > 1) in radeon_uvd_force_into_uvd_segment()
323 rbo->placements[1] = rbo->placements[0]; in radeon_uvd_force_into_uvd_segment()
324 rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; in radeon_uvd_force_into_uvd_segment()
325 rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; in radeon_uvd_force_into_uvd_segment()
326 rbo->placement.num_placement++; in radeon_uvd_force_into_uvd_segment()
327 rbo->placement.num_busy_placement++; in radeon_uvd_force_into_uvd_segment()
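The placement clamp restricts buffers to a 256MB window: with 4KiB pages, (256 * 1024 * 1024) >> PAGE_SHIFT = 0x10000000 >> 12 = 65536, so lpfn caps each placement at page frame 65536. The duplicated entry then shifts both fpfn and lpfn up by those same 65536 frames, offering the next 256MB window as a fallback placement.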
333 for (i = 0; i < rdev->uvd.max_handles; ++i) { in radeon_uvd_free_handles()
334 uint32_t handle = atomic_read(&rdev->uvd.handles[i]); in radeon_uvd_free_handles()
335 if (handle != 0 && rdev->uvd.filp[i] == filp) { in radeon_uvd_free_handles()
350 rdev->uvd.filp[i] = NULL; in radeon_uvd_free_handles()
351 atomic_set(&rdev->uvd.handles[i], 0); in radeon_uvd_free_handles()
425 return -EINVAL; in radeon_uvd_cs_msg_decode()
430 return -EINVAL; in radeon_uvd_cs_msg_decode()
436 return -EINVAL; in radeon_uvd_cs_msg_decode()
456 if (p->rdev->family >= CHIP_PALM) in radeon_uvd_validate_codec()
463 return -EINVAL; in radeon_uvd_validate_codec()
477 return -EINVAL; in radeon_uvd_cs_msg()
494 return -EINVAL; in radeon_uvd_cs_msg()
508 for (i = 0; i < p->rdev->uvd.max_handles; ++i) { in radeon_uvd_cs_msg()
509 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { in radeon_uvd_cs_msg()
511 return -EINVAL; in radeon_uvd_cs_msg()
514 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { in radeon_uvd_cs_msg()
515 p->rdev->uvd.filp[i] = p->filp; in radeon_uvd_cs_msg()
516 p->rdev->uvd.img_size[i] = img_size; in radeon_uvd_cs_msg()
522 return -EINVAL; in radeon_uvd_cs_msg()
534 for (i = 0; i < p->rdev->uvd.max_handles; ++i) { in radeon_uvd_cs_msg()
535 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { in radeon_uvd_cs_msg()
536 if (p->rdev->uvd.filp[i] != p->filp) { in radeon_uvd_cs_msg()
538 return -EINVAL; in radeon_uvd_cs_msg()
545 return -ENOENT; in radeon_uvd_cs_msg()
549 for (i = 0; i < p->rdev->uvd.max_handles; ++i) in radeon_uvd_cs_msg()
550 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); in radeon_uvd_cs_msg()
559 return -EINVAL; in radeon_uvd_cs_msg()
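Handle slots are claimed lock-free: the create path first scans every slot for a duplicate handle, then atomic_cmpxchg(&handles[i], 0, handle) claims a slot only if it still reads 0, so concurrent submitters can race without taking a lock. A runnable user-space analogue of the claim step using C11 atomics (illustrative names, not kernel API):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_HANDLES 10	/* stands in for rdev->uvd.max_handles */

	static atomic_uint handles[MAX_HANDLES];	/* 0 == slot free */

	/* Claim a free slot for `handle`; returns the slot index or -1. */
	static int claim_handle(uint32_t handle)
	{
		for (int i = 0; i < MAX_HANDLES; ++i) {
			unsigned int expected = 0;
			/* Succeeds only if the slot still holds 0, mirroring
			 * atomic_cmpxchg(&handles[i], 0, handle) above. */
			if (atomic_compare_exchange_strong(&handles[i],
							   &expected, handle))
				return i;
		}
		return -1;	/* no free slot; the driver returns -EINVAL */
	}

	int main(void)
	{
		printf("slot %d\n", claim_handle(0xdeadbeef));
		printf("slot %d\n", claim_handle(0xcafef00d));
		return 0;
	}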
572 relocs_chunk = p->chunk_relocs; in radeon_uvd_cs_reloc()
575 if (idx >= relocs_chunk->length_dw) { in radeon_uvd_cs_reloc()
577 idx, relocs_chunk->length_dw); in radeon_uvd_cs_reloc()
578 return -EINVAL; in radeon_uvd_cs_reloc()
581 reloc = &p->relocs[(idx / 4)]; in radeon_uvd_cs_reloc()
582 start = reloc->gpu_offset; in radeon_uvd_cs_reloc()
583 end = start + radeon_bo_size(reloc->robj); in radeon_uvd_cs_reloc()
586 p->ib.ptr[data0] = start & 0xFFFFFFFF; in radeon_uvd_cs_reloc()
587 p->ib.ptr[data1] = start >> 32; in radeon_uvd_cs_reloc()
589 cmd = radeon_get_ib_value(p, p->idx) >> 1; in radeon_uvd_cs_reloc()
594 return -EINVAL; in radeon_uvd_cs_reloc()
596 if ((end - start) < buf_sizes[cmd]) { in radeon_uvd_cs_reloc()
598 (unsigned)(end - start), buf_sizes[cmd]); in radeon_uvd_cs_reloc()
599 return -EINVAL; in radeon_uvd_cs_reloc()
604 return -EINVAL; in radeon_uvd_cs_reloc()
607 if ((start >> 28) != ((end - 1) >> 28)) { in radeon_uvd_cs_reloc()
608 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", in radeon_uvd_cs_reloc()
610 return -EINVAL; in radeon_uvd_cs_reloc()
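Since 256MB is 1 << 28, shifting an address right by 28 yields its 256MB segment index, and a buffer crosses a boundary exactly when start and end - 1 land in different segments. For example, start = 0x0FFF0000 with end = 0x10001000 gives 0x0FFF0000 >> 28 = 0 but 0x10000FFF >> 28 = 1, so the reloc is rejected.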
615 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { in radeon_uvd_cs_reloc()
616 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", in radeon_uvd_cs_reloc()
618 return -EINVAL; in radeon_uvd_cs_reloc()
623 DRM_ERROR("More than one message in a UVD-IB!\n"); in radeon_uvd_cs_reloc()
624 return -EINVAL; in radeon_uvd_cs_reloc()
627 r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); in radeon_uvd_cs_reloc()
632 return -EINVAL; in radeon_uvd_cs_reloc()
646 p->idx++; in radeon_uvd_cs_reg()
647 for (i = 0; i <= pkt->count; ++i) { in radeon_uvd_cs_reg()
648 switch (pkt->reg + i*4) { in radeon_uvd_cs_reg()
650 *data0 = p->idx; in radeon_uvd_cs_reg()
653 *data1 = p->idx; in radeon_uvd_cs_reg()
666 pkt->reg + i*4); in radeon_uvd_cs_reg()
667 return -EINVAL; in radeon_uvd_cs_reg()
669 p->idx++; in radeon_uvd_cs_reg()
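Note the deferred patching: the DATA0/DATA1 register writes are not resolved here; radeon_uvd_cs_reg() only records their IB indices in *data0 and *data1 so that radeon_uvd_cs_reloc() can later overwrite those dwords with the relocated 64-bit address (the p->ib.ptr[data0] / p->ib.ptr[data1] stores above). In the upstream file the case labels elided from this listing are UVD_GPCOM_VCPU_DATA0, UVD_GPCOM_VCPU_DATA1 and UVD_GPCOM_VCPU_CMD, the last of which triggers the reloc check.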
690 if (p->chunk_ib->length_dw % 16) { in radeon_uvd_cs_parse()
692 p->chunk_ib->length_dw); in radeon_uvd_cs_parse()
693 return -EINVAL; in radeon_uvd_cs_parse()
696 if (p->chunk_relocs == NULL) { in radeon_uvd_cs_parse()
698 return -EINVAL; in radeon_uvd_cs_parse()
703 r = radeon_cs_packet_parse(p, &pkt, p->idx); in radeon_uvd_cs_parse()
714 p->idx += pkt.count + 2; in radeon_uvd_cs_parse()
718 return -EINVAL; in radeon_uvd_cs_parse()
720 } while (p->idx < p->chunk_ib->length_dw); in radeon_uvd_cs_parse()
723 DRM_ERROR("UVD-IBs need a msg command!\n"); in radeon_uvd_cs_parse()
724 return -EINVAL; in radeon_uvd_cs_parse()
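The cursor advance p->idx += pkt.count + 2 follows from the PACKET0 encoding: the count field stores the number of payload dwords minus one, so a packet occupies one header dword plus count + 1 data dwords, i.e. count + 2 in total.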
771 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - in radeon_uvd_get_create_msg()
774 uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs); in radeon_uvd_get_create_msg()
775 uint64_t addr = rdev->uvd.gpu_addr + offs; in radeon_uvd_get_create_msg()
779 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); in radeon_uvd_get_create_msg()
799 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_get_create_msg()
807 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - in radeon_uvd_get_destroy_msg()
810 uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs); in radeon_uvd_get_destroy_msg()
811 uint64_t addr = rdev->uvd.gpu_addr + offs; in radeon_uvd_get_destroy_msg()
815 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); in radeon_uvd_get_destroy_msg()
828 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_get_destroy_msg()
833 * radeon_uvd_count_handles - count number of open streams
849 for (i = 0; i < rdev->uvd.max_handles; ++i) { in radeon_uvd_count_handles()
850 if (!atomic_read(&rdev->uvd.handles[i])) in radeon_uvd_count_handles()
853 if (rdev->uvd.img_size[i] >= 720*576) in radeon_uvd_count_handles()
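The SD/HD split is a plain pixel-count threshold: 720 * 576 = 414720 pixels, the PAL D1 frame size. Any stream whose recorded image size reaches that is counted toward hd, everything smaller toward sd, and the two counts feed the DPM state selection below.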
866 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in radeon_uvd_idle_work_handler()
867 radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd, in radeon_uvd_idle_work_handler()
868 &rdev->pm.dpm.hd); in radeon_uvd_idle_work_handler()
874 schedule_delayed_work(&rdev->uvd.idle_work, in radeon_uvd_idle_work_handler()
882 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work); in radeon_uvd_note_usage()
883 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work, in radeon_uvd_note_usage()
886 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in radeon_uvd_note_usage()
889 if ((rdev->pm.dpm.sd != sd) || in radeon_uvd_note_usage()
890 (rdev->pm.dpm.hd != hd)) { in radeon_uvd_note_usage()
891 rdev->pm.dpm.sd = sd; in radeon_uvd_note_usage()
892 rdev->pm.dpm.hd = hd; in radeon_uvd_note_usage()
899 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in radeon_uvd_note_usage()
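The two lines at the top of radeon_uvd_note_usage() implement an idle-timer reset: cancel_delayed_work_sync() returns true if the idle work was still pending, so set_clocks starts out true only when the handler has already run (or was never queued), i.e. when the block has actually gone idle and clocks were dropped. schedule_delayed_work() then re-arms the timeout, and clocks are forced back up only in that first-use case rather than on every submission.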
930 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
934 * @dclk: wanted DCLK
944 * @optimal_dclk_div: resulting dclk post divider
946 Calculate dividers for UVD's UPLL (R6xx-SI, except APUs). in radeon_uvd_calc_upll_dividers()
947 Returns zero on success, -EINVAL on error. in radeon_uvd_calc_upll_dividers()
950 unsigned vclk, unsigned dclk, in radeon_uvd_calc_upll_dividers() argument
959 unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq; in radeon_uvd_calc_upll_dividers()
965 vco_min = max(max(vco_min, vclk), dclk); in radeon_uvd_calc_upll_dividers()
973 /* fb div out of range ? */ in radeon_uvd_calc_upll_dividers()
985 /* calc dclk divider with current vco freq */ in radeon_uvd_calc_upll_dividers()
986 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, in radeon_uvd_calc_upll_dividers()
992 score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div); in radeon_uvd_calc_upll_dividers()
1007 return -EINVAL; in radeon_uvd_calc_upll_dividers()
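The search scores each candidate VCO frequency by how far the achievable clocks fall short of the requested ones: score = (vclk - vco_freq / vclk_div) + (dclk - vco_freq / dclk_div). Judging from the post-divider helper (not shown in this listing), each divider is chosen so the output never exceeds its target, making both terms non-negative shortfalls. For example, with vco_freq = 120000 and a requested dclk = 50000, a divider of 2 would give 60000 (too high), so 3 is used, the achieved clock is 40000, and 10000 is added to the score; the divider set with the lowest total score wins.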
1038 return -ETIMEDOUT; in radeon_uvd_send_upll_ctlreq()