/openbmc/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_page_dirty.c:
     84  struct vmw_bo_dirty *dirty = vbo->dirty;  in vmw_bo_dirty_scan_pagetable()
    122  struct vmw_bo_dirty *dirty = vbo->dirty;  in vmw_bo_dirty_scan_mkwrite()
    165  struct vmw_bo_dirty *dirty = vbo->dirty;  in vmw_bo_dirty_scan()
    168  vmw_bo_dirty_scan_pagetable(vbo);  in vmw_bo_dirty_scan()
    170  vmw_bo_dirty_scan_mkwrite(vbo);  in vmw_bo_dirty_scan()
    230  int vmw_bo_dirty_add(struct vmw_bo *vbo)  in vmw_bo_dirty_add() argument
    269  vbo->dirty = dirty;  in vmw_bo_dirty_add()
    293  vbo->dirty = NULL;  in vmw_bo_dirty_release()
    402  if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&  in vmw_bo_vm_mkwrite()
    433  if (vbo->dirty) {  in vmw_bo_vm_fault()
    [all …]
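Taken together, these hits outline vmwgfx's per-BO dirty tracking: vmw_bo_dirty_add() attaches a vmw_bo_dirty tracker to the buffer object, vmw_bo_dirty_scan() dispatches on the tracker's method field, and the fault/mkwrite handlers only act when a tracker is present. A minimal sketch of that dispatch follows; the struct layouts and the VMW_BO_DIRTY_PAGETABLE value are assumptions made for the sketch, only VMW_BO_DIRTY_MKWRITE and the function names appear in the hits.

/* Minimal sketch, not the driver code: layouts and the PAGETABLE value are assumed. */
enum vmw_bo_dirty_method { VMW_BO_DIRTY_PAGETABLE, VMW_BO_DIRTY_MKWRITE };

struct vmw_bo_dirty { enum vmw_bo_dirty_method method; /* plus bitmap, ranges, ... */ };
struct vmw_bo { struct vmw_bo_dirty *dirty; /* NULL when tracking is off */ };

static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo) { /* read PTE dirty bits */ }
static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo) { /* rely on write-notify faults */ }

/* Dispatch on the strategy chosen when the tracker was attached. */
static void vmw_bo_dirty_scan(struct vmw_bo *vbo)
{
    struct vmw_bo_dirty *dirty = vbo->dirty;

    if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
        vmw_bo_dirty_scan_pagetable(vbo);
    else
        vmw_bo_dirty_scan_mkwrite(vbo);
}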
vmwgfx_bo.h:
    130  void vmw_bo_unmap(struct vmw_bo *vbo);
    146  int i = ARRAY_SIZE(vbo->res_prios);  in vmw_bo_prio_adjust()
    149  if (vbo->res_prios[i]) {  in vmw_bo_prio_adjust()
    150  vbo->tbo.priority = i;  in vmw_bo_prio_adjust()
    155  vbo->tbo.priority = 3;  in vmw_bo_prio_adjust()
    169  if (vbo->res_prios[prio]++ == 0)  in vmw_bo_prio_add()
    170  vmw_bo_prio_adjust(vbo);  in vmw_bo_prio_add()
    184  if (--vbo->res_prios[prio] == 0)  in vmw_bo_prio_del()
    185  vmw_bo_prio_adjust(vbo);  in vmw_bo_prio_del()
    205  drm_gem_object_get(&vbo->tbo.base);  in vmw_user_bo_ref()
    [all …]
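The vmw_bo_prio_* fragments above describe a small reference-counted priority scheme: res_prios[] counts how many resources want each priority level, and the TTM BO's eviction priority follows the highest level that still has users, falling back to 3 when none do. A self-contained sketch of that logic, with the array size and the struct name invented for illustration:

#define RES_PRIO_COUNT 4   /* assumed size of res_prios[] */

struct prio_sketch {
    unsigned int res_prios[RES_PRIO_COUNT];
    unsigned int priority;     /* stands in for vbo->tbo.priority */
};

static void prio_adjust(struct prio_sketch *vbo)
{
    int i = RES_PRIO_COUNT;

    while (i--) {
        if (vbo->res_prios[i]) {
            vbo->priority = i;         /* highest level with users wins */
            return;
        }
    }
    vbo->priority = 3;                 /* default when nothing is attached */
}

static void prio_add(struct prio_sketch *vbo, int prio)
{
    if (vbo->res_prios[prio]++ == 0)   /* first user of this level */
        prio_adjust(vbo);
}

static void prio_del(struct prio_sketch *vbo, int prio)
{
    if (--vbo->res_prios[prio] == 0)   /* last user of this level left */
        prio_adjust(vbo);
}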
vmwgfx_bo.c:
     39  vmw_bo_unmap(vbo);  in vmw_bo_release()
     52  WARN_ON(vbo->dirty);  in vmw_bo_free()
     54  vmw_bo_release(vbo);  in vmw_bo_free()
     55  kfree(vbo);  in vmw_bo_free()
    360  if (vbo->map.bo == NULL)  in vmw_bo_unmap()
    367  vbo->map.bo = NULL;  in vmw_bo_unmap()
    537  struct vmw_bo *vbo;  in vmw_user_bo_synccpu_ioctl() local
    555  vmw_user_bo_unref(&vbo);  in vmw_user_bo_synccpu_ioctl()
    684  struct vmw_bo *vbo;  in vmw_dumb_create() local
    706  &vbo);  in vmw_dumb_create()
    [all …]
vmwgfx_validation.c:
    175  struct vmw_bo *vbo)  in vmw_validation_find_bo_dup() argument
    196  if (entry->base.bo == &vbo->tbo) {  in vmw_validation_find_bo_dup()
    264  struct vmw_bo *vbo)  in vmw_validation_add_bo() argument
    400  struct vmw_bo *vbo,  in vmw_validation_res_switch_backup() argument
    411  val->new_guest_memory_bo = vbo;  in vmw_validation_res_switch_backup()
    443  vmw_bo_placement_set(vbo,  in vmw_validation_res_reserve()
    525  if (vbo->tbo.pin_count > 0)  in vmw_validation_bo_validate_single()
    572  ret = vmw_bo_dirty_add(vbo);  in vmw_validation_bo_validate()
    581  if (vbo->dirty)  in vmw_validation_bo_validate()
    582  vmw_bo_dirty_scan(vbo);  in vmw_validation_bo_validate()
    [all …]
vmwgfx_gem.c:
     54  struct vmw_bo *vbo = to_vmw_bo(obj);  in vmw_gem_pin_private() local
     61  vmw_bo_pin_reserved(vbo, do_pin);  in vmw_gem_pin_private()
    159  struct vmw_bo *vbo;  in vmw_prime_import_sg_table() local
    173  ret = vmw_bo_create(dev_priv, &params, &vbo);  in vmw_prime_import_sg_table()
    177  vbo->tbo.base.funcs = &vmw_gem_object_funcs;  in vmw_prime_import_sg_table()
    179  gem = &vbo->tbo.base;  in vmw_prime_import_sg_table()
    193  struct vmw_bo *vbo;  in vmw_gem_object_create_ioctl() local
    198  req->size, &handle, &vbo);  in vmw_gem_object_create_ioctl()
    203  rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node);  in vmw_gem_object_create_ioctl()
    207  drm_gem_object_put(&vbo->tbo.base);  in vmw_gem_object_create_ioctl()
vmwgfx_resource.c:
    754  .bo = &vbo->tbo,  in vmw_resource_unbind_list()
    969  struct vmw_bo *vbo = NULL;  in vmw_resource_pin() local
    972  vbo = res->guest_memory_bo;  in vmw_resource_pin()
    977  if (!vbo->tbo.pin_count) {  in vmw_resource_pin()
    978  vmw_bo_placement_set(vbo,  in vmw_resource_pin()
    982  (&vbo->tbo,  in vmw_resource_pin()
    983  &vbo->placement,  in vmw_resource_pin()
    986  ttm_bo_unreserve(&vbo->tbo);  in vmw_resource_pin()
    995  if (vbo)  in vmw_resource_pin()
    996  ttm_bo_unreserve(&vbo->tbo);  in vmw_resource_pin()
    [all …]
vmwgfx_ttm_buffer.c:
    600  struct vmw_bo *vbo;  in vmw_bo_create_and_populate() local
    610  ret = vmw_bo_create(dev_priv, &bo_params, &vbo);  in vmw_bo_create_and_populate()
    614  ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);  in vmw_bo_create_and_populate()
    616  ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);  in vmw_bo_create_and_populate()
    619  container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);  in vmw_bo_create_and_populate()
    623  ttm_bo_unreserve(&vbo->tbo);  in vmw_bo_create_and_populate()
    626  *bo_p = vbo;  in vmw_bo_create_and_populate()
vmwgfx_validation.h:
    162  struct vmw_bo *vbo);
    178  struct vmw_bo *vbo,
vmwgfx_drv.c:
    393  struct vmw_bo *vbo;  in vmw_dummy_query_bo_create() local
    410  ret = vmw_bo_create(dev_priv, &bo_params, &vbo);  in vmw_dummy_query_bo_create()
    414  ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);  in vmw_dummy_query_bo_create()
    416  vmw_bo_pin_reserved(vbo, true);  in vmw_dummy_query_bo_create()
    418  ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);  in vmw_dummy_query_bo_create()
    426  vmw_bo_pin_reserved(vbo, false);  in vmw_dummy_query_bo_create()
    427  ttm_bo_unreserve(&vbo->tbo);  in vmw_dummy_query_bo_create()
    431  vmw_bo_unreference(&vbo);  in vmw_dummy_query_bo_create()
    433  dev_priv->dummy_query_bo = vbo;  in vmw_dummy_query_bo_create()
vmwgfx_drv.h:
    834  void vmw_resource_unbind_list(struct vmw_bo *vbo);
    839  int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
   1399  void vmw_bo_dirty_scan(struct vmw_bo *vbo);
   1400  int vmw_bo_dirty_add(struct vmw_bo *vbo);
   1403  void vmw_bo_dirty_release(struct vmw_bo *vbo);
   1404  void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
vmwgfx_kms.c:
    225  static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)  in vmw_du_destroy_cursor_mob() argument
    227  if (!(*vbo))  in vmw_du_destroy_cursor_mob()
    230  ttm_bo_unpin(&(*vbo)->tbo);  in vmw_du_destroy_cursor_mob()
    231  vmw_bo_unreference(vbo);  in vmw_du_destroy_cursor_mob()
    624  struct vmw_bo *vbo = vps->cursor.bo;  in vmw_du_cursor_plane_unmap_cm() local
    626  if (!vbo || !vbo->map.virtual)  in vmw_du_cursor_plane_unmap_cm()
    629  ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);  in vmw_du_cursor_plane_unmap_cm()
    631  vmw_bo_unmap(vbo);  in vmw_du_cursor_plane_unmap_cm()
    632  ttm_bo_unreserve(&vbo->tbo);  in vmw_du_cursor_plane_unmap_cm()
vmwgfx_execbuf.c:
     68  struct vmw_bo *vbo;  member
   1177  reloc->vbo = vmw_bo;  in vmw_translate_mob_ptr()
   1234  reloc->vbo = vmw_bo;  in vmw_translate_guest_ptr()
   1713  struct vmw_bo *vbo;  in vmw_cmd_res_switch_backup() local
   1721  ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);  in vmw_cmd_res_switch_backup()
   1725  vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,  in vmw_cmd_res_switch_backup()
   3765  bo = &reloc->vbo->tbo;  in vmw_apply_relocations()
/openbmc/linux/fs/ntfs3/
attrib.c:
   1245  u64 vbo;  in attr_data_read_resident() local
   1281  u64 vbo;  in attr_data_write_resident() local
   1406  vbo[1] = frame << 2;  in attr_wof_frame_info()
   1456  if (vbo[1]) {  in attr_wof_frame_info()
   1458  vbo[0] = vbo[1] - bytes_per_off;  in attr_wof_frame_info()
   1462  vbo[0] = 0;  in attr_wof_frame_info()
   1909  if (vbo > data_size)  in attr_collapse_range()
   2093  valid_size = vbo;  in attr_collapse_range()
   2152  from = vbo;  in attr_punch_hole()
   2171  bytes += vbo;  in attr_punch_hole()
   [all …]
dir.c:
    353  const struct INDEX_HDR *hdr, u64 vbo, u64 pos,  in ntfs_read_hdr() argument
    374  if (vbo + off < pos)  in ntfs_read_hdr()
    380  ctx->pos = vbo + off;  in ntfs_read_hdr()
    400  u64 vbo;  in ntfs_readdir() local
    468  vbo = (u64)bit << index_bits;  in ntfs_readdir()
    469  if (vbo >= i_size) {  in ntfs_readdir()
    483  vbo = (u64)bit << index_bits;  in ntfs_readdir()
    484  if (vbo >= i_size) {  in ntfs_readdir()
    495  vbo + sbi->record_size, pos, name, ctx);  in ntfs_readdir()
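In ntfs3, vbo is a virtual byte offset within an attribute or index stream. The dir.c hits show ntfs_readdir() deriving it from what appears to be a bit number in the directory's index bitmap, and ntfs_read_hdr() turning it back into a resumable directory position (ctx->pos = vbo + off). A simplified sketch of that position math; any name not present in the hits is invented:

#include <stdint.h>
#include <stddef.h>

/*
 * Directory position = byte offset of the index block within the
 * index-allocation stream (bitmap bit << index_bits) plus the offset of the
 * entry inside that block, mirroring "vbo = (u64)bit << index_bits" and
 * "ctx->pos = vbo + off" above.
 */
static uint64_t dir_pos(size_t bit, uint8_t index_bits, uint32_t entry_off)
{
    uint64_t vbo = (uint64_t)bit << index_bits;  /* start of this index block */

    return vbo + entry_off;                      /* unique, resumable f_pos */
}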
fsntfs.c:
    774  u64 vbo;  in ntfs_clear_mft_tail() local
    786  vbo = (u64)from * rs;  in ntfs_clear_mft_tail()
   1214  lbo = vbo + sbi->mft.lbo;  in ntfs_read_run_nb()
   1341  off = vbo & sbi->cluster_mask;  in ntfs_get_bh()
   1520  vbo = vbo & ~511ull;  in ntfs_bio_pages()
   1521  bytes = lbo - vbo;  in ntfs_bio_pages()
   1523  vcn = vbo >> cluster_bits;  in ntfs_bio_pages()
   1528  off = vbo & sbi->cluster_mask;  in ntfs_bio_pages()
   1545  off = vbo & (PAGE_SIZE - 1);  in ntfs_bio_pages()
   1554  vbo += add;  in ntfs_bio_pages()
   [all …]
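The fsntfs.c hits repeat one pattern: split the virtual byte offset into a virtual cluster number and an intra-cluster offset, map the VCN to a logical cluster through the file's run list, and rebuild a device byte offset from it (the MFT case in ntfs_read_run_nb() simply adds a fixed base, lbo = vbo + sbi->mft.lbo). A sketch of that mapping with a stub standing in for the run-list lookup:

#include <stdint.h>
#include <stdbool.h>

/* Stand-in for run_lookup_entry(): maps a virtual cluster to a logical one.
 * The real lookup walks the inode's runs_tree; this stub pretends the data
 * is stored contiguously starting at LCN 0. */
static bool lookup_lcn(uint64_t vcn, uint64_t *lcn)
{
    *lcn = vcn;
    return true;
}

/* vbo -> device byte offset, mirroring the hits above:
 * vcn = vbo >> cluster_bits; off = vbo & cluster_mask; lbo = (lcn << bits) + off */
static bool vbo_to_lbo(uint64_t vbo, uint8_t cluster_bits, uint64_t *lbo)
{
    uint64_t cluster_mask = ((uint64_t)1 << cluster_bits) - 1;
    uint64_t vcn = vbo >> cluster_bits;
    uint64_t off = vbo & cluster_mask;
    uint64_t lcn;

    if (!lookup_lcn(vcn, &lcn))
        return false;                  /* hole or corrupt run list */

    *lbo = (lcn << cluster_bits) + off;
    return true;
}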
file.c:
    183  pgoff_t idx = vbo >> PAGE_SHIFT;  in ntfs_zero_range()
    184  u32 from = vbo & (PAGE_SIZE - 1);  in ntfs_zero_range()
    436  loff_t end = vbo + len;  in ntfs_fallocate()
    499  err = attr_punch_hole(ni, vbo, len, &frame_size);  in ntfs_fallocate()
    510  vbo_a = (vbo + mask) & ~mask;  in ntfs_fallocate()
    514  if (tmp > vbo) {  in ntfs_fallocate()
    515  err = ntfs_zero_range(inode, vbo, tmp);  in ntfs_fallocate()
    520  if (vbo < end_a && end_a < end) {  in ntfs_fallocate()
    554  err = attr_collapse_range(ni, vbo, len);  in ntfs_fallocate()
    572  err = attr_insert_range(ni, vbo, len);  in ntfs_fallocate()
    [all …]
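The ntfs_fallocate() hits show the usual punch-hole alignment dance: the requested [vbo, vbo + len) range is rounded inward to an aligned boundary (vbo_a, end_a), and the unaligned head and tail are zeroed with ntfs_zero_range() instead of being deallocated. A small sketch of that rounding; the assumption here is that mask is the alignment size minus one, and that end_a is the end rounded down, which the fragments do not show explicitly:

#include <stdint.h>

/* Round the punch-hole start up and the end down; only the aligned middle can
 * really be deallocated, the remainder must be zeroed in place
 * (cf. "vbo_a = (vbo + mask) & ~mask" above). */
static void punch_align(uint64_t vbo, uint64_t len, uint64_t mask,
                        uint64_t *aligned_start, uint64_t *aligned_end)
{
    uint64_t end = vbo + len;

    *aligned_start = (vbo + mask) & ~mask;   /* round start up  */
    *aligned_end = end & ~mask;              /* round end down  */
}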
frecord.c:
    950  u64 vbo;  in ni_ins_attr_ext() local
   2012  end = vbo + len;  in ni_fiemap()
   2019  while (vbo < end) {  in ni_fiemap()
   2086  if (vbo + bytes >= end)  in ni_fiemap()
   2087  bytes = end - vbo;  in ni_fiemap()
   2097  if (vbo + dlen >= end)  in ni_fiemap()
   2110  vbo = valid;  in ni_fiemap()
   2131  vbo += bytes;  in ni_fiemap()
   2252  u64 vbo;  in ni_decompress_file() local
   2283  for (vbo = 0; vbo < i_size; vbo += bytes) {  in ni_decompress_file()
   [all …]
fslog.c:
    949  if (off == vbo) {  in alloc_rsttbl_from_idx()
    971  if (off == vbo) {  in alloc_rsttbl_from_idx()
   1001  u32 vbo;  member
   1083  return vbo;  in lsn_to_vbo()
   1207  u64 vbo;  in log_read_rst() local
   1212  vbo = 0;  in log_read_rst()
   1215  vbo = 512;  in log_read_rst()
   1220  for (; vbo < log->l_size; vbo = 2 * vbo + skip, skip = 0) {  in log_read_rst()
   1255  info->vbo = vbo;  in log_read_rst()
   1477  if (end <= vbo)  in next_log_lsn()
   [all …]
index.c:
    263  vbo = off & ~(size_t)sbi->block_mask;  in bmp_buf_get()
    265  bbuf->new_valid = vbo + blocksize;  in bmp_buf_get()
    271  if (vbo >= valid_size) {  in bmp_buf_get()
    273  } else if (vbo + blocksize > valid_size) {  in bmp_buf_get()
    375  size_t vbo = from >> 3;  in scan_nres_bitmap() local
    390  vcn = vbo >> sbi->cluster_bits;  in scan_nres_bitmap()
    423  if (vbo >= valid_size) {  in scan_nres_bitmap()
    431  if (vbo + blocksize > data_size)  in scan_nres_bitmap()
    432  nbits = 8 * (data_size - vbo);  in scan_nres_bitmap()
    440  *ret += 8 * vbo;  in scan_nres_bitmap()
    [all …]
bitmap.c:
    508  size_t wpos, wbit, iw, vbo;  in wnd_rescan() local
    517  vbo = 0;  in wnd_rescan()
    528  vbo * 8 - prev_tail,  in wnd_rescan()
    543  u32 off = vbo & sbi->cluster_mask;  in wnd_rescan()
    545  if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,  in wnd_rescan()
    569  wbit = vbo * 8;  in wnd_rescan()
    613  vbo += blocksize;  in wnd_rescan()
    682  size_t vbo;  in wnd_map() local
    690  vbo = (u64)iw << sb->s_blocksize_bits;  in wnd_map()
   1374  u64 vbo, lbo, bytes;  in wnd_extend() local
   [all …]
record.c:
    121  u64 vbo = (u64)mi->rno << sbi->record_bits;  in mi_read() local
    133  err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);  in mi_read()
    152  vbo >> sbi->cluster_bits);  in mi_read()
    162  err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);  in mi_read()
    405  u64 vbo = (u64)rno << sbi->record_bits;  in mi_format_new() local
    446  err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,  in mi_format_new()
ntfs_fs.h:
    452  int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
    453  int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
    454  int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
    561  __u64 vbo, __u64 len);
    617  u64 vbo, const void *buf, size_t bytes, int sync);
    619  const struct runs_tree *run, u64 vbo);
    621  u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb);
    622  int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
    625  int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
    630  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
    [all …]
inode.c:
    593  vcn = vbo >> cluster_bits;  in ntfs_get_block_vbo()
    594  off = vbo & sbi->cluster_mask;  in ntfs_get_block_vbo()
    629  if (vbo >= valid)  in ntfs_get_block_vbo()
    636  if (vbo >= valid)  in ntfs_get_block_vbo()
    639  if (vbo + bytes > valid) {  in ntfs_get_block_vbo()
    640  ni->i_valid = vbo + bytes;  in ntfs_get_block_vbo()
    643  } else if (vbo >= valid) {  in ntfs_get_block_vbo()
    658  u32 voff = valid - vbo;  in ntfs_get_block_vbo()
    787  loff_t vbo = iocb->ki_pos;  in ntfs_direct_IO() local
    805  end = vbo + ret;  in ntfs_direct_IO()
    [all …]
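The ntfs_get_block_vbo() hits also show NTFS's valid-data-size handling: a block entirely beyond ni->i_valid is served as zeros (or, on write, pushes the valid size forward to vbo + bytes), and a block that straddles the boundary is only initialized for the first valid - vbo bytes (the voff in the fragment). A sketch of that classification, with invented names for everything not in the fragments, covering the read side only:

#include <stdint.h>
#include <string.h>

/* Classify a block at virtual byte offset vbo against the valid data size and,
 * when it straddles the boundary, zero the uninitialized tail of the buffer. */
static void apply_valid_size(uint64_t vbo, uint32_t bytes, uint64_t valid,
                             unsigned char *buf)
{
    if (vbo >= valid) {
        memset(buf, 0, bytes);         /* entirely past valid data: all zeros */
    } else if (vbo + bytes > valid) {
        uint32_t voff = (uint32_t)(valid - vbo);

        memset(buf + voff, 0, bytes - voff);   /* only the first voff bytes are real */
    }
    /* else: fully inside valid data, nothing to do */
}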
/openbmc/linux/drivers/gpu/drm/imx/ipuv3/
ipuv3-plane.c:
    377  unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba;  in ipu_plane_atomic_check() local
    466  vbo = drm_plane_state_to_vbo(new_state);  in ipu_plane_atomic_check()
    468  if (vbo & 0x7 || vbo > 0xfffff8)  in ipu_plane_atomic_check()
    473  if (vbo != old_vbo)  in ipu_plane_atomic_check()
    586  unsigned long eba, ubo, vbo;  in ipu_plane_atomic_update() local
    710  vbo = drm_plane_state_to_vbo(new_state);  in ipu_plane_atomic_update()
    714  swap(ubo, vbo);  in ipu_plane_atomic_update()
    717  fb->pitches[1], ubo, vbo);  in ipu_plane_atomic_update()
    720  "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,  in ipu_plane_atomic_update()
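Here vbo means something entirely different: the byte offset of the V (chroma) plane within a planar framebuffer. The atomic_check hit rejects offsets that are not 8-byte aligned or that exceed the hardware offset field (> 0xfffff8) and forces a full modeset when the offset changes, while atomic_update swaps the U and V offsets (presumably for YVU plane orderings). A tiny sketch of the same validity test; the helper name is invented:

#include <stdbool.h>

/* Chroma-plane offset must be 8-byte aligned and fit the hardware offset
 * field; 0xfffff8 is the limit used in the driver hit above. */
static bool chroma_offset_ok(unsigned long vbo)
{
    return (vbo & 0x7) == 0 && vbo <= 0xfffff8;
}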
/openbmc/linux/drivers/gpu/drm/vc4/
vc4_validate.c:
    899  struct drm_gem_dma_object *vbo =  in validate_gl_shader_rec() local
    908  to_vc4_bo(&vbo->base)->write_seqno);  in validate_gl_shader_rec()
    913  if (vbo->base.size < offset ||  in validate_gl_shader_rec()
    914  vbo->base.size - offset < attr_size) {  in validate_gl_shader_rec()
    916  offset, attr_size, vbo->base.size);  in validate_gl_shader_rec()
    921  max_index = ((vbo->base.size - offset - attr_size) /  in validate_gl_shader_rec()
    931  *(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset;  in validate_gl_shader_rec()
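For vc4, vbo is a userspace-supplied vertex buffer BO referenced by a GL shader record, and the validator must ensure the GPU can never fetch past it. The hits show an overflow-safe size check and the computation of the largest vertex index that still fits; the divisor is cut off in the listing and is presumably the attribute stride. A sketch of that bound under those assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Reject attribute records that point outside the BO and compute the largest
 * vertex index whose fetch stays inside it.  Written as two comparisons so an
 * unsigned underflow cannot hide an out-of-bounds offset. */
static bool vertex_attr_ok(size_t bo_size, uint32_t offset, uint32_t attr_size,
                           uint32_t stride, uint32_t *max_index)
{
    if (bo_size < offset || bo_size - offset < attr_size)
        return false;                  /* even vertex 0 would read out of bounds */

    if (stride == 0)
        *max_index = 0;                /* every vertex reads the same bytes */
    else
        *max_index = (uint32_t)((bo_size - offset - attr_size) / stride);

    return true;
}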