
Searched refs:vbo (Results 1 – 25 of 25) sorted by relevance

/openbmc/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_page_dirty.c
82 static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo) in vmw_bo_dirty_scan_pagetable() argument
84 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan_pagetable()
85 pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node); in vmw_bo_dirty_scan_pagetable()
86 struct address_space *mapping = vbo->tbo.bdev->dev_mapping; in vmw_bo_dirty_scan_pagetable()
120 static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo) in vmw_bo_dirty_scan_mkwrite() argument
122 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan_mkwrite()
123 unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node); in vmw_bo_dirty_scan_mkwrite()
124 struct address_space *mapping = vbo->tbo.bdev->dev_mapping; in vmw_bo_dirty_scan_mkwrite()
130 num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping, in vmw_bo_dirty_scan_mkwrite()
163 void vmw_bo_dirty_scan(struct vmw_bo *vbo) in vmw_bo_dirty_scan() argument
[all …]
vmwgfx_bo.h
130 void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
131 void vmw_bo_unmap(struct vmw_bo *vbo);
145 static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo) in vmw_bo_prio_adjust() argument
147 int i = ARRAY_SIZE(vbo->res_prios); in vmw_bo_prio_adjust()
150 if (vbo->res_prios[i]) { in vmw_bo_prio_adjust()
151 vbo->tbo.priority = i; in vmw_bo_prio_adjust()
156 vbo->tbo.priority = 3; in vmw_bo_prio_adjust()
168 static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio) in vmw_bo_prio_add() argument
170 if (vbo->res_prios[prio]++ == 0) in vmw_bo_prio_add()
171 vmw_bo_prio_adjust(vbo); in vmw_bo_prio_add()
[all …]
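
Aside: the vmwgfx_bo.h hits above show the buffer-object priority bookkeeping: vmw_bo_prio_adjust() walks res_prios from the top and makes the highest non-empty slot the TTM priority, falling back to 3 when every slot is empty, and vmw_bo_prio_add() re-runs that walk when a slot goes from zero to one reference. A minimal standalone C sketch of the same walk, using local stand-in types rather than the kernel structs:

#include <stdio.h>

#define SKETCH_NUM_PRIOS 4      /* stand-in for ARRAY_SIZE(vbo->res_prios) */

struct sketch_bo {
        unsigned int res_prios[SKETCH_NUM_PRIOS];
        unsigned int priority;
};

/* Pick the highest slot with a non-zero refcount; default to 3. */
static void sketch_prio_adjust(struct sketch_bo *bo)
{
        int i = SKETCH_NUM_PRIOS;

        while (i--) {
                if (bo->res_prios[i]) {
                        bo->priority = i;
                        return;
                }
        }
        bo->priority = 3;
}

int main(void)
{
        struct sketch_bo bo = { .res_prios = { 1, 0, 2, 0 } };

        sketch_prio_adjust(&bo);
        printf("priority=%u\n", bo.priority);   /* highest non-empty slot: 2 */
        return 0;
}
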
vmwgfx_bo.c
35 static void vmw_bo_release(struct vmw_bo *vbo) in vmw_bo_release() argument
37 WARN_ON(vbo->tbo.base.funcs && in vmw_bo_release()
38 kref_read(&vbo->tbo.base.refcount) != 0); in vmw_bo_release()
39 vmw_bo_unmap(vbo); in vmw_bo_release()
40 drm_gem_object_release(&vbo->tbo.base); in vmw_bo_release()
50 struct vmw_bo *vbo = to_vmw_bo(&bo->base); in vmw_bo_free() local
52 WARN_ON(vbo->dirty); in vmw_bo_free()
53 WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); in vmw_bo_free()
54 vmw_bo_release(vbo); in vmw_bo_free()
55 kfree(vbo); in vmw_bo_free()
[all …]
vmwgfx_validation.c
175 struct vmw_bo *vbo) in vmw_validation_find_bo_dup() argument
184 unsigned long key = (unsigned long) vbo; in vmw_validation_find_bo_dup()
196 if (entry->base.bo == &vbo->tbo) { in vmw_validation_find_bo_dup()
264 struct vmw_bo *vbo) in vmw_validation_add_bo() argument
268 bo_node = vmw_validation_find_bo_dup(ctx, vbo); in vmw_validation_add_bo()
277 bo_node->hash.key = (unsigned long) vbo; in vmw_validation_add_bo()
282 val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo); in vmw_validation_add_bo()
400 struct vmw_bo *vbo, in vmw_validation_res_switch_backup() argument
411 val->new_guest_memory_bo = vbo; in vmw_validation_res_switch_backup()
441 struct vmw_bo *vbo = res->guest_memory_bo; in vmw_validation_res_reserve() local
[all …]
vmwgfx_gem.c
54 struct vmw_bo *vbo = to_vmw_bo(obj); in vmw_gem_pin_private() local
61 vmw_bo_pin_reserved(vbo, do_pin); in vmw_gem_pin_private()
159 struct vmw_bo *vbo; in vmw_prime_import_sg_table() local
174 ret = vmw_bo_create(dev_priv, &params, &vbo); in vmw_prime_import_sg_table()
178 vbo->tbo.base.funcs = &vmw_gem_object_funcs; in vmw_prime_import_sg_table()
180 gem = &vbo->tbo.base; in vmw_prime_import_sg_table()
194 struct vmw_bo *vbo; in vmw_gem_object_create_ioctl() local
199 req->size, &handle, &vbo); in vmw_gem_object_create_ioctl()
204 rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node); in vmw_gem_object_create_ioctl()
208 drm_gem_object_put(&vbo->tbo.base); in vmw_gem_object_create_ioctl()
vmwgfx_resource.c
751 void vmw_resource_unbind_list(struct vmw_bo *vbo) in vmw_resource_unbind_list() argument
754 .bo = &vbo->tbo, in vmw_resource_unbind_list()
758 dma_resv_assert_held(vbo->tbo.base.resv); in vmw_resource_unbind_list()
759 while (!RB_EMPTY_ROOT(&vbo->res_tree)) { in vmw_resource_unbind_list()
760 struct rb_node *node = vbo->res_tree.rb_node; in vmw_resource_unbind_list()
772 (void) ttm_bo_wait(&vbo->tbo, false, false); in vmw_resource_unbind_list()
969 struct vmw_bo *vbo = NULL; in vmw_resource_pin() local
972 vbo = res->guest_memory_bo; in vmw_resource_pin()
974 ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL); in vmw_resource_pin()
977 if (!vbo->tbo.pin_count) { in vmw_resource_pin()
[all …]
vmwgfx_ttm_buffer.c
600 struct vmw_bo *vbo; in vmw_bo_create_and_populate() local
611 ret = vmw_bo_create(dev_priv, &bo_params, &vbo); in vmw_bo_create_and_populate()
615 ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx); in vmw_bo_create_and_populate()
618 container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm); in vmw_bo_create_and_populate()
622 ttm_bo_unreserve(&vbo->tbo); in vmw_bo_create_and_populate()
625 *bo_p = vbo; in vmw_bo_create_and_populate()
vmwgfx_validation.h
162 struct vmw_bo *vbo);
178 struct vmw_bo *vbo,
vmwgfx_drv.h
834 void vmw_resource_unbind_list(struct vmw_bo *vbo);
839 int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
1399 void vmw_bo_dirty_scan(struct vmw_bo *vbo);
1400 int vmw_bo_dirty_add(struct vmw_bo *vbo);
1403 void vmw_bo_dirty_release(struct vmw_bo *vbo);
1404 void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
vmwgfx_drv.c
393 struct vmw_bo *vbo; in vmw_dummy_query_bo_create() local
411 ret = vmw_bo_create(dev_priv, &bo_params, &vbo); in vmw_dummy_query_bo_create()
415 ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map); in vmw_dummy_query_bo_create()
423 vmw_bo_pin_reserved(vbo, false); in vmw_dummy_query_bo_create()
424 ttm_bo_unreserve(&vbo->tbo); in vmw_dummy_query_bo_create()
428 vmw_bo_unreference(&vbo); in vmw_dummy_query_bo_create()
430 dev_priv->dummy_query_bo = vbo; in vmw_dummy_query_bo_create()
vmwgfx_kms.c
225 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo) in vmw_du_destroy_cursor_mob() argument
227 if (!(*vbo)) in vmw_du_destroy_cursor_mob()
230 ttm_bo_unpin(&(*vbo)->tbo); in vmw_du_destroy_cursor_mob()
231 vmw_bo_unreference(vbo); in vmw_du_destroy_cursor_mob()
624 struct vmw_bo *vbo = vps->cursor.bo; in vmw_du_cursor_plane_unmap_cm() local
626 if (!vbo || !vbo->map.virtual) in vmw_du_cursor_plane_unmap_cm()
629 ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL); in vmw_du_cursor_plane_unmap_cm()
631 vmw_bo_unmap(vbo); in vmw_du_cursor_plane_unmap_cm()
632 ttm_bo_unreserve(&vbo->tbo); in vmw_du_cursor_plane_unmap_cm()
vmwgfx_execbuf.c
68 struct vmw_bo *vbo; member
1177 reloc->vbo = vmw_bo; in vmw_translate_mob_ptr()
1234 reloc->vbo = vmw_bo; in vmw_translate_guest_ptr()
1713 struct vmw_bo *vbo; in vmw_cmd_res_switch_backup() local
1721 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo); in vmw_cmd_res_switch_backup()
1725 vmw_validation_res_switch_backup(sw_context->ctx, info, vbo, in vmw_cmd_res_switch_backup()
3765 bo = &reloc->vbo->tbo; in vmw_apply_relocations()
/openbmc/linux/fs/ntfs3/
attrib.c
1247 u64 vbo; in attr_data_read_resident() local
1258 vbo = page->index << PAGE_SHIFT; in attr_data_read_resident()
1260 if (vbo < data_size) { in attr_data_read_resident()
1263 u32 use = data_size - vbo; in attr_data_read_resident()
1268 memcpy(kaddr, data + vbo, use); in attr_data_read_resident()
1283 u64 vbo; in attr_data_write_resident() local
1297 vbo = page->index << PAGE_SHIFT; in attr_data_write_resident()
1299 if (vbo < data_size) { in attr_data_write_resident()
1302 u32 use = data_size - vbo; in attr_data_write_resident()
1306 memcpy(data + vbo, kaddr, use); in attr_data_write_resident()
[all …]
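
Aside: in the ntfs3 hits above, vbo is a virtual byte offset into an attribute's data, here derived from the page index. attr_data_read_resident() copies only the bytes that actually exist (data_size - vbo) into the page. A standalone sketch of that clamped copy, assuming 4 KiB pages and local names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1u << SKETCH_PAGE_SHIFT)

/* Copy at most one page of resident attribute data into page_buf,
 * starting at the virtual byte offset implied by page_index; the
 * caller would zero-fill the rest of the page. */
static uint32_t sketch_read_resident(const uint8_t *data, uint64_t data_size,
                                     uint64_t page_index, uint8_t *page_buf)
{
        uint64_t vbo = page_index << SKETCH_PAGE_SHIFT;
        uint32_t use = 0;

        if (vbo < data_size) {
                uint64_t left = data_size - vbo;

                use = left > SKETCH_PAGE_SIZE ? SKETCH_PAGE_SIZE : (uint32_t)left;
                memcpy(page_buf, data + vbo, use);
        }
        return use;
}

int main(void)
{
        uint8_t data[5000] = { 0 };
        uint8_t page[SKETCH_PAGE_SIZE];

        /* Page 1 starts at vbo 4096; only 5000 - 4096 = 904 bytes remain. */
        printf("copied %u bytes\n",
               sketch_read_resident(data, sizeof(data), 1, page));
        return 0;
}
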
dir.c
353 const struct INDEX_HDR *hdr, u64 vbo, u64 pos, in ntfs_read_hdr() argument
374 if (vbo + off < pos) in ntfs_read_hdr()
380 ctx->pos = vbo + off; in ntfs_read_hdr()
400 u64 vbo; in ntfs_readdir() local
468 vbo = (u64)bit << index_bits; in ntfs_readdir()
469 if (vbo >= i_size) { in ntfs_readdir()
483 vbo = (u64)bit << index_bits; in ntfs_readdir()
484 if (vbo >= i_size) { in ntfs_readdir()
495 vbo + sbi->record_size, pos, name, ctx); in ntfs_readdir()
fsntfs.c
774 u64 vbo; in ntfs_clear_mft_tail() local
786 vbo = (u64)from * rs; in ntfs_clear_mft_tail()
787 for (; from < to; from++, vbo += rs) { in ntfs_clear_mft_tail()
790 err = ntfs_get_bh(sbi, run, vbo, rs, &nb); in ntfs_clear_mft_tail()
1128 u64 vbo, const void *buf, size_t bytes, int sync) in ntfs_sb_write_run() argument
1132 u32 off = vbo & sbi->cluster_mask; in ntfs_sb_write_run()
1133 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next; in ntfs_sb_write_run()
1176 const struct runs_tree *run, u64 vbo) in ntfs_bread_run() argument
1183 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL)) in ntfs_bread_run()
1186 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask); in ntfs_bread_run()
[all …]
file.c
178 static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to) in ntfs_zero_range() argument
183 pgoff_t idx = vbo >> PAGE_SHIFT; in ntfs_zero_range()
184 u32 from = vbo & (PAGE_SIZE - 1); in ntfs_zero_range()
429 static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len) in ntfs_fallocate() argument
436 loff_t end = vbo + len; in ntfs_fallocate()
437 loff_t vbo_down = round_down(vbo, max_t(unsigned long, in ntfs_fallocate()
499 err = attr_punch_hole(ni, vbo, len, &frame_size); in ntfs_fallocate()
510 vbo_a = (vbo + mask) & ~mask; in ntfs_fallocate()
514 if (tmp > vbo) { in ntfs_fallocate()
515 err = ntfs_zero_range(inode, vbo, tmp); in ntfs_fallocate()
[all …]
frecord.c
950 u64 vbo; in ni_ins_attr_ext() local
981 vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0; in ni_ins_attr_ext()
997 vbo <= ((u64)mi->rno << sbi->record_bits))) { in ni_ins_attr_ext()
1044 if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) { in ni_ins_attr_ext()
1910 __u64 vbo, __u64 len) in ni_fiemap() argument
1917 CLST vcn = vbo >> cluster_bits; in ni_fiemap()
1956 end = vbo + len; in ni_fiemap()
1962 while (vbo < end) { in ni_fiemap()
1997 vbo = (u64)vcn << cluster_bits; in ni_fiemap()
2018 vbo = (u64)vcn << cluster_bits; in ni_fiemap()
[all …]
fslog.c
911 static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo) in alloc_rsttbl_from_idx() argument
920 if (vbo >= bytes) { in alloc_rsttbl_from_idx()
925 u32 bytes2idx = vbo - bytes; in alloc_rsttbl_from_idx()
937 e = Add2Ptr(rt, vbo); in alloc_rsttbl_from_idx()
949 if (off == vbo) { in alloc_rsttbl_from_idx()
971 if (off == vbo) { in alloc_rsttbl_from_idx()
1001 u32 vbo; member
1081 u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3); in lsn_to_vbo() local
1083 return vbo; in lsn_to_vbo()
1142 static int read_log_page(struct ntfs_log *log, u32 vbo, in read_log_page() argument
[all …]
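
Aside: lsn_to_vbo() above converts a log sequence number to a byte offset with two shifts; shifting left by seq_num_bits and back right by seq_num_bits - 3 is the same as masking off the top seq_num_bits bits and multiplying by 8. A small standalone check of that equivalence (the seq_num_bits value is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t lsn = 0x123456789abcdefULL;    /* arbitrary 64-bit LSN */
        unsigned seq_num_bits = 45;             /* illustrative value only */

        uint64_t a = (lsn << seq_num_bits) >> (seq_num_bits - 3);
        uint64_t mask = (1ULL << (64 - seq_num_bits)) - 1;
        uint64_t b = (lsn & mask) * 8;          /* drop sequence bits, 8 bytes per unit */

        printf("%llu %llu %s\n", (unsigned long long)a,
               (unsigned long long)b, a == b ? "equal" : "differ");
        return 0;
}
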
index.c
208 size_t data_size, valid_size, vbo, off = bit >> 3; in bmp_buf_get() local
263 vbo = off & ~(size_t)sbi->block_mask; in bmp_buf_get()
265 bbuf->new_valid = vbo + blocksize; in bmp_buf_get()
271 if (vbo >= valid_size) { in bmp_buf_get()
273 } else if (vbo + blocksize > valid_size) { in bmp_buf_get()
375 size_t vbo = from >> 3; in scan_nres_bitmap() local
376 sector_t blk = (vbo & sbi->cluster_mask) >> sb->s_blocksize_bits; in scan_nres_bitmap()
377 sector_t vblock = vbo >> sb->s_blocksize_bits; in scan_nres_bitmap()
390 vcn = vbo >> sbi->cluster_bits; in scan_nres_bitmap()
422 vbo = (u64)vblock << sb->s_blocksize_bits; in scan_nres_bitmap()
[all …]
bitmap.c
508 size_t wpos, wbit, iw, vbo; in wnd_rescan() local
517 vbo = 0; in wnd_rescan()
528 vbo * 8 - prev_tail, in wnd_rescan()
543 u32 off = vbo & sbi->cluster_mask; in wnd_rescan()
545 if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits, in wnd_rescan()
569 wbit = vbo * 8; in wnd_rescan()
613 vbo += blocksize; in wnd_rescan()
682 size_t vbo; in wnd_map() local
690 vbo = (u64)iw << sb->s_blocksize_bits; in wnd_map()
692 if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen, in wnd_map()
[all …]
record.c
121 u64 vbo = (u64)mi->rno << sbi->record_bits; in mi_read() local
133 err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb); in mi_read()
152 vbo >> sbi->cluster_bits); in mi_read()
162 err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb); in mi_read()
405 u64 vbo = (u64)rno << sbi->record_bits; in mi_format_new() local
446 err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size, in mi_format_new()
ntfs_fs.h
453 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
454 int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
455 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
562 __u64 vbo, __u64 len);
618 u64 vbo, const void *buf, size_t bytes, int sync);
620 const struct runs_tree *run, u64 vbo);
622 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb);
623 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
626 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
631 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
[all …]
inode.c
557 static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo, in ntfs_get_block_vbo() argument
593 vcn = vbo >> cluster_bits; in ntfs_get_block_vbo()
594 off = vbo & sbi->cluster_mask; in ntfs_get_block_vbo()
630 if (vbo >= valid) in ntfs_get_block_vbo()
637 if (vbo >= valid) in ntfs_get_block_vbo()
640 if (vbo + bytes > valid) { in ntfs_get_block_vbo()
641 ni->i_valid = vbo + bytes; in ntfs_get_block_vbo()
644 } else if (vbo >= valid) { in ntfs_get_block_vbo()
647 } else if (vbo + bytes <= valid) { in ntfs_get_block_vbo()
649 } else if (vbo + block_size <= valid) { in ntfs_get_block_vbo()
[all …]
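
Aside: ntfs_get_block_vbo() above splits the virtual byte offset into a virtual cluster number and an in-cluster offset using the cluster shift and mask, then compares vbo against the valid data size. A standalone sketch of the split, assuming 4 KiB clusters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned cluster_bits = 12;                         /* 4 KiB clusters, assumed */
        uint64_t cluster_mask = (1ULL << cluster_bits) - 1;
        uint64_t vbo = 0x12345;                             /* arbitrary byte offset */

        uint64_t vcn = vbo >> cluster_bits;     /* virtual cluster number */
        uint64_t off = vbo & cluster_mask;      /* offset inside that cluster */

        printf("vbo=0x%llx -> vcn=%llu off=0x%llx\n",
               (unsigned long long)vbo, (unsigned long long)vcn,
               (unsigned long long)off);
        return 0;
}
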
/openbmc/linux/drivers/gpu/drm/imx/ipuv3/
ipuv3-plane.c
377 unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba; in ipu_plane_atomic_check() local
466 vbo = drm_plane_state_to_vbo(new_state); in ipu_plane_atomic_check()
468 if (vbo & 0x7 || vbo > 0xfffff8) in ipu_plane_atomic_check()
473 if (vbo != old_vbo) in ipu_plane_atomic_check()
586 unsigned long eba, ubo, vbo; in ipu_plane_atomic_update() local
710 vbo = drm_plane_state_to_vbo(new_state); in ipu_plane_atomic_update()
714 swap(ubo, vbo); in ipu_plane_atomic_update()
717 fb->pitches[1], ubo, vbo); in ipu_plane_atomic_update()
720 "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo, in ipu_plane_atomic_update()
/openbmc/linux/drivers/gpu/drm/vc4/
vc4_validate.c
899 struct drm_gem_dma_object *vbo = in validate_gl_shader_rec() local
908 to_vc4_bo(&vbo->base)->write_seqno); in validate_gl_shader_rec()
913 if (vbo->base.size < offset || in validate_gl_shader_rec()
914 vbo->base.size - offset < attr_size) { in validate_gl_shader_rec()
916 offset, attr_size, vbo->base.size); in validate_gl_shader_rec()
921 max_index = ((vbo->base.size - offset - attr_size) / in validate_gl_shader_rec()
931 *(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset; in validate_gl_shader_rec()
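
Aside: in vc4_validate.c above, vbo is the vertex buffer GEM object referenced by a shader record; the checks make sure the attribute at offset fits inside the BO and then derive the largest vertex index that can be fetched safely. A standalone sketch of that bounds logic (the divisor is assumed to be the per-vertex stride, since the listing truncates that line):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        size_t bo_size = 4096;          /* stand-in for vbo->base.size */
        uint32_t offset = 128;          /* attribute start inside the BO */
        uint32_t attr_size = 12;        /* bytes read per vertex */
        uint32_t stride = 16;           /* bytes between vertices, assumed divisor */

        if (bo_size < offset || bo_size - offset < attr_size) {
                printf("attribute does not fit in the BO\n");
                return 1;
        }

        uint32_t max_index = (bo_size - offset - attr_size) / stride;
        printf("max safe vertex index: %u\n", max_index);   /* 247 */
        return 0;
}
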