/openbmc/linux/fs/ntfs3/

run.c
    21   CLST vcn; /* Virtual cluster number. */  member
    34   static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)  in run_lookup() argument
    49   if (vcn < r->vcn) {  in run_lookup()
    54   if (vcn < r->vcn + r->len) {  in run_lookup()
    60   if (vcn >= r->vcn + r->len) {  in run_lookup()
    65   if (vcn >= r->vcn) {  in run_lookup()
    74   if (vcn < r->vcn) {  in run_lookup()
    78   } else if (vcn >= r->vcn + r->len) {  in run_lookup()
    104  CLST end = r->vcn + r->len;  in run_consolidate()
    108  if (n->vcn > end)  in run_consolidate()
    [all …]

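The run_lookup() hits above are consistent with a binary search over an array of runs kept sorted by starting VCN, returning either the run that covers the requested cluster or the position where a new run would be inserted. The following is a minimal standalone sketch of that idea, not the ntfs3 code itself; the demo_run type, the CLST width, and the helper name are assumptions made so the example compiles on its own.

/*
 * Minimal sketch (not the kernel code): binary search for the run that
 * contains a given virtual cluster number in an array sorted by ->vcn.
 */
#include <stdbool.h>
#include <stddef.h>

typedef unsigned int CLST;	/* simplified cluster-number type */

struct demo_run {
	CLST vcn;	/* starting virtual cluster of the run */
	CLST lcn;	/* starting logical (on-disk) cluster */
	CLST len;	/* length of the run in clusters */
};

static bool demo_run_lookup(const struct demo_run *runs, size_t count,
			    CLST vcn, size_t *index)
{
	size_t lo = 0, hi = count;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const struct demo_run *r = &runs[mid];

		if (vcn < r->vcn) {
			hi = mid;		/* target is to the left */
		} else if (vcn >= r->vcn + r->len) {
			lo = mid + 1;		/* target is to the right */
		} else {
			*index = mid;		/* vcn falls inside this run */
			return true;
		}
	}
	*index = lo;	/* insertion point when no run covers vcn */
	return false;
}

A caller would use the returned index either to read back the matching run or to splice a new run in at that position, which is how the consolidate/insert paths above appear to use it.
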
attrib.c
    61   struct runs_tree *run, const CLST *vcn)  in attr_load_runs() argument
    72   if (vcn && (evcn < *vcn || *vcn < svcn))  in attr_load_runs()
    82   vcn ? *vcn : svcn, Add2Ptr(attr, run_off),  in attr_load_runs()
    94   CLST vcn, CLST len, CLST *done, bool trim)  in run_deallocate_ex() argument
    97   CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;  in run_deallocate_ex()
    103  if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {  in run_deallocate_ex()
    131  vcn_next = vcn + clen;  in run_deallocate_ex()
    132  if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||  in run_deallocate_ex()
    133  vcn != vcn_next) {  in run_deallocate_ex()
    150  CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,  in attr_allocate_clusters() argument
    [all …]

attrlist.c
    188  u8 name_len, const CLST *vcn)  in al_find_ex() argument
    207  le_vcn = le64_to_cpu(le->vcn);  in al_find_ex()
    222  if (!vcn)  in al_find_ex()
    225  if (*vcn == le_vcn)  in al_find_ex()
    228  if (*vcn < le_vcn)  in al_find_ex()
    245  u8 name_len, CLST vcn)  in al_find_le_to_insert() argument
    260  if (!le->vcn) {  in al_find_le_to_insert()
    274  if (le64_to_cpu(le->vcn) >= vcn)  in al_find_le_to_insert()
    334  le->vcn = cpu_to_le64(svcn);  in al_add_le()
    388  bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,  in al_delete_le() argument
    [all …]

ntfs_fs.h
    424  CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
    435  int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
    441  CLST vcn);
    469  u8 name_len, const CLST *vcn);
    474  bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
    521  u8 name_len, const CLST *vcn,
    527  const __le16 *name, u8 name_len, CLST vcn,
    792  bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
    794  void run_truncate(struct runs_tree *run, CLST vcn);
    795  void run_truncate_head(struct runs_tree *run, CLST vcn);
    [all …]

file.c
    120  CLST vcn, lcn, clen;  in ntfs_extend_initialized_size() local
    124  vcn = pos >> bits;  in ntfs_extend_initialized_size()
    126  err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,  in ntfs_extend_initialized_size()
    132  pos = ((loff_t)clen + vcn) << bits;  in ntfs_extend_initialized_size()
    289  CLST vcn = from >> sbi->cluster_bits;  in ntfs_file_mmap() local
    293  for (; vcn < end; vcn += len) {  in ntfs_file_mmap()
    294  err = attr_data_get_block(ni, vcn, 1, &lcn,  in ntfs_file_mmap()
    606  CLST vcn = vbo >> cluster_bits;  in ntfs_fallocate() local
    619  for (; vcn < cend_v; vcn += clen) {  in ntfs_fallocate()
    620  err = attr_data_get_block(ni, vcn, cend_v - vcn,  in ntfs_fallocate()
    [all …]

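Several of the file.c hits convert a byte position into a VCN with a right shift by the cluster-size exponent (pos >> bits, from >> sbi->cluster_bits) and go back the other way with a left shift. Below is a small, self-contained example of that arithmetic; the 4 KiB cluster size (cluster_bits = 12) is an illustrative choice, not something taken from the hits.

/* Illustrative only: byte offset <-> virtual cluster number conversion. */
#include <stdio.h>

int main(void)
{
	const unsigned int cluster_bits = 12;	/* 4096-byte clusters (assumed) */
	unsigned long long pos = 10 * 4096ULL + 123;	/* some byte offset */

	unsigned long long vcn = pos >> cluster_bits;	/* -> 10 */
	unsigned long long cluster_start = vcn << cluster_bits;	/* -> 40960 */

	printf("pos=%llu vcn=%llu cluster_start=%llu\n", pos, vcn, cluster_start);
	return 0;
}
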
/openbmc/linux/drivers/gpu/drm/amd/amdgpu/

amdgpu_vcn.c
    94   r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);  in amdgpu_vcn_early_init()
    96   amdgpu_ucode_release(&adev->vcn.fw);  in amdgpu_vcn_early_init()
    109  INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);  in amdgpu_vcn_sw_init()
    110  mutex_init(&adev->vcn.vcn_pg_lock);  in amdgpu_vcn_sw_init()
    111  mutex_init(&adev->vcn.vcn1_jpeg1_workaround);  in amdgpu_vcn_sw_init()
    112  atomic_set(&adev->vcn.total_submission_cnt, 0);  in amdgpu_vcn_sw_init()
    113  for (i = 0; i < adev->vcn.num_vcn_inst; i++)  in amdgpu_vcn_sw_init()
    114  atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);  in amdgpu_vcn_sw_init()
    118  adev->vcn.indirect_sram = true;  in amdgpu_vcn_sw_init()
    132  adev->vcn.indirect_sram = false;  in amdgpu_vcn_sw_init()
    [all …]

vcn_v2_5.c
    86   adev->vcn.num_vcn_inst = 2;  in vcn_v2_5_early_init()
    87   adev->vcn.harvest_config = 0;  in vcn_v2_5_early_init()
    88   adev->vcn.num_enc_rings = 1;  in vcn_v2_5_early_init()
    93   for (i = 0; i < adev->vcn.num_vcn_inst; i++) {  in vcn_v2_5_early_init()
    96   adev->vcn.harvest_config |= 1 << i;  in vcn_v2_5_early_init()
    98   if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |  in vcn_v2_5_early_init()
    103  adev->vcn.num_enc_rings = 2;  in vcn_v2_5_early_init()
    127  for (j = 0; j < adev->vcn.num_vcn_inst; j++) {  in vcn_v2_5_sw_init()
    128  if (adev->vcn.harvest_config & (1 << j))  in vcn_v2_5_sw_init()
    132  VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);  in vcn_v2_5_sw_init()
    [all …]

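The vcn_v2_5.c, vcn_v3_0.c, and vcn_v4_0.c hits all follow the same pattern: each harvested (fused-off) VCN instance gets one bit in adev->vcn.harvest_config, and later per-instance loops skip any instance whose bit is set. The sketch below captures that pattern with made-up demo types rather than the real amdgpu structures.

/* Simplified sketch of the harvest-mask pattern seen in the hits above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VCN_INST 2

struct demo_dev {
	unsigned int num_vcn_inst;
	uint32_t harvest_config;	/* bit i set => instance i is fused off */
};

static bool demo_instance_harvested(const struct demo_dev *d, unsigned int i)
{
	return d->harvest_config & (1u << i);
}

int main(void)
{
	struct demo_dev dev = { .num_vcn_inst = MAX_VCN_INST, .harvest_config = 0 };

	dev.harvest_config |= 1u << 1;	/* pretend instance 1 is harvested */

	for (unsigned int i = 0; i < dev.num_vcn_inst; i++) {
		if (demo_instance_harvested(&dev, i))
			continue;	/* mirrors "if (harvest_config & (1 << i)) continue;" */
		printf("initializing VCN instance %u\n", i);
	}
	return 0;
}
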
vcn_v2_0.c
    77   adev->vcn.num_enc_rings = 1;  in vcn_v2_0_early_init()
    79   adev->vcn.num_enc_rings = 2;  in vcn_v2_0_early_init()
    105  &adev->vcn.inst->irq);  in vcn_v2_0_sw_init()
    110  for (i = 0; i < adev->vcn.num_enc_rings; ++i) {  in vcn_v2_0_sw_init()
    113  &adev->vcn.inst->irq);  in vcn_v2_0_sw_init()
    128  ring = &adev->vcn.inst->ring_dec;  in vcn_v2_0_sw_init()
    131  ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;  in vcn_v2_0_sw_init()
    135  r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,  in vcn_v2_0_sw_init()
    140  adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;  in vcn_v2_0_sw_init()
    141  adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;  in vcn_v2_0_sw_init()
    [all …]

vcn_v3_0.c
    93   adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;  in vcn_v3_0_early_init()
    94   adev->vcn.harvest_config = 0;  in vcn_v3_0_early_init()
    95   adev->vcn.num_enc_rings = 1;  in vcn_v3_0_early_init()
    98   if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |  in vcn_v3_0_early_init()
    104  adev->vcn.num_enc_rings = 0;  in vcn_v3_0_early_init()
    106  adev->vcn.num_enc_rings = 2;  in vcn_v3_0_early_init()
    148  vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;  in vcn_v3_0_sw_init()
    153  for (i = 0; i < adev->vcn.num_vcn_inst; i++) {  in vcn_v3_0_sw_init()
    156  if (adev->vcn.harvest_config & (1 << i))  in vcn_v3_0_sw_init()
    159  adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;  in vcn_v3_0_sw_init()
    [all …]

vcn_v4_0.c
    84   adev->vcn.harvest_config = VCN_HARVEST_MMSCH;  in vcn_v4_0_early_init()
    85   for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {  in vcn_v4_0_early_init()
    87   adev->vcn.harvest_config |= 1 << i;  in vcn_v4_0_early_init()
    94   adev->vcn.num_enc_rings = 1;  in vcn_v4_0_early_init()
    126  for (i = 0; i < adev->vcn.num_vcn_inst; i++) {  in vcn_v4_0_sw_init()
    129  if (adev->vcn.harvest_config & (1 << i))  in vcn_v4_0_sw_init()
    134  atomic_set(&adev->vcn.inst[i].sched_score, 1);  in vcn_v4_0_sw_init()
    136  atomic_set(&adev->vcn.inst[i].sched_score, 0);  in vcn_v4_0_sw_init()
    140  VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);  in vcn_v4_0_sw_init()
    146  VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);  in vcn_v4_0_sw_init()
    [all …]

vcn_v4_0_3.c
    71   adev->vcn.num_enc_rings = 1;  in vcn_v4_0_3_early_init()
    84   fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;  in vcn_v4_0_3_fw_shared_init()
    89   amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);  in vcn_v4_0_3_fw_shared_init()
    119  VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);  in vcn_v4_0_3_sw_init()
    123  for (i = 0; i < adev->vcn.num_vcn_inst; i++) {  in vcn_v4_0_3_sw_init()
    126  ring = &adev->vcn.inst[i].ring_enc[0];  in vcn_v4_0_3_sw_init()
    131  (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +  in vcn_v4_0_3_sw_init()
    135  (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +  in vcn_v4_0_3_sw_init()
    138  ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);  in vcn_v4_0_3_sw_init()
    139  sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);  in vcn_v4_0_3_sw_init()
    [all …]

vcn_v1_0.c
    71   adev->vcn.num_enc_rings = 2;  in vcn_v1_0_early_init()
    97   VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);  in vcn_v1_0_sw_init()
    102  for (i = 0; i < adev->vcn.num_enc_rings; ++i) {  in vcn_v1_0_sw_init()
    104  &adev->vcn.inst->irq);  in vcn_v1_0_sw_init()
    114  adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;  in vcn_v1_0_sw_init()
    122  ring = &adev->vcn.inst->ring_dec;  in vcn_v1_0_sw_init()
    125  r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,  in vcn_v1_0_sw_init()
    130  adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =  in vcn_v1_0_sw_init()
    132  adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =  in vcn_v1_0_sw_init()
    134  adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =  in vcn_v1_0_sw_init()
    [all …]

vega10_reg_init.c
    83   adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL64_VCN0_1;  in vega10_doorbell_index_init()
    84   adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_DOORBELL64_VCN2_3;  in vega10_doorbell_index_init()
    85   adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_DOORBELL64_VCN4_5;  in vega10_doorbell_index_init()
    86   adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_DOORBELL64_VCN6_7;  in vega10_doorbell_index_init()

vega20_reg_init.c
    89   adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCN0_1;  in vega20_doorbell_index_init()
    90   adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCN2_3;  in vega20_doorbell_index_init()
    91   adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCN4_5;  in vega20_doorbell_index_init()
    92   adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCN6_7;  in vega20_doorbell_index_init()

/openbmc/linux/fs/ntfs/

runlist.c
    151  if ((dst->vcn + dst->length) != src->vcn)  in ntfs_are_rl_mergeable()
    238  dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;  in ntfs_rl_append()
    242  dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;  in ntfs_rl_append()
    286  disc = (src[0].vcn > 0);  in ntfs_rl_insert()
    296  disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);  in ntfs_rl_insert()
    325  dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;  in ntfs_rl_insert()
    328  dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;  in ntfs_rl_insert()
    333  dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;  in ntfs_rl_insert()
    334  dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;  in ntfs_rl_insert()
    336  dst[loc].vcn = 0;  in ntfs_rl_insert()
    [all …]

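The ntfs_are_rl_mergeable() hit above checks whether one runlist fragment ends exactly where the next begins ((dst->vcn + dst->length) != src->vcn). Below is a hedged sketch of such a contiguity test with simplified types; the extra LCN check is an assumption about what merging adjacent on-disk runs would also require, and is not shown in the hit itself.

/* Illustrative contiguity check in the spirit of ntfs_are_rl_mergeable(). */
#include <stdbool.h>

typedef long long VCN;
typedef long long LCN;

struct demo_rl_element {
	VCN vcn;		/* starting virtual cluster number */
	LCN lcn;		/* starting logical cluster number */
	long long length;	/* run length in clusters */
};

static bool demo_rl_mergeable(const struct demo_rl_element *dst,
			      const struct demo_rl_element *src)
{
	/* Virtually contiguous: dst must run right up to src. */
	if (dst->vcn + dst->length != src->vcn)
		return false;
	/* Physically contiguous as well (assumed requirement for merging). */
	return dst->lcn + dst->length == src->lcn;
}
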
index.c
    108  VCN vcn, old_vcn;  in ntfs_index_lookup() local
    248  vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));  in ntfs_index_lookup()
    264  page = ntfs_map_page(ia_mapping, vcn <<  in ntfs_index_lookup()
    276  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<  in ntfs_index_lookup()
    288  (long long)vcn, idx_ni->mft_no);  in ntfs_index_lookup()
    291  if (sle64_to_cpu(ia->index_block_vcn) != vcn) {  in ntfs_index_lookup()
    297  (unsigned long long)vcn, idx_ni->mft_no);  in ntfs_index_lookup()
    305  "driver bug.", (unsigned long long)vcn,  in ntfs_index_lookup()
    316  "driver.", (unsigned long long)vcn,  in ntfs_index_lookup()
    324  (unsigned long long)vcn, idx_ni->mft_no);  in ntfs_index_lookup()
    [all …]

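In index.c (and the nearly identical dir.c hits below), the child-node VCN of an index entry is read from the last eight bytes of the entry (sle64_to_cpup at ie + le16_to_cpu(ie->length) - 8) and then used to map the corresponding index-allocation block. The following is a minimal sketch of just that extraction step; the struct layout is a trimmed-down assumption, and the sketch assumes a little-endian host instead of using the kernel's byte-order helpers.

/* Sketch: pull the sub-node VCN out of the tail of an index entry. */
#include <stdint.h>
#include <string.h>

typedef int64_t VCN;

/* Trimmed-down stand-in for an NTFS index entry header (assumption). */
struct demo_index_entry {
	uint16_t length;	/* total size of this entry in bytes */
	/* key data, flags, etc. omitted for the sketch */
};

static VCN demo_index_entry_subnode_vcn(const struct demo_index_entry *ie)
{
	int64_t vcn_le;

	/* When an entry points to a child node, its VCN occupies the
	 * final eight bytes of the entry. */
	memcpy(&vcn_le, (const uint8_t *)ie + ie->length - 8, sizeof(vcn_le));
	return (VCN)vcn_le;	/* little-endian host assumed for the sketch */
}
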
dir.c
    80   VCN vcn, old_vcn;  in ntfs_lookup_inode_by_name() local
    292  vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));  in ntfs_lookup_inode_by_name()
    308  page = ntfs_map_page(ia_mapping, vcn <<  in ntfs_lookup_inode_by_name()
    320  ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<  in ntfs_lookup_inode_by_name()
    332  (unsigned long long)vcn, dir_ni->mft_no);  in ntfs_lookup_inode_by_name()
    335  if (sle64_to_cpu(ia->index_block_vcn) != vcn) {  in ntfs_lookup_inode_by_name()
    341  (unsigned long long)vcn, dir_ni->mft_no);  in ntfs_lookup_inode_by_name()
    350  (unsigned long long)vcn, dir_ni->mft_no,  in ntfs_lookup_inode_by_name()
    360  "driver.", (unsigned long long)vcn,  in ntfs_lookup_inode_by_name()
    368  (unsigned long long)vcn, dir_ni->mft_no);  in ntfs_lookup_inode_by_name()
    [all …]

logfile.c
    714  VCN vcn, end_vcn;  in ntfs_empty_logfile() local
    738  vcn = 0;  in ntfs_empty_logfile()
    746  if (unlikely(!rl || vcn < rl->vcn || !rl->length)) {  in ntfs_empty_logfile()
    748  err = ntfs_map_runlist_nolock(log_ni, vcn, NULL);  in ntfs_empty_logfile()
    755  BUG_ON(!rl || vcn < rl->vcn || !rl->length);  in ntfs_empty_logfile()
    758  while (rl->length && vcn >= rl[1].vcn)  in ntfs_empty_logfile()
    771  vcn = rl->vcn;  in ntfs_empty_logfile()
    782  if (rl[1].vcn > end_vcn)  in ntfs_empty_logfile()
    783  len = end_vcn - rl->vcn;  in ntfs_empty_logfile()
    819  } while ((++rl)->vcn < end_vcn);  in ntfs_empty_logfile()

aops.c
    167  VCN vcn;  in ntfs_read_block() local
    242  vcn = (VCN)iblock << blocksize_bits >>  in ntfs_read_block()
    253  while (rl->length && rl[1].vcn <= vcn)  in ntfs_read_block()
    255  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);  in ntfs_read_block()
    283  err = ntfs_map_runlist(ni, vcn);  in ntfs_read_block()
    308  ni->type, (unsigned long long)vcn,  in ntfs_read_block()
    533  VCN vcn;  in ntfs_write_block() local
    701  vcn = (VCN)block << blocksize_bits;  in ntfs_write_block()
    702  vcn_ofs = vcn & vol->cluster_size_mask;  in ntfs_write_block()
    703  vcn >>= vol->cluster_size_bits;  in ntfs_write_block()
    [all …]

attrib.h
    49   extern int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn,
    51   extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);
    53   extern LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
    57   const VCN vcn, ntfs_attr_search_ctx *ctx);

runlist.h
    29   VCN vcn; /* vcn = Starting virtual cluster number. */  member
    65   extern LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn);
    70   const VCN vcn);

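runlist.h shows the runlist_element layout (its starting vcn, plus lcn and length elsewhere) and declares ntfs_rl_vcn_to_lcn(). The sketch below walks a zero-length-terminated runlist to translate a VCN into an LCN; it is a simplified illustration that assumes the list is sorted by vcn, that the requested vcn is not below the first element, and that holes and errors are encoded as negative lcn values, as the legacy driver's conventions suggest.

/* Sketch of a VCN-to-LCN walk over a zero-length-terminated runlist. */
typedef long long VCN;
typedef long long LCN;

#define DEMO_LCN_ENOENT (-1LL)

struct demo_runlist_element {
	VCN vcn;		/* starting virtual cluster number of this run */
	LCN lcn;		/* starting logical cluster, or a negative code */
	long long length;	/* run length in clusters; 0 terminates the list */
};

static LCN demo_rl_vcn_to_lcn(const struct demo_runlist_element *rl, VCN vcn)
{
	for (; rl->length; rl++) {
		/* Elements are vcn-sorted and contiguous, so the covering run
		 * is the last one that starts at or before vcn. */
		if (vcn < rl[1].vcn) {
			if (rl->lcn < 0)
				return rl->lcn;	/* sparse hole or error code */
			return rl->lcn + (vcn - rl->vcn);
		}
	}
	return DEMO_LCN_ENOENT;	/* vcn lies past the mapped region */
}
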
attrib.c
    70   int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)  in ntfs_map_runlist_nolock() argument
    84   (unsigned long long)vcn);  in ntfs_map_runlist_nolock()
    120  if (vcn >= allocated_size_vcn || (a->type == ni->type &&  in ntfs_map_runlist_nolock()
    125  <= vcn && end_vcn >= vcn))  in ntfs_map_runlist_nolock()
    153  CASE_SENSITIVE, vcn, NULL, 0, ctx);  in ntfs_map_runlist_nolock()
    169  if (unlikely(vcn && vcn >= end_vcn)) {  in ntfs_map_runlist_nolock()
    284  int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)  in ntfs_map_runlist() argument
    290  if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=  in ntfs_map_runlist()
    292  err = ntfs_map_runlist_nolock(ni, vcn, NULL);  in ntfs_map_runlist()
    327  LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,  in ntfs_attr_vcn_to_lcn_nolock() argument
    [all …]

compress.c
    478  VCN vcn;  in ntfs_read_compressed_block() local
    596  for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;  in ntfs_read_compressed_block()
    597  vcn++) {  in ntfs_read_compressed_block()
    607  while (rl->length && rl[1].vcn <= vcn)  in ntfs_read_compressed_block()
    609  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);  in ntfs_read_compressed_block()
    613  (unsigned long long)vcn,  in ntfs_read_compressed_block()
    630  if (!ntfs_map_runlist(ni, vcn))  in ntfs_read_compressed_block()
    730  if (vcn == start_vcn - cb_clusters) {  in ntfs_read_compressed_block()
    773  } else if (vcn == start_vcn) {  in ntfs_read_compressed_block()

lcnalloc.c
    388  rl[rlpos].vcn = rl[rlpos - 1].vcn +  in ntfs_cluster_alloc()
    393  rl[rlpos].vcn = start_vcn;  in ntfs_cluster_alloc()
    726  rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length;  in ntfs_cluster_alloc()
    883  delta = start_vcn - rl->vcn;  in __ntfs_cluster_free()
    916  VCN vcn;  in __ntfs_cluster_free() local
    919  vcn = rl->vcn;  in __ntfs_cluster_free()
    920  rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);  in __ntfs_cluster_free()

mft.c
    525  VCN vcn;  in ntfs_sync_mft_mirror() local
    531  vcn = ((VCN)mft_no << vol->mft_record_size_bits) +  in ntfs_sync_mft_mirror()
    533  vcn_ofs = vcn & vol->cluster_size_mask;  in ntfs_sync_mft_mirror()
    534  vcn >>= vol->cluster_size_bits;  in ntfs_sync_mft_mirror()
    546  while (rl->length && rl[1].vcn <= vcn)  in ntfs_sync_mft_mirror()
    548  lcn = ntfs_rl_vcn_to_lcn(rl, vcn);  in ntfs_sync_mft_mirror()
    718  VCN vcn;  in write_mft_record_nolock() local
    724  vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +  in write_mft_record_nolock()
    726  vcn_ofs = vcn & vol->cluster_size_mask;  in write_mft_record_nolock()
    727  vcn >>= vol->cluster_size_bits;  in write_mft_record_nolock()
    [all …]

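Both ntfs_sync_mft_mirror() and write_mft_record_nolock() above derive the VCN of an MFT record by shifting the record number by mft_record_size_bits to get a byte offset (the trailing addend is elided in the hits), then splitting that offset into a cluster number and an in-cluster offset with cluster_size_mask and cluster_size_bits. Here is a small worked example of that arithmetic; the 1 KiB record size and 4 KiB cluster size are illustrative assumptions, not values taken from the source.

/* Worked example of the MFT-record-to-VCN arithmetic seen above. */
#include <stdio.h>

int main(void)
{
	const unsigned int mft_record_size_bits = 10;	/* 1 KiB MFT records (assumed) */
	const unsigned int cluster_size_bits = 12;	/* 4 KiB clusters (assumed) */
	const unsigned long long cluster_size_mask = (1ULL << cluster_size_bits) - 1;

	unsigned long long mft_no = 37;

	/* Byte offset of the record inside the $MFT data stream. */
	unsigned long long byte_off = mft_no << mft_record_size_bits;

	unsigned long long vcn_ofs = byte_off & cluster_size_mask;	/* offset within the cluster */
	unsigned long long vcn = byte_off >> cluster_size_bits;	/* virtual cluster number */

	printf("mft_no=%llu -> vcn=%llu, offset-in-cluster=%llu\n",
	       mft_no, vcn, vcn_ofs);
	return 0;
}
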