/openbmc/linux/drivers/net/ethernet/engleder/ |
tsnep_selftests.c
  357  qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);  in tsnep_test_taprio()
  361  qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;  in tsnep_test_taprio()
  367  qopt->entries[0].gate_mask = 0x02;  in tsnep_test_taprio()
  368  qopt->entries[0].interval = 200000;  in tsnep_test_taprio()
  369  qopt->entries[1].gate_mask = 0x03;  in tsnep_test_taprio()
  370  qopt->entries[1].interval = 800000;  in tsnep_test_taprio()
  371  qopt->entries[2].gate_mask = 0x07;  in tsnep_test_taprio()
  372  qopt->entries[2].interval = 240000;  in tsnep_test_taprio()
  373  qopt->entries[3].gate_mask = 0x01;  in tsnep_test_taprio()
  374  qopt->entries[3].interval = 80000;  in tsnep_test_taprio()
  [all …]
|
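The tsnep self-test builds a taprio gate schedule whose entries[] tail is a C99 flexible array member sized with struct_size(). Below is a minimal userspace sketch of that allocation pattern; plain calloc/sizeof arithmetic stands in for the kernel's overflow-checked struct_size() and kzalloc(), and the struct and field names are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the offload object: a fixed header plus a
 * variable-length tail of gate entries. */
struct gate_entry {
    unsigned int gate_mask;
    unsigned int interval;   /* nanoseconds */
};

struct schedule {
    size_t num_entries;
    struct gate_entry entries[];   /* flexible array member */
};

static struct schedule *schedule_alloc(size_t n)
{
    /* struct_size(s, entries, n) in the kernel additionally guards against
     * multiplication/addition overflow; plain arithmetic is enough here. */
    struct schedule *s = calloc(1, sizeof(*s) + n * sizeof(s->entries[0]));

    if (s)
        s->num_entries = n;
    return s;
}

int main(void)
{
    struct schedule *s = schedule_alloc(4);

    if (!s)
        return 1;

    s->entries[0] = (struct gate_entry){ .gate_mask = 0x02, .interval = 200000 };
    s->entries[1] = (struct gate_entry){ .gate_mask = 0x03, .interval = 800000 };
    s->entries[2] = (struct gate_entry){ .gate_mask = 0x07, .interval = 240000 };
    s->entries[3] = (struct gate_entry){ .gate_mask = 0x01, .interval = 80000 };

    for (size_t i = 0; i < s->num_entries; i++)
        printf("gate %zu: mask 0x%02x for %u ns\n",
               i, s->entries[i].gate_mask, s->entries[i].interval);

    free(s);
    return 0;
}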
/openbmc/linux/lib/ |
stackdepot.c
   63  unsigned long entries[]; /* Variable-sized array of frames */  member
  126  unsigned long entries = 0;  in stack_depot_early_init()  local
  149  entries = 1UL << stack_bucket_number_order;  in stack_depot_early_init()
  153  entries,  in stack_depot_early_init()
  173  unsigned long entries;  in stack_depot_init()  local
  186  entries = 1UL << stack_bucket_number_order;  in stack_depot_init()
  190  entries = nr_free_buffer_pages();  in stack_depot_init()
  191  entries = roundup_pow_of_two(entries);  in stack_depot_init()
  194  entries >>= (scale - PAGE_SHIFT);  in stack_depot_init()
  196  entries <<= (PAGE_SHIFT - scale);  in stack_depot_init()
  [all …]
|
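stack_depot_init() sizes its hash table from the number of free buffer pages, rounds that up to a power of two, and then rescales it by the difference between a configured scale and PAGE_SHIFT. A hedged userspace sketch of just that sizing arithmetic follows; the helper name, constants, and inputs are stand-ins, not the kernel's.

#include <stdio.h>

#define PAGE_SHIFT 12          /* assumed 4 KiB pages for this sketch */

/* Round up to the next power of two (v must be nonzero). */
static unsigned long roundup_pow_of_two(unsigned long v)
{
    unsigned long p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

static unsigned long depot_bucket_count(unsigned long free_pages,
                                        unsigned int scale)
{
    unsigned long entries = roundup_pow_of_two(free_pages);

    /* One bucket per 2^scale bytes: shrink or grow the power-of-two count
     * depending on how scale compares to PAGE_SHIFT. */
    if (scale > PAGE_SHIFT)
        entries >>= (scale - PAGE_SHIFT);
    else
        entries <<= (PAGE_SHIFT - scale);

    return entries;
}

int main(void)
{
    printf("%lu buckets\n", depot_bucket_count(1000000, 20));
    return 0;
}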
hashtable_test.c
  125  struct hashtable_test_entry entries[3];  in hashtable_test_hash_for_each()  local
  132  entries[i].key = i;  in hashtable_test_hash_for_each()
  133  entries[i].data = i + 10;  in hashtable_test_hash_for_each()
  134  entries[i].visited = 0;  in hashtable_test_hash_for_each()
  135  hash_add(hash, &entries[i].node, entries[i].key);  in hashtable_test_hash_for_each()
  149  KUNIT_EXPECT_EQ(test, entries[j].visited, 1);  in hashtable_test_hash_for_each()
  154  struct hashtable_test_entry entries[3];  in hashtable_test_hash_for_each_safe()  local
  162  entries[i].key = i;  in hashtable_test_hash_for_each_safe()
  163  entries[i].data = i + 10;  in hashtable_test_hash_for_each_safe()
  164  entries[i].visited = 0;  in hashtable_test_hash_for_each_safe()
  [all …]
|
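hashtable_test_hash_for_each() adds three entries keyed 0..2 and then verifies that a full-table walk visits each exactly once. Here is a self-contained userspace sketch of the same add/iterate/verify shape, using a tiny separate-chaining table instead of <linux/hashtable.h>; all names below are illustrative.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NBUCKETS 8

struct entry {
    int key;
    int data;
    int visited;
    struct entry *next;     /* chain within a bucket */
};

static struct entry *buckets[NBUCKETS];

static void hash_add(struct entry *e)
{
    size_t b = (unsigned int)e->key % NBUCKETS;

    e->next = buckets[b];
    buckets[b] = e;
}

int main(void)
{
    struct entry entries[3];

    for (int i = 0; i < 3; i++) {
        entries[i] = (struct entry){ .key = i, .data = i + 10 };
        hash_add(&entries[i]);
    }

    /* Walk every bucket and every chained entry, as hash_for_each() does. */
    for (size_t b = 0; b < NBUCKETS; b++)
        for (struct entry *e = buckets[b]; e; e = e->next) {
            assert(e->data == e->key + 10);
            e->visited++;
        }

    for (int i = 0; i < 3; i++)
        assert(entries[i].visited == 1);

    printf("all entries visited exactly once\n");
    return 0;
}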
list-test.c
  387  struct list_head entries[3], *cur;  in list_test_list_cut_position()  local
  392  list_add_tail(&entries[0], &list1);  in list_test_list_cut_position()
  393  list_add_tail(&entries[1], &list1);  in list_test_list_cut_position()
  394  list_add_tail(&entries[2], &list1);  in list_test_list_cut_position()
  397  list_cut_position(&list2, &list1, &entries[1]);  in list_test_list_cut_position()
  401  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);  in list_test_list_cut_position()
  408  KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);  in list_test_list_cut_position()
  415  struct list_head entries[3], *cur;  in list_test_list_cut_before()  local
  420  list_add_tail(&entries[0], &list1);  in list_test_list_cut_before()
  421  list_add_tail(&entries[1], &list1);  in list_test_list_cut_before()
  [all …]
|
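list_test_list_cut_position() splits a three-element list after its second element and checks both halves. Below is a minimal circular doubly linked list with a cut helper, sketched in userspace C; it assumes the cut point is a real list element, whereas the kernel helper also copes with degenerate cuts, and the function bodies here are reconstructions, not copies.

#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h; h->prev = h; }

static void list_add_tail(struct list_head *item, struct list_head *head)
{
    item->prev = head->prev;
    item->next = head;
    head->prev->next = item;
    head->prev = item;
}

/* Move the leading elements of @head, up to and including @entry, onto @list. */
static void list_cut_position(struct list_head *list, struct list_head *head,
                              struct list_head *entry)
{
    struct list_head *new_first = entry->next;

    list->next = head->next;
    list->next->prev = list;
    list->prev = entry;
    entry->next = list;
    head->next = new_first;
    new_first->prev = head;
}

int main(void)
{
    struct list_head list1, list2, entries[3], *cur;
    int i = 0;

    list_init(&list1);
    list_init(&list2);
    for (int j = 0; j < 3; j++)
        list_add_tail(&entries[j], &list1);

    /* entries[0] and entries[1] move to list2; entries[2] stays on list1. */
    list_cut_position(&list2, &list1, &entries[1]);

    for (cur = list2.next; cur != &list2; cur = cur->next)
        assert(cur == &entries[i++]);
    assert(i == 2);

    for (cur = list1.next; cur != &list1; cur = cur->next)
        assert(cur == &entries[i++]);
    assert(i == 3);

    printf("cut ok\n");
    return 0;
}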
test_rhashtable.c
   73  unsigned int entries;  member
  138  unsigned int entries)  in test_rht_lookup()  argument
  142  for (i = 0; i < entries; i++) {  in test_rht_lookup()
  175  static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)  in test_bucket_stats()  argument
  202  total, atomic_read(&ht->nelems), entries, chain_len);  in test_bucket_stats()
  204  if (total != atomic_read(&ht->nelems) || total != entries)  in test_bucket_stats()
  209  unsigned int entries)  in test_rhashtable()  argument
  220  pr_info(" Adding %d keys\n", entries);  in test_rhashtable()
  222  for (i = 0; i < entries; i++) {  in test_rhashtable()
  237  test_bucket_stats(ht, entries);  in test_rhashtable()
  [all …]
|
/openbmc/u-boot/arch/x86/cpu/qemu/ |
e820.c
   12  struct e820_entry *entries)  in install_e820_map()  argument
   14  entries[0].addr = 0;  in install_e820_map()
   15  entries[0].size = ISA_START_ADDRESS;  in install_e820_map()
   16  entries[0].type = E820_RAM;  in install_e820_map()
   18  entries[1].addr = ISA_START_ADDRESS;  in install_e820_map()
   19  entries[1].size = ISA_END_ADDRESS - ISA_START_ADDRESS;  in install_e820_map()
   20  entries[1].type = E820_RESERVED;  in install_e820_map()
   27  entries[2].addr = ISA_END_ADDRESS;  in install_e820_map()
   28  entries[2].size = gd->relocaddr - TOTAL_MALLOC_LEN - ISA_END_ADDRESS;  in install_e820_map()
   29  entries[2].type = E820_RAM;  in install_e820_map()
  [all …]
|
/openbmc/u-boot/arch/x86/lib/ |
e820.c
   21  struct e820_entry *entries)  in install_e820_map()  argument
   23  entries[0].addr = 0;  in install_e820_map()
   24  entries[0].size = ISA_START_ADDRESS;  in install_e820_map()
   25  entries[0].type = E820_RAM;  in install_e820_map()
   26  entries[1].addr = ISA_START_ADDRESS;  in install_e820_map()
   27  entries[1].size = ISA_END_ADDRESS - ISA_START_ADDRESS;  in install_e820_map()
   28  entries[1].type = E820_RESERVED;  in install_e820_map()
   29  entries[2].addr = ISA_END_ADDRESS;  in install_e820_map()
   30  entries[2].size = gd->ram_size - ISA_END_ADDRESS;  in install_e820_map()
   31  entries[2].type = E820_RAM;  in install_e820_map()
  [all …]
|
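Both U-Boot install_e820_map() variants fill a caller-provided array of {addr, size, type} entries describing RAM and reserved regions and return how many were written. A hedged sketch of that table-building shape follows; the RAM size, the fallback when the table is too small, and the helper name are placeholders rather than real firmware behavior, while the ISA hole boundaries (0xa0000 to 0x100000) are the conventional values.

#include <stdint.h>
#include <stdio.h>

enum e820_type { E820_RAM = 1, E820_RESERVED = 2 };

struct e820_entry {
    uint64_t addr;
    uint64_t size;
    uint32_t type;
};

#define ISA_START_ADDRESS 0xa0000ULL
#define ISA_END_ADDRESS   0x100000ULL

/* Fill @entries with a conventional low-memory layout and return the count. */
static unsigned int install_e820_map(unsigned int max_entries,
                                     struct e820_entry *entries,
                                     uint64_t ram_size)
{
    unsigned int n = 0;

    if (max_entries < 3)
        return 0;

    entries[n++] = (struct e820_entry){ 0, ISA_START_ADDRESS, E820_RAM };
    entries[n++] = (struct e820_entry){ ISA_START_ADDRESS,
                                        ISA_END_ADDRESS - ISA_START_ADDRESS,
                                        E820_RESERVED };
    entries[n++] = (struct e820_entry){ ISA_END_ADDRESS,
                                        ram_size - ISA_END_ADDRESS, E820_RAM };
    return n;
}

int main(void)
{
    struct e820_entry map[8];
    unsigned int n = install_e820_map(8, map, 256ULL << 20);

    for (unsigned int i = 0; i < n; i++)
        printf("%#llx +%#llx type %u\n",
               (unsigned long long)map[i].addr,
               (unsigned long long)map[i].size, map[i].type);
    return 0;
}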
/openbmc/linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ |
smu_helper.c
  224  vvalue = vol_table->entries[i].value;  in phm_trim_voltage_table()
  228  if (vvalue == table->entries[j].value) {  in phm_trim_voltage_table()
  235  table->entries[table->count].value = vvalue;  in phm_trim_voltage_table()
  236  table->entries[table->count].smio_low =  in phm_trim_voltage_table()
  237  vol_table->entries[i].smio_low;  in phm_trim_voltage_table()
  265  vol_table->entries[i].value = dep_table->entries[i].mvdd;  in phm_get_svi2_mvdd_voltage_table()
  266  vol_table->entries[i].smio_low = 0;  in phm_get_svi2_mvdd_voltage_table()
  293  vol_table->entries[i].value = dep_table->entries[i].vddci;  in phm_get_svi2_vddci_voltage_table()
  294  vol_table->entries[i].smio_low = 0;  in phm_get_svi2_vddci_voltage_table()
  321  vol_table->entries[i].value = lookup_table->entries[i].us_vdd;  in phm_get_svi2_vdd_voltage_table()
  [all …]
|
/openbmc/linux/kernel/events/ |
callchain.c
   50  struct callchain_cpus_entries *entries;  in release_callchain_buffers_rcu()  local
   53  entries = container_of(head, struct callchain_cpus_entries, rcu_head);  in release_callchain_buffers_rcu()
   56  kfree(entries->cpu_entries[cpu]);  in release_callchain_buffers_rcu()
   58  kfree(entries);  in release_callchain_buffers_rcu()
   63  struct callchain_cpus_entries *entries;  in release_callchain_buffers()  local
   65  entries = callchain_cpus_entries;  in release_callchain_buffers()
   67  call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);  in release_callchain_buffers()
   74  struct callchain_cpus_entries *entries;  in alloc_callchain_buffers()  local
   83  entries = kzalloc(size, GFP_KERNEL);  in alloc_callchain_buffers()
   84  if (!entries)  in alloc_callchain_buffers()
  [all …]
|
/openbmc/linux/arch/powerpc/mm/book3s64/ |
iommu_api.c
   34  u64 entries; /* number of entries in hpas/hpages[] */  member
   57  unsigned long entries, unsigned long dev_hpa,  in mm_iommu_do_alloc()  argument
   66  ret = account_locked_vm(mm, entries, true);  in mm_iommu_do_alloc()
   70  locked_entries = entries;  in mm_iommu_do_alloc()
   80  mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));  in mm_iommu_do_alloc()
   91  mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));  in mm_iommu_do_alloc()
   92  mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));  in mm_iommu_do_alloc()
  102  chunk = min(chunk, entries);  in mm_iommu_do_alloc()
  103  for (entry = 0; entry < entries; entry += chunk) {  in mm_iommu_do_alloc()
  104  unsigned long n = min(entries - entry, chunk);  in mm_iommu_do_alloc()
  [all …]
|
/openbmc/linux/tools/lib/api/fd/ |
array.c
   15  fda->entries = NULL;  in fdarray__init()
   27  struct pollfd *entries = realloc(fda->entries, size);  in fdarray__grow()  local
   29  if (entries == NULL)  in fdarray__grow()
   34  free(entries);  in fdarray__grow()
   38  memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);  in fdarray__grow()
   42  fda->entries = entries;  in fdarray__grow()
   65  free(fda->entries);  in fdarray__exit()
   84  fda->entries[fda->nr].fd = fd;  in fdarray__add()
   85  fda->entries[fda->nr].events = revents;  in fdarray__add()
   99  entry = &from->entries[pos];  in fdarray__dup_entry_from()
  [all …]
|
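fdarray__grow() grows the pollfd array with realloc(), zeroes only the newly added slots, and only then commits the new pointer and capacity, so a failed grow leaves the existing entries usable. Here is a minimal sketch of that grow-on-demand pattern; the field names follow the snippets above, but the growth step, the secondary bookkeeping the real tool does, and the error handling are simplified guesses.

#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fdarray {
    int nr;          /* entries in use */
    int nr_alloc;    /* entries allocated */
    struct pollfd *entries;
};

static int fdarray__grow(struct fdarray *fda, int nr)
{
    int nr_alloc = fda->nr_alloc + nr;
    size_t size = sizeof(struct pollfd) * nr_alloc;
    struct pollfd *entries = realloc(fda->entries, size);

    if (entries == NULL)
        return -1;

    /* Zero only the freshly grown tail; live entries are preserved. */
    memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);

    fda->nr_alloc = nr_alloc;
    fda->entries = entries;
    return 0;
}

static int fdarray__add(struct fdarray *fda, int fd, short events)
{
    if (fda->nr == fda->nr_alloc && fdarray__grow(fda, 16) < 0)
        return -1;

    fda->entries[fda->nr].fd = fd;
    fda->entries[fda->nr].events = events;
    return fda->nr++;
}

int main(void)
{
    struct fdarray fda = { 0 };

    for (int fd = 0; fd < 40; fd++)
        if (fdarray__add(&fda, fd, POLLIN) < 0)
            return 1;

    printf("%d fds in %d slots\n", fda.nr, fda.nr_alloc);
    free(fda.entries);
    return 0;
}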
/openbmc/linux/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/ |
dcn315_clk_mgr.c
  256  .entries = {
  304  .entries = {
  341  .entries = {
  389  if (!bw_params->wm_table.entries[i].valid)  in dcn315_build_watermark_ranges()
  392  table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;  in dcn315_build_watermark_ranges()
  393  table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;  in dcn315_build_watermark_ranges()
  404  bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;  in dcn315_build_watermark_ranges()
  407  bw_params->clk_table.entries[i].dcfclk_mhz;  in dcn315_build_watermark_ranges()
  487  …struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entri…  in dcn315_clk_mgr_helper_populate_bw_params()
  504  if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])  in dcn315_clk_mgr_helper_populate_bw_params()
  [all …]
|
/openbmc/qemu/block/ |
qcow2-cache.c
   39  Qcow2CachedTable *entries;  member
   91  Qcow2CachedTable *t = &c->entries[i];  in can_clean_entry()
  109  c->entries[i].offset = 0;  in qcow2_cache_clean_unused()
  110  c->entries[i].lru_counter = 0;  in qcow2_cache_clean_unused()
  137  c->entries = g_try_new0(Qcow2CachedTable, num_tables);  in qcow2_cache_create()
  141  if (!c->entries || !c->table_array) {  in qcow2_cache_create()
  143  g_free(c->entries);  in qcow2_cache_create()
  156  assert(c->entries[i].ref == 0);  in qcow2_cache_destroy()
  160  g_free(c->entries);  in qcow2_cache_destroy()
  188  if (!c->entries[i].dirty || !c->entries[i].offset) {  in qcow2_cache_entry_flush()
  [all …]
|
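qcow2_cache_create() allocates the per-table metadata array and the table-data array together and, if either allocation fails, frees both before returning. A short sketch of that all-or-nothing construction follows, with plain calloc standing in for g_try_new0()/the aligned table allocation; the type and field names are illustrative, not QEMU's.

#include <stdlib.h>

struct cached_table {
    long offset;
    unsigned long lru_counter;
    int ref;
    int dirty;
};

struct table_cache {
    int size;                       /* number of cached tables */
    int table_size;                 /* bytes per table */
    struct cached_table *entries;   /* per-table metadata */
    void *table_array;              /* the table data itself */
};

static struct table_cache *cache_create(int num_tables, int table_size)
{
    struct table_cache *c = calloc(1, sizeof(*c));

    if (!c)
        return NULL;

    c->size = num_tables;
    c->table_size = table_size;
    c->entries = calloc(num_tables, sizeof(c->entries[0]));
    c->table_array = calloc(num_tables, table_size);

    /* Either both arrays exist or the cache is not created at all. */
    if (!c->entries || !c->table_array) {
        free(c->entries);
        free(c->table_array);
        free(c);
        return NULL;
    }
    return c;
}

int main(void)
{
    struct table_cache *c = cache_create(16, 4096);

    if (!c)
        return 1;
    free(c->entries);
    free(c->table_array);
    free(c);
    return 0;
}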
/openbmc/linux/drivers/net/ethernet/netronome/nfp/nfpcore/ |
nfp_nsp_eth.c
  281  union eth_table_entry *entries;  in __nfp_eth_read_ports()  local
  285  entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);  in __nfp_eth_read_ports()
  286  if (!entries)  in __nfp_eth_read_ports()
  289  ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);  in __nfp_eth_read_ports()
  296  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)  in __nfp_eth_read_ports()
  315  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)  in __nfp_eth_read_ports()
  316  nfp_eth_port_translate(nsp, &entries[i], i,  in __nfp_eth_read_ports()
  325  kfree(entries);  in __nfp_eth_read_ports()
  330  kfree(entries);  in __nfp_eth_read_ports()
  336  union eth_table_entry *entries;  in nfp_eth_config_start()  local
  [all …]
|
/openbmc/linux/tools/perf/util/ |
mem2node.c
   50  struct phys_entry *entries, *tmp_entries;  in mem2node__init()  local
   62  entries = zalloc(sizeof(*entries) * max);  in mem2node__init()
   63  if (!entries)  in mem2node__init()
   84  struct phys_entry *prev = &entries[j - 1];  in mem2node__init()
   93  phys_entry__init(&entries[j++], start, bsize, n->node);  in mem2node__init()
   98  tmp_entries = realloc(entries, sizeof(*entries) * j);  in mem2node__init()
  101  entries = tmp_entries;  in mem2node__init()
  105  entries[i].node, entries[i].start, entries[i].end);  in mem2node__init()
  107  phys_entry__insert(&entries[i], &map->root);  in mem2node__init()
  110  map->entries = entries;  in mem2node__init()
  [all …]
|
arm64-frame-pointer-unwind-support.c
   12  struct entries {  struct
   25  struct entries *entries = arg;  in add_entry()  argument
   27  entries->stack[entries->length++] = entry->ip;  in add_entry()
   34  struct entries entries = {};  in get_leaf_frame_caller_aarch64()  local
   56  ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);  in get_leaf_frame_caller_aarch64()
   59  if (ret || entries.length != 2)  in get_leaf_frame_caller_aarch64()
   62  return callchain_param.order == ORDER_CALLER ? entries.stack[0] : entries.stack[1];  in get_leaf_frame_caller_aarch64()
|
/openbmc/linux/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/ |
dcn314_clk_mgr.c
  359  .entries = {
  396  .entries = {
  444  if (!bw_params->wm_table.entries[i].valid)  in dcn314_build_watermark_ranges()
  447  table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;  in dcn314_build_watermark_ranges()
  448  table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;  in dcn314_build_watermark_ranges()
  459  bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;  in dcn314_build_watermark_ranges()
  462  bw_params->clk_table.entries[i].dcfclk_mhz;  in dcn314_build_watermark_ranges()
  572  …struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entri…  in dcn314_clk_mgr_helper_populate_bw_params()
  614  if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])  in dcn314_clk_mgr_helper_populate_bw_params()
  617  bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;  in dcn314_clk_mgr_helper_populate_bw_params()
  [all …]
|
/openbmc/u-boot/arch/x86/lib/fsp/ |
fsp_dram.c
   65  struct e820_entry *entries)  in install_e820_map()  argument
   76  entries[num_entries].addr = res_desc->phys_start;  in install_e820_map()
   77  entries[num_entries].size = res_desc->len;  in install_e820_map()
   80  entries[num_entries].type = E820_RAM;  in install_e820_map()
   82  entries[num_entries].type = E820_RESERVED;  in install_e820_map()
   90  entries[num_entries].addr = CONFIG_PCIE_ECAM_BASE;  in install_e820_map()
   91  entries[num_entries].size = CONFIG_PCIE_ECAM_SIZE;  in install_e820_map()
   92  entries[num_entries].type = E820_RESERVED;  in install_e820_map()
  100  entries[num_entries].addr = gd->start_addr_sp - CONFIG_STACK_SIZE;  in install_e820_map()
  101  entries[num_entries].size = gd->ram_top - gd->start_addr_sp + \  in install_e820_map()
  [all …]
|
/openbmc/qemu/util/ |
qdist.c
   21  dist->entries = g_new(struct qdist_entry, 1);  in qdist_init()
   28  g_free(dist->entries);  in qdist_destroy()
   57  entry = bsearch(&e, dist->entries, dist->n, sizeof(e), qdist_cmp);  in qdist_add()
   67  dist->entries = g_renew(struct qdist_entry, dist->entries, dist->size);  in qdist_add()
   70  entry = &dist->entries[dist->n - 1];  in qdist_add()
   73  qsort(dist->entries, dist->n, sizeof(*entry), qdist_cmp);  in qdist_add()
  114  if (dist->entries[0].count) {  in qdist_pr_internal()
  123  min = dist->entries[0].count;  in qdist_pr_internal()
  126  struct qdist_entry *e = &dist->entries[i];  in qdist_pr_internal()
  137  struct qdist_entry *e = &dist->entries[i];  in qdist_pr_internal()
  [all …]
|
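qdist_add() keeps its entries in a sorted array: it looks the bucket up with bsearch(), and only when the bucket is new does it grow the array, append, and re-sort with qsort(). A compact userspace sketch of that insert path follows; the entry layout, the plain realloc in place of g_renew(), and the post-sort re-lookup are approximations of the QEMU code, not copies.

#include <stdio.h>
#include <stdlib.h>

struct qdist_entry {
    double x;              /* bucket value */
    unsigned long count;
};

struct qdist {
    struct qdist_entry *entries;
    size_t n;
};

static int qdist_cmp(const void *ap, const void *bp)
{
    const struct qdist_entry *a = ap, *b = bp;

    if (a->x < b->x)
        return -1;
    return a->x > b->x ? 1 : 0;
}

static void qdist_add(struct qdist *dist, double x, unsigned long count)
{
    struct qdist_entry key = { .x = x };
    struct qdist_entry *entry = NULL;

    if (dist->n)
        entry = bsearch(&key, dist->entries, dist->n, sizeof(key), qdist_cmp);

    if (!entry) {
        /* New bucket: append, then restore sorted order so bsearch() works. */
        struct qdist_entry *tmp = realloc(dist->entries,
                                          (dist->n + 1) * sizeof(*tmp));
        if (!tmp)
            return;                 /* sketch: silently drop on OOM */
        dist->entries = tmp;
        dist->entries[dist->n].x = x;
        dist->entries[dist->n].count = 0;
        dist->n++;
        qsort(dist->entries, dist->n, sizeof(*dist->entries), qdist_cmp);
        entry = bsearch(&key, dist->entries, dist->n, sizeof(key), qdist_cmp);
    }
    entry->count += count;
}

int main(void)
{
    struct qdist dist = { 0 };

    qdist_add(&dist, 2.0, 1);
    qdist_add(&dist, 1.0, 3);
    qdist_add(&dist, 2.0, 1);

    for (size_t i = 0; i < dist.n; i++)
        printf("%g: %lu\n", dist.entries[i].x, dist.entries[i].count);

    free(dist.entries);
    return 0;
}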
/openbmc/linux/fs/nfs_common/ |
nfsacl.c
   95  int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0;  in nfsacl_encode()  local
   99  .array_len = encode_entries ? entries : 0,  in nfsacl_encode()
  110  if (entries > NFS_ACL_MAX_ENTRIES ||  in nfsacl_encode()
  111  xdr_encode_word(buf, base, entries))  in nfsacl_encode()
  157  u32 entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0;  in nfs_stream_encode_acl()  local
  161  .array_len = encode_entries ? entries : 0,  in nfs_stream_encode_acl()
  173  if (entries > NFS_ACL_MAX_ENTRIES)  in nfs_stream_encode_acl()
  175  if (xdr_stream_encode_u32(xdr, entries) < 0)  in nfs_stream_encode_acl()
  345  u32 entries;  in nfsacl_decode()  local
  348  if (xdr_decode_word(buf, base, &entries) ||  in nfsacl_decode()
  [all …]
|
/openbmc/linux/drivers/gpu/drm/amd/display/dc/dml/dcn321/ |
dcn321_fpu.c
  365  if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz)  in build_synthetic_soc_states()
  366  max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;  in build_synthetic_soc_states()
  367  if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz)  in build_synthetic_soc_states()
  368  max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;  in build_synthetic_soc_states()
  369  if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz)  in build_synthetic_soc_states()
  370  max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;  in build_synthetic_soc_states()
  371  if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz)  in build_synthetic_soc_states()
  372  max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;  in build_synthetic_soc_states()
  373  if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz)  in build_synthetic_soc_states()
  374  max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;  in build_synthetic_soc_states()
  [all …]
|
/openbmc/linux/arch/x86/kernel/cpu/ |
intel.c
  828  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
  829  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
  830  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
  831  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
  834  if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
  835  tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
  836  if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
  837  tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
  838  if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)  in intel_tlb_lookup()
  839  tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;  in intel_tlb_lookup()
  [all …]
|
/openbmc/linux/drivers/net/dsa/sja1105/ |
sja1105_vl.c
   27  if (list_empty(&gating_cfg->entries)) {  in sja1105_insert_gate_entry()
   28  list_add(&e->list, &gating_cfg->entries);  in sja1105_insert_gate_entry()
   32  list_for_each_entry(p, &gating_cfg->entries, list) {  in sja1105_insert_gate_entry()
   65  list_for_each_entry(e, &gating_cfg->entries, list) {  in sja1105_gating_cfg_time_to_interval()
   70  if (prev == &gating_cfg->entries)  in sja1105_gating_cfg_time_to_interval()
   76  last_e = list_last_entry(&gating_cfg->entries,  in sja1105_gating_cfg_time_to_interval()
   85  list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {  in sja1105_free_gating_config()
  144  u8 gate_state = rule->vl.entries[i].gate_state;  in sja1105_compose_gating_subschedule()
  157  time += rule->vl.entries[i].interval;  in sja1105_compose_gating_subschedule()
  353  kfree(table->entries);  in sja1105_init_virtual_links()
  [all …]
|
/openbmc/linux/tools/perf/trace/beauty/ |
ioctl.c
   41  if (nr < strarray__ioctl_tty_cmd.nr_entries && strarray__ioctl_tty_cmd.entries[nr] != NULL)  in ioctl__scnprintf_tty_cmd()
   42  return scnprintf(bf, size, "%s", strarray__ioctl_tty_cmd.entries[nr]);  in ioctl__scnprintf_tty_cmd()
   52  if (nr < strarray__drm_ioctl_cmds.nr_entries && strarray__drm_ioctl_cmds.entries[nr] != NULL)  in ioctl__scnprintf_drm_cmd()
   53  return scnprintf(bf, size, "DRM_%s", strarray__drm_ioctl_cmds.entries[nr]);  in ioctl__scnprintf_drm_cmd()
   63  …if (nr < strarray__sndrv_pcm_ioctl_cmds.nr_entries && strarray__sndrv_pcm_ioctl_cmds.entries[nr] !…  in ioctl__scnprintf_sndrv_pcm_cmd()
   64  return scnprintf(bf, size, "SNDRV_PCM_%s", strarray__sndrv_pcm_ioctl_cmds.entries[nr]);  in ioctl__scnprintf_sndrv_pcm_cmd()
   74  …if (nr < strarray__sndrv_ctl_ioctl_cmds.nr_entries && strarray__sndrv_ctl_ioctl_cmds.entries[nr] !…  in ioctl__scnprintf_sndrv_ctl_cmd()
   75  return scnprintf(bf, size, "SNDRV_CTL_%s", strarray__sndrv_ctl_ioctl_cmds.entries[nr]);  in ioctl__scnprintf_sndrv_ctl_cmd()
   85  if (nr < strarray__kvm_ioctl_cmds.nr_entries && strarray__kvm_ioctl_cmds.entries[nr] != NULL)  in ioctl__scnprintf_kvm_cmd()
   86  return scnprintf(bf, size, "KVM_%s", strarray__kvm_ioctl_cmds.entries[nr]);  in ioctl__scnprintf_kvm_cmd()
  [all …]
|
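Each perf ioctl beautifier checks the command number against the table length and for a NULL slot before formatting a name, falling back to the raw number otherwise. A small sketch of that bounds-checked lookup follows; the table contents, the fallback format, and the function name are invented for illustration, with snprintf standing in for the tool's scnprintf.

#include <stdio.h>

struct strarray {
    size_t nr_entries;
    const char **entries;
};

static const char *tty_cmds[] = { "TCGETS", "TCSETS", "TCSETSW", NULL, "TCGETA" };
static const struct strarray strarray__tty = {
    .nr_entries = sizeof(tty_cmds) / sizeof(tty_cmds[0]),
    .entries = tty_cmds,
};

static int scnprintf_cmd(char *bf, size_t size, const struct strarray *sa,
                         unsigned int nr)
{
    /* Only index the table when the slot exists and is populated. */
    if (nr < sa->nr_entries && sa->entries[nr] != NULL)
        return snprintf(bf, size, "%s", sa->entries[nr]);
    return snprintf(bf, size, "(%#x)", nr);
}

int main(void)
{
    char buf[64];

    for (unsigned int nr = 0; nr < 7; nr++) {
        scnprintf_cmd(buf, sizeof(buf), &strarray__tty, nr);
        printf("cmd %u -> %s\n", nr, buf);
    }
    return 0;
}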
/openbmc/libpldm/tests/dsp/ |
bios_table_iter.c
   27  struct pldm_bios_attr_table_entry entries[2] = {0};  in main()  local
   31  static_assert(2 * sizeof(entries[0]) == sizeof(entries), "");  in main()
   33  entries[0].attr_type = PLDM_BIOS_PASSWORD;  in main()
   34  entries[1].attr_type = PLDM_BIOS_STRING_READ_ONLY;  in main()
   36  iter = pldm_bios_table_iter_create(entries, sizeof(entries),  in main()
|
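The libpldm test builds two attribute-table entries in a plain array, uses static_assert to confirm the buffer is exactly two packed entries, and hands it to the table iterator. Below is a reduced sketch of that size check and buffer hand-off; the entry layout, the attribute-type values, and the consuming function are stand-ins, and the real pldm_bios_table_iter_create() signature is deliberately not reproduced here.

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct attr_entry {
    unsigned short attr_handle;
    unsigned char attr_type;
    unsigned char string_handle;
};

/* Stand-in consumer: walks a buffer known to hold @count fixed-size entries. */
static void iterate_entries(const void *buf, size_t len, size_t count)
{
    const struct attr_entry *e = buf;

    assert(len == count * sizeof(*e));
    for (size_t i = 0; i < count; i++)
        printf("entry %zu: type %u\n", i, (unsigned)e[i].attr_type);
}

int main(void)
{
    struct attr_entry entries[2];

    memset(entries, 0, sizeof(entries));

    /* The test's guard: the array really is two entries, nothing more. */
    static_assert(2 * sizeof(entries[0]) == sizeof(entries),
                  "entries[] must be exactly two packed entries");

    entries[0].attr_type = 1;   /* hypothetical: a password attribute */
    entries[1].attr_type = 2;   /* hypothetical: a read-only string attribute */

    iterate_entries(entries, sizeof(entries), 2);
    return 0;
}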