/openbmc/qemu/tests/unit/test-interval-tree.c:
    12  static IntervalTreeNode nodes[20];
    in test_find_one_point():
    41  nodes[0].start = 1;
    42  nodes[0].last = 1;
    44  interval_tree_insert(&nodes[0], &root);
    46  g_assert(interval_tree_iter_first(&root, 0, 9) == &nodes[0]);
    47  g_assert(interval_tree_iter_next(&nodes[0], 0, 9) == NULL);
    49  g_assert(interval_tree_iter_next(&nodes[0], 0, 0) == NULL);
    50  g_assert(interval_tree_iter_first(&root, 0, 1) == &nodes[0]);
    51  g_assert(interval_tree_iter_first(&root, 1, 1) == &nodes[0]);
    52  g_assert(interval_tree_iter_first(&root, 1, 2) == &nodes[0]);
    [all …]

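For readers unfamiliar with the API the test above exercises, here is a minimal sketch of inserting ranges and iterating overlaps. The function names come from the excerpt itself; the IntervalTreeRoot type and header path are assumptions based on QEMU's include/qemu/interval-tree.h.

    /*
     * Minimal sketch of the QEMU interval-tree API, assuming the types
     * and iterators declared in include/qemu/interval-tree.h.
     */
    #include "qemu/osdep.h"
    #include "qemu/interval-tree.h"

    static void find_overlaps(IntervalTreeRoot *root,
                              uint64_t start, uint64_t last)
    {
        /* Walk every stored range that overlaps [start, last]. */
        for (IntervalTreeNode *n = interval_tree_iter_first(root, start, last);
             n;
             n = interval_tree_iter_next(n, start, last)) {
            printf("overlap: [%" PRIu64 ", %" PRIu64 "]\n", n->start, n->last);
        }
    }

    static void demo(void)
    {
        IntervalTreeRoot root = {};
        IntervalTreeNode a = { .start = 1,  .last = 1  };
        IntervalTreeNode b = { .start = 10, .last = 19 };

        interval_tree_insert(&a, &root);
        interval_tree_insert(&b, &root);

        find_overlaps(&root, 0, 9);   /* finds only 'a' */
        find_overlaps(&root, 5, 15);  /* finds only 'b' */
    }
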
/openbmc/linux/lib/interval_tree.c:
    21   * Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
    22   * span of nodes. This makes nodes[0]->last the end of that contiguous used span
    23   * of indexes that started at the original nodes[1]->start. nodes[1] is now the
    24   * first node starting the next used span. A hole span is between nodes[0]->last
    25   * and nodes[1]->start. nodes[1] must be !NULL.
    in interval_tree_span_iter_next_gap():
    30  struct interval_tree_node *cur = state->nodes[1];
    32  state->nodes[0] = cur;
    34  if (cur->last > state->nodes[0]->last)
    35          state->nodes[0] = cur;
    38  } while (cur && (state->nodes[0]->last >= cur->start ||
    [all …]

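The gap-rolling logic above backs the kernel's interval-tree span iterator. A hedged sketch of how a caller typically consumes it — the macro and field names follow include/linux/interval_tree.h in kernels that enable CONFIG_INTERVAL_TREE_SPAN_ITER, and should be treated as assumptions:

    /*
     * Walk [first, last] and visit each maximal "used" span and each
     * "hole" between stored intervals exactly once.
     */
    #include <linux/interval_tree.h>
    #include <linux/printk.h>

    static void walk_spans(struct rb_root_cached *itree,
                           unsigned long first, unsigned long last)
    {
        struct interval_tree_span_iter span;

        interval_tree_for_each_span(&span, itree, first, last) {
            if (span.is_hole)
                pr_info("hole: %lu-%lu\n", span.start_hole, span.last_hole);
            else
                pr_info("used: %lu-%lu\n", span.start_used, span.last_used);
        }
    }
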
/openbmc/linux/lib/interval_tree_test.c:
    14  __param(int, nnodes, 100, "Number of nodes in the interval tree");
    19  __param(bool, search_all, false, "Searches will iterate all nodes in the tree");
    24  static struct interval_tree_node *nodes = NULL;
    in init():
    49  nodes[i].start = a;
    50  nodes[i].last = b;
    in interval_tree_test_init():
    68  nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
    70  if (!nodes)
    75  kfree(nodes);
    88  interval_tree_insert(nodes + j, &root);
    90  interval_tree_remove(nodes + j, &root);
    [all …]

/openbmc/linux/lib/rbtree_test.c:
    14  __param(int, nnodes, 100, "Number of nodes in the rb-tree");
    28  static struct test_node *nodes = NULL;
    in init():
    153  nodes[i].key = prandom_u32_state(&rnd);
    154  nodes[i].val = prandom_u32_state(&rnd);
    in rbtree_test_init():
    248  nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
    249  if (!nodes)
    261  insert(nodes + j, &root);
    263  erase(nodes + j, &root);
    277  insert_cached(nodes + j, &root);
    279  erase_cached(nodes + j, &root);
    [all …]

/openbmc/qemu/docs/sphinx/qapidoc.py:
    33  from docutils import nodes
    44  from sphinx.util.nodes import nested_parse_with_titles
    68  """A QAPI schema visitor which generates docutils/Sphinx nodes
    70  This class builds up a tree of docutils/Sphinx nodes corresponding
    77  nodes. Once you've added all the documentation via 'freeform' and
    79  the final list of document nodes (in a form suitable for returning
    85  self._top_node = nodes.section()
    91  term should be a list of Text and literal nodes.
    94  - a list of Text and literal nodes, which will be put into
    97  dlitem = nodes.definition_list_item()
    [all …]

/openbmc/linux/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c:
    in txgbe_swnodes_register():
    25  struct txgbe_nodes *nodes = &txgbe->nodes;
    32  snprintf(nodes->gpio_name, sizeof(nodes->gpio_name), "txgbe_gpio-%x", id);
    33  snprintf(nodes->i2c_name, sizeof(nodes->i2c_name), "txgbe_i2c-%x", id);
    34  snprintf(nodes->sfp_name, sizeof(nodes->sfp_name), "txgbe_sfp-%x", id);
    35  snprintf(nodes->phylink_name, sizeof(nodes->phylink_name), "txgbe_phylink-%x", id);
    37  swnodes = nodes->swnodes;
    46  nodes->gpio_props[0] = PROPERTY_ENTRY_STRING("pinctrl-names", "default");
    47  swnodes[SWNODE_GPIO] = NODE_PROP(nodes->gpio_name, nodes->gpio_props);
    48  nodes->gpio0_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 0, GPIO_ACTIVE_HIGH);
    49  nodes->gpio1_ref[0] = SOFTWARE_NODE_REFERENCE(&swnodes[SWNODE_GPIO], 1, GPIO_ACTIVE_HIGH);
    [all …]

/openbmc/linux/drivers/gpu/drm/tests/drm_mm_test.c:
    in drm_test_mm_debug():
    251  struct drm_mm_node nodes[2];
    253  /* Create a small drm_mm with a couple of nodes and a few holes, and
    259  memset(nodes, 0, sizeof(nodes));
    260  nodes[0].start = 512;
    261  nodes[0].size = 1024;
    262  KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[0]),
    264  nodes[0].start, nodes[0].size);
    266  nodes[1].size = 1024;
    267  nodes[1].start = 4096 - 512 - nodes[1].size;
    268  KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
    [all …]

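As context for the reserve calls above, here is a minimal sketch of the drm_mm flow: initialize a manager over an offset range, reserve a node at a caller-chosen placement, then tear down. Error handling is abbreviated and the flow is illustrative, not the test's actual body.

    #include <drm/drm_mm.h>

    static int demo_drm_mm(void)
    {
        struct drm_mm mm;
        struct drm_mm_node fixed = {};
        int ret;

        drm_mm_init(&mm, 0, 4096);      /* manage offsets [0, 4096) */

        fixed.start = 512;               /* pre-chosen placement */
        fixed.size = 1024;
        ret = drm_mm_reserve_node(&mm, &fixed); /* fails if range is busy */
        if (ret)
            goto out;

        drm_mm_remove_node(&fixed);
    out:
        drm_mm_takedown(&mm);            /* mm must be empty here */
        return ret;
    }
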
/openbmc/linux/Documentation/admin-guide/mm/numa_memory_policy.rst:
    17   which is an administrative mechanism for restricting the nodes from which
    40   allocations across all nodes with "sufficient" memory, so as
    164  an optional set of nodes. The mode determines the behavior of the
    166  and the optional set of nodes can be viewed as the arguments to the
    188  does not use the optional set of nodes.
    190  It is an error for the set of nodes specified for this policy to
    195  nodes specified by the policy. Memory will be allocated from
    202  allocation fails, the kernel will search other nodes, in order
    222  page granularity, across the nodes specified in the policy.
    227  Interleave mode indexes the set of nodes specified by the
    [all …]

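The mode-plus-nodemask split described above maps directly onto the set_mempolicy(2) interface: the mode is the operation, the nodemask its argument. A hedged userspace sketch (build with -lnuma for the syscall wrappers):

    #define _GNU_SOURCE
    #include <numaif.h>     /* MPOL_* constants and set_mempolicy() */
    #include <stdio.h>

    int main(void)
    {
        /* Interleave new anonymous pages across nodes 0 and 1. */
        unsigned long nodemask = (1UL << 0) | (1UL << 1);

        if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
                          8 * sizeof(nodemask)) != 0) {
            perror("set_mempolicy");
            return 1;
        }
        /* Allocations made from here on are spread page-by-page
         * across the nodes in the mask. */
        return 0;
    }
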
/openbmc/openbmc-test-automation/xcat/test_xcat_group.robot:
    21  Add BMC Nodes To XCAT
    22      [Documentation]  Connect and add BMC nodes.
    25      # Add BMC nodes one by one and check whether each is successfully added.
    27      Add Nodes To XCAT  ${bmc}
    54  Add Nodes To Group List
    55      [Documentation]  Add BMC nodes into group.
    58      # Add BMC nodes to group and validate.
    60      Add Nodes To Group  ${bmc}  ${GROUP}
    74      ${nodes}=  Get List Of Nodes In Group  ${GROUP}
    75      Should Not Be Empty  ${nodes}  msg=Group is empty.
    [all …]

/openbmc/linux/Documentation/devicetree/bindings/cpu/cpu-topology.txt:
    20  For instance in a system where CPUs support SMT, "cpu" nodes represent all
    22  In systems where SMT is not supported "cpu" nodes represent all cores present
    25  CPU topology bindings allow one to associate cpu nodes with hierarchical groups
    27  tree nodes.
    32  The cpu nodes, as per bindings defined in [4], represent the devices that
    35  A topology description containing phandles to cpu nodes that are not compliant
    44  nodes are listed.
    60  The cpu-map node's child nodes can be:
    62  - one or more cluster nodes or
    63  - one or more socket nodes in a multi-socket system
    [all …]

/openbmc/u-boot/fs/ubifs/gc.c:
    14   * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
    15   * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
    16   * nodes to the journal, at which point the garbage-collected LEB is free to be
    17   * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
    19   * to be reused. Garbage collection will cause the number of dirty index nodes
    33   * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed,
    34   * if GC moves data from LEB A to LEB B and nodes in LEB A are large, GC would
    35   * have to waste large pieces of free space at the end of LEB B, because nodes
    36   * from LEB A would not fit. And the worst situation is when all nodes are of
    107  * data_nodes_cmp - compare 2 data nodes.
    [all …]

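The data_nodes_cmp() signature above hints at how GC orders data nodes before moving them: grouping by file, then by position within the file, so nodes of the same file are written out contiguously. A hedged, self-contained sketch of that kind of comparator — the struct layout here is illustrative, not UBIFS's real scan-node type:

    #include <stdint.h>

    struct demo_data_node {
        uint64_t inum;   /* owning inode number */
        uint32_t block;  /* block index within the file */
    };

    /* Order by (inode number, block number) for write locality. */
    static int data_nodes_cmp_demo(const struct demo_data_node *a,
                                   const struct demo_data_node *b)
    {
        if (a->inum != b->inum)
            return a->inum < b->inum ? -1 : 1;
        if (a->block != b->block)
            return a->block < b->block ? -1 : 1;
        return 0;
    }
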
/openbmc/linux/Documentation/mm/numa.rst:
    47  abstractions called "nodes". Linux maps the nodes onto the physical cells
    49  architectures. As with physical cells, software nodes may contain 0 or more
    51  "closer" nodes--nodes that map to closer cells--will generally experience
    62  the emulation of additional nodes. For NUMA emulation, linux will carve up
    63  the existing nodes--or the system memory for non-NUMA platforms--into multiple
    64  nodes. Each emulated node will manage a fraction of the underlying cells'
    74  an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
    79  Because some nodes contain multiple zones containing different types of
    85  from the same node before using remote nodes which are ordered by NUMA distance.
    92  nodes' zones in the selected zonelist looking for the first zone in the list
    [all …]

/openbmc/linux/fs/ubifs/gc.c:
    14   * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
    15   * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
    16   * nodes to the journal, at which point the garbage-collected LEB is free to be
    17   * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
    19   * to be reused. Garbage collection will cause the number of dirty index nodes
    33   * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed,
    34   * if GC moves data from LEB A to LEB B and nodes in LEB A are large, GC would
    35   * have to waste large pieces of free space at the end of LEB B, because nodes
    36   * from LEB A would not fit. And the worst situation is when all nodes are of
    97   * data_nodes_cmp - compare 2 data nodes.
    [all …]

/openbmc/linux/drivers/media/pci/intel/ipu-bridge.c:
    in ipu_bridge_create_fwnode_properties():
    313  struct software_node *nodes = sensor->swnodes;
    319  SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IVSC_SENSOR_ENDPOINT]);
    321  SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IVSC_IPU_ENDPOINT]);
    323  SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_SENSOR_ENDPOINT]);
    325  SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IPU_ENDPOINT]);
    350  SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IPU_ENDPOINT]);
    352  SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_SENSOR_ENDPOINT]);
    in ipu_bridge_init_swnode_names():
    407  /* append link to distinguish nodes with same model VCM */
    in ipu_bridge_init_swnode_group():
    424  struct software_node *nodes = sensor->swnodes;
    426  sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID];
    [all …]

/openbmc/linux/Documentation/devicetree/bindings/usb/usb-device.yaml:
    17  Four types of device-tree nodes are defined: "host-controller nodes"
    18  representing USB host controllers, "device nodes" representing USB devices,
    19  "interface nodes" representing USB interfaces and "combined nodes"
    32  description: Device nodes or combined nodes.
    46  description: should be 1 for hub nodes with device nodes,
    47               should be 2 for device nodes with interface nodes.
    56  description: USB interface nodes.
    63  description: Interface nodes.

/openbmc/linux/mm/mempolicy.c:
    15   * interleave      Allocate memory interleaved over a set of nodes,
    22   * bind            Only allocate memory on a specific set of nodes,
    26   *                 the allocation to memory nodes instead
    34   * preferred many  Try a set of nodes first before normal fallback. This is
    186  int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
    187  void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
    in mpol_new_nodemask():
    203  static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
    205      if (nodes_empty(*nodes))
    207      pol->nodes = *nodes;
    in mpol_new_preferred():
    211  static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
    [all …]

/openbmc/linux/Documentation/driver-api/md/md-cluster.rst:
    54   node may write to those sectors. This is used when a new node
    60   Each node has to communicate with other nodes when starting or ending
    70   Normally all nodes hold a concurrent-read lock on this device.
    75   Messages can be broadcast to all nodes, and the sender waits for all
    76   other nodes to acknowledge the message before proceeding. Only one
    87   informs other nodes that the metadata has
    94   informs other nodes that a resync is initiated or
    104  informs other nodes that a device is being added to
    128  The DLM LVB is used to communicate within nodes of the cluster. There
    145  acknowledged by all nodes in the cluster. The BAST of the resource
    [all …]

/openbmc/linux/Documentation/filesystems/ubifs-authentication.rst:
    80   - *Index*: an on-flash B+ tree where the leaf nodes contain filesystem data
    98   Basic on-flash UBIFS entities are called *nodes*. UBIFS knows different types
    99   of nodes. E.g. data nodes (``struct ubifs_data_node``) which store chunks of file
    100  contents or inode nodes (``struct ubifs_ino_node``) which represent VFS inodes.
    101  Almost all types of nodes share a common header (``ubifs_ch``) containing basic
    104  and some less important node types like padding nodes which are used to pad
    108  as *wandering tree*, where only the changed nodes are re-written and previous
    121  a dirty-flag which marks nodes that have to be persisted the next time the
    126  on-flash filesystem structures like the index. On every commit, the TNC nodes
    135  any changes (in form of inode nodes, data nodes etc.) between commits
    [all …]

/openbmc/linux/include/linux/interconnect-provider.h:
    31   * @num_nodes: number of nodes in this device
    32   * @nodes: array of pointers to the nodes in this device
    36   struct icc_node *nodes[] __counted_by(num_nodes);
    47   * @nodes: internal list of the interconnect provider nodes
    53   * @xlate: provider-specific callback for mapping nodes from phandle arguments
    62   struct list_head nodes;
    82   * @num_links: number of links to other interconnect nodes
    84   * @node_list: the list entry in the parent provider's "nodes" list
    85   * @search_list: list used when walking the nodes graph
    86   * @reverse: pointer to previous node when walking the nodes graph
    [all …]

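The flexible array at line 36 uses the __counted_by() annotation: the element count lives in a sibling member, letting the compiler and FORTIFY bounds-check accesses. A small sketch of the allocation pattern that goes with it — the demo struct is illustrative, not the real icc_onecell_data:

    #include <linux/interconnect-provider.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_onecell {
        unsigned int num_nodes;
        struct icc_node *nodes[] __counted_by(num_nodes);
    };

    static struct demo_onecell *demo_alloc(unsigned int n)
    {
        /* struct_size() computes header + n array elements safely. */
        struct demo_onecell *d = kzalloc(struct_size(d, nodes, n),
                                         GFP_KERNEL);

        if (d)
            d->num_nodes = n;  /* set the count before touching nodes[] */
        return d;
    }
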
/openbmc/linux/drivers/md/persistent-data/dm-btree-spine.c:
    in init_ro_spine():
    128  s->nodes[0] = NULL;
    129  s->nodes[1] = NULL;
    in exit_ro_spine():
    137  unlock_block(s->info, s->nodes[i]);
    in ro_step():
    145  unlock_block(s->info, s->nodes[0]);
    146  s->nodes[0] = s->nodes[1];
    150  r = bn_read_lock(s->info, new_child, s->nodes + s->count);
    in ro_pop():
    161  unlock_block(s->info, s->nodes[s->count]);
    in ro_node():
    169  block = s->nodes[s->count - 1];
    in exit_shadow_spine():
    187  unlock_block(s->info, s->nodes[i]);
    in shadow_step():
    196  unlock_block(s->info, s->nodes[0]);
    [all …]

/openbmc/linux/tools/perf/tests/mem2node.c:
    in test__mem2node():
    50  struct memory_node nodes[3];
    52  .memory_nodes = (struct memory_node *) &nodes[0],
    53  .nr_memory_nodes = ARRAY_SIZE(nodes),
    58  for (i = 0; i < ARRAY_SIZE(nodes); i++) {
    59      nodes[i].node = test_nodes[i].node;
    60      nodes[i].size = 10;
    63      (nodes[i].set = get_bitmap(test_nodes[i].map, 10)));
    75  for (i = 0; i < ARRAY_SIZE(nodes); i++)
    76      zfree(&nodes[i].set);

/openbmc/linux/arch/x86/mm/numa_emulation.c:
    78   * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
    in split_nodes_interleave():
    109  * Calculate the number of big nodes that can be allocated as a result
    123  * Continue to fill physical nodes with fake nodes until there is no
    211  * Sets up fake nodes of `size' interleaved over physical nodes ranging from
    in split_nodes_size_interleave_uniform():
    232  * physical block and try to create nodes of at least size
    235  * In the uniform case, split the nodes strictly by physical
    252  * The limit on emulated nodes is MAX_NUMNODES, so the
    256  * (but not necessarily over physical nodes).
    270  * Fill physical nodes with fake nodes of size until there is no memory
    345  * numa_emulation - Emulate NUMA nodes
    [all …]

/openbmc/linux/security/selinux/ss/conditional.c:
    in cond_evaluate_expr():
    34   struct cond_expr_node *node = &expr->nodes[i];
    in evaluate_cond_node():
    105  avnode = node->true_list.nodes[i];
    113  avnode = node->false_list.nodes[i];
    in cond_node_destroy():
    142  kfree(node->expr.nodes);
    143  /* the avtab_ptr_t nodes are destroyed by the avtab */
    144  kfree(node->true_list.nodes);
    145  kfree(node->false_list.nodes);
    in cond_insertf():
    296  if (other->nodes[i] == node_ptr) {
    in cond_read_av_list():
    341  list->nodes = kcalloc(len, sizeof(*list->nodes), GFP_KERNEL);
    342  if (!list->nodes)
    [all …]

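cond_evaluate_expr() above walks a conditional expression stored in postfix (RPN) form with a small boolean stack: operands push a boolean's current value, operators pop and combine. A hedged, self-contained sketch of that scheme; the node layout and opcode names are illustrative, not SELinux's:

    #include <stdbool.h>

    enum demo_op { DEMO_BOOL, DEMO_NOT, DEMO_AND, DEMO_OR };

    struct demo_expr_node {
        enum demo_op op;
        int bool_idx;        /* used when op == DEMO_BOOL */
    };

    /* Returns 0/1 for the expression value, or -1 on a malformed
     * expression (stack under/overflow or leftovers). */
    static int demo_evaluate(const struct demo_expr_node *nodes, int len,
                             const bool *bool_vals)
    {
        bool stack[16];
        int sp = -1;

        for (int i = 0; i < len; i++) {
            switch (nodes[i].op) {
            case DEMO_BOOL:
                if (sp + 1 >= 16)
                    return -1;
                stack[++sp] = bool_vals[nodes[i].bool_idx];
                break;
            case DEMO_NOT:
                if (sp < 0)
                    return -1;
                stack[sp] = !stack[sp];
                break;
            case DEMO_AND:
            case DEMO_OR:
                if (sp < 1)
                    return -1;
                sp--;
                if (nodes[i].op == DEMO_AND)
                    stack[sp] = stack[sp] && stack[sp + 1];
                else
                    stack[sp] = stack[sp] || stack[sp + 1];
                break;
            }
        }
        return sp == 0 ? (int)stack[0] : -1;
    }
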
/openbmc/linux/arch/arm/mach-sunxi/mc_smp.c:
    689  * This holds any device nodes that we requested resources for,
    702  int (*get_smp_nodes)(struct sunxi_mc_smp_nodes *nodes);
    in sunxi_mc_smp_put_nodes():
    706  static void __init sunxi_mc_smp_put_nodes(struct sunxi_mc_smp_nodes *nodes)
    708      of_node_put(nodes->prcm_node);
    709      of_node_put(nodes->cpucfg_node);
    710      of_node_put(nodes->sram_node);
    711      of_node_put(nodes->r_cpucfg_node);
    712      memset(nodes, 0, sizeof(*nodes));
    in sun9i_a80_get_smp_nodes():
    715  nodes->prcm_node = of_find_compatible_node(NULL, NULL,
    717  ...
    [all …]

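The get/put pairing above is the standard device-node refcounting pattern: of_find_compatible_node() returns a node with an elevated refcount, which the caller must drop with of_node_put() when done. A minimal sketch — the compatible string is a placeholder:

    #include <linux/errno.h>
    #include <linux/of.h>

    static int demo_find_node(void)
    {
        struct device_node *np;

        np = of_find_compatible_node(NULL, NULL, "vendor,example-prcm");
        if (!np)
            return -ENODEV;

        /* ... read properties, map resources ... */

        of_node_put(np);  /* balance the reference taken by the lookup */
        return 0;
    }
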
/openbmc/openbmc-test-automation/lib/xcat/xcat_utils.robot:
    42  Get List Of BMC Nodes
    43      [Documentation]  Get list of BMC nodes.
    46      # Get the list of BMC nodes to be added.
    47      # This keyword expects a file having a list of BMC nodes.
    49      # File should have IP addresses of BMC nodes.
    57  Add Nodes To XCAT
    58      [Documentation]  Add nodes to XCAT configuration.
    116  Add Nodes To Group
    117      [Documentation]  Add BMC nodes to group.
    128  Get List Of Nodes In Group
    [all …]