/openbmc/linux/drivers/block/null_blk/zoned.c
    35  struct nullb_zone *zone)  in null_init_zone_lock() argument
    38  spin_lock_init(&zone->spinlock);  in null_init_zone_lock()
    40  mutex_init(&zone->mutex);  in null_init_zone_lock()
    44  struct nullb_zone *zone)  in null_lock_zone() argument
    47  spin_lock_irq(&zone->spinlock);  in null_lock_zone()
    49  mutex_lock(&zone->mutex);  in null_lock_zone()
    53  struct nullb_zone *zone)  in null_unlock_zone() argument
    56  spin_unlock_irq(&zone->spinlock);  in null_unlock_zone()
    58  mutex_unlock(&zone->mutex);  in null_unlock_zone()
    64  struct nullb_zone *zone;  in null_init_zoned_dev() local
    [all …]

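The zoned.c matches above show the driver initializing and taking either a spinlock or a mutex for per-zone serialization, depending on how the device is configured. A minimal userspace sketch of that lock-selection pattern follows; the names (zone_lock, use_spinlock) are illustrative stand-ins, not the null_blk API.

    /* Model of the per-zone lock choice: pick the lock type once at init,
     * then route lock/unlock through the same helpers. */
    #include <pthread.h>
    #include <stdbool.h>

    struct zone_lock {
        bool use_spinlock;            /* decided once at init time */
        pthread_spinlock_t spinlock;
        pthread_mutex_t mutex;
    };

    static void zone_lock_init(struct zone_lock *zl, bool use_spinlock)
    {
        zl->use_spinlock = use_spinlock;
        if (use_spinlock)
            pthread_spin_init(&zl->spinlock, PTHREAD_PROCESS_PRIVATE);
        else
            pthread_mutex_init(&zl->mutex, NULL);
    }

    static void zone_lock(struct zone_lock *zl)
    {
        if (zl->use_spinlock)
            pthread_spin_lock(&zl->spinlock);
        else
            pthread_mutex_lock(&zl->mutex);
    }

    static void zone_unlock(struct zone_lock *zl)
    {
        if (zl->use_spinlock)
            pthread_spin_unlock(&zl->spinlock);
        else
            pthread_mutex_unlock(&zl->mutex);
    }
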
/openbmc/linux/fs/pstore/zone.c
   160  static inline int buffer_datalen(struct pstore_zone *zone)  in buffer_datalen() argument
   162  return atomic_read(&zone->buffer->datalen);  in buffer_datalen()
   165  static inline int buffer_start(struct pstore_zone *zone)  in buffer_start() argument
   167  return atomic_read(&zone->buffer->start);  in buffer_start()
   175  static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,  in psz_zone_read_buffer() argument
   178  if (!buf || !zone || !zone->buffer)  in psz_zone_read_buffer()
   180  if (off > zone->buffer_size)  in psz_zone_read_buffer()
   182  len = min_t(size_t, len, zone->buffer_size - off);  in psz_zone_read_buffer()
   183  memcpy(buf, zone->buffer->data + off, len);  in psz_zone_read_buffer()
   187  static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,  in psz_zone_read_oldbuf() argument
    [all …]

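The psz_zone_read_buffer() lines show the usual bounds-checked zone read: validate the pointers, reject an out-of-range offset, clamp the length to what remains, then copy. A standalone sketch of the same pattern, with illustrative struct and function names rather than the pstore types:

    #include <stddef.h>
    #include <string.h>
    #include <sys/types.h>

    struct zone_buf {
        char *data;
        size_t size;
    };

    static ssize_t zone_read(const struct zone_buf *zone, char *buf,
                             size_t len, size_t off)
    {
        if (!buf || !zone || !zone->data)
            return -1;                     /* the kernel code returns an errno here */
        if (off > zone->size)
            return -1;
        if (len > zone->size - off)        /* equivalent of the min_t() clamp */
            len = zone->size - off;
        memcpy(buf, zone->data + off, len);
        return (ssize_t)len;
    }
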
/openbmc/linux/mm/page_alloc.c
   305  static bool cond_accept_memory(struct zone *zone, unsigned int order);
   331  _deferred_grow_zone(struct zone *zone, unsigned int order)  in _deferred_grow_zone() argument
   333  return deferred_grow_zone(zone, order);  in _deferred_grow_zone()
   441  static int page_outside_zone_boundaries(struct zone *zone, struct page *page)  in page_outside_zone_boundaries() argument
   449  seq = zone_span_seqbegin(zone);  in page_outside_zone_boundaries()
   450  start_pfn = zone->zone_start_pfn;  in page_outside_zone_boundaries()
   451  sp = zone->spanned_pages;  in page_outside_zone_boundaries()
   452  ret = !zone_spans_pfn(zone, pfn);  in page_outside_zone_boundaries()
   453  } while (zone_span_seqretry(zone, seq));  in page_outside_zone_boundaries()
   457  pfn, zone_to_nid(zone), zone->name,  in page_outside_zone_boundaries()
    [all …]

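The page_outside_zone_boundaries() hit reads the zone span inside a seqlock read loop: sample the sequence, read start_pfn and spanned_pages, and retry if a writer changed the span in between. A simplified userspace model of that retry pattern follows; it uses plain C11 atomics and plain field reads, whereas the kernel's seqlock supplies the real memory barriers.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct zone_span {
        atomic_uint seq;                 /* even = stable, odd = write in progress */
        unsigned long start_pfn;
        unsigned long spanned_pages;
    };

    static bool pfn_outside_zone(struct zone_span *z, unsigned long pfn)
    {
        unsigned long start, span;
        unsigned int seq;

        do {
            do {                          /* wait for an even (stable) sequence */
                seq = atomic_load(&z->seq);
            } while (seq & 1);
            start = z->start_pfn;         /* plain reads; real seqlock adds barriers */
            span  = z->spanned_pages;
        } while (atomic_load(&z->seq) != seq);   /* a writer raced us: retry */

        return pfn < start || pfn >= start + span;
    }
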
/openbmc/linux/mm/show_mem.c
    26  static inline void show_node(struct zone *zone)  in show_node() argument
    29  printk("Node %d ", zone_to_nid(zone));  in show_node()
    39  struct zone *zone;  in si_mem_available() local
    45  for_each_zone(zone)  in si_mem_available()
    46  wmark_low += low_wmark_pages(zone);  in si_mem_available()
   107  struct zone *zone = &pgdat->node_zones[zone_type];  in si_meminfo_node() local
   109  if (is_highmem(zone)) {  in si_meminfo_node()
   110  managed_highpages += zone_managed_pages(zone);  in si_meminfo_node()
   111  free_highpages += zone_page_state(zone, NR_FREE_PAGES);  in si_meminfo_node()
   193  struct zone *zone;  in show_free_areas() local
    [all …]

/openbmc/linux/mm/vmstat.c
    38  static void zero_zone_numa_counters(struct zone *zone)  in zero_zone_numa_counters() argument
    43  atomic_long_set(&zone->vm_numa_event[item], 0);  in zero_zone_numa_counters()
    45  per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]  in zero_zone_numa_counters()
    54  struct zone *zone;  in zero_zones_numa_counters() local
    56  for_each_populated_zone(zone)  in zero_zones_numa_counters()
    57  zero_zone_numa_counters(zone);  in zero_zones_numa_counters()
   169  static void fold_vm_zone_numa_events(struct zone *zone)  in fold_vm_zone_numa_events() argument
   178  pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);  in fold_vm_zone_numa_events()
   184  zone_numa_event_add(zone_numa_events[item], zone, item);  in fold_vm_zone_numa_events()
   189  struct zone *zone;  in fold_vm_numa_events() local
    [all …]

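fold_vm_zone_numa_events() illustrates the per-CPU counter fold: each CPU accumulates events privately, and a fold pass drains those private counts into one zone-wide atomic total. A small standalone sketch of that pattern, with illustrative constants and names (the kernel uses per_cpu_ptr() and xchg()):

    #include <stdatomic.h>

    #define NR_CPUS   4
    #define NR_ITEMS  3    /* stand-in for the number of NUMA event items */

    struct zone_stats {
        unsigned long percpu_events[NR_CPUS][NR_ITEMS]; /* written by each CPU */
        atomic_long  vm_numa_event[NR_ITEMS];           /* zone-wide totals */
    };

    static void fold_zone_numa_events(struct zone_stats *z)
    {
        unsigned long totals[NR_ITEMS] = { 0 };

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            for (int item = 0; item < NR_ITEMS; item++) {
                totals[item] += z->percpu_events[cpu][item];
                z->percpu_events[cpu][item] = 0;   /* the kernel drains with xchg() */
            }

        for (int item = 0; item < NR_ITEMS; item++)
            atomic_fetch_add(&z->vm_numa_event[item], (long)totals[item]);
    }
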
/openbmc/linux/mm/compaction.c
   153  static void defer_compaction(struct zone *zone, int order)  in defer_compaction() argument
   155  zone->compact_considered = 0;  in defer_compaction()
   156  zone->compact_defer_shift++;  in defer_compaction()
   158  if (order < zone->compact_order_failed)  in defer_compaction()
   159  zone->compact_order_failed = order;  in defer_compaction()
   161  if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)  in defer_compaction()
   162  zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;  in defer_compaction()
   164  trace_mm_compaction_defer_compaction(zone, order);  in defer_compaction()
   168  static bool compaction_deferred(struct zone *zone, int order)  in compaction_deferred() argument
   170  unsigned long defer_limit = 1UL << zone->compact_defer_shift;  in compaction_deferred()
    [all …]

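defer_compaction() and compaction_deferred() together implement exponential backoff: each failure bumps compact_defer_shift (capped), and subsequent requests are skipped until 1 << shift attempts have been counted. A hedged standalone sketch of that logic, mirroring the snippet loosely rather than quoting the kernel verbatim:

    #include <stdbool.h>

    #define COMPACT_MAX_DEFER_SHIFT 6

    struct zone_compact_state {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
    };

    static void defer_compaction(struct zone_compact_state *z, int order)
    {
        z->compact_considered = 0;
        z->compact_defer_shift++;

        if (order < z->compact_order_failed)
            z->compact_order_failed = order;

        if (z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
            z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
    }

    static bool compaction_deferred(struct zone_compact_state *z, int order)
    {
        unsigned long defer_limit = 1UL << z->compact_defer_shift;

        if (order < z->compact_order_failed)
            return false;              /* orders below the failed one still run */

        /* count this attempt; only every defer_limit-th one actually runs */
        if (++z->compact_considered >= defer_limit) {
            z->compact_considered = defer_limit;
            return false;
        }
        return true;
    }

Doubling the skip window after every failure keeps repeated, hopeless compaction attempts from burning CPU while still probing again eventually.
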
/openbmc/linux/mm/memory_hotplug.c
   414  static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,  in find_smallest_section_pfn() argument
   425  if (zone != page_zone(pfn_to_page(start_pfn)))  in find_smallest_section_pfn()
   435  static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,  in find_biggest_section_pfn() argument
   450  if (zone != page_zone(pfn_to_page(pfn)))  in find_biggest_section_pfn()
   459  static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,  in shrink_zone_span() argument
   463  int nid = zone_to_nid(zone);  in shrink_zone_span()
   465  if (zone->zone_start_pfn == start_pfn) {  in shrink_zone_span()
   472  pfn = find_smallest_section_pfn(nid, zone, end_pfn,  in shrink_zone_span()
   473  zone_end_pfn(zone));  in shrink_zone_span()
   475  zone->spanned_pages = zone_end_pfn(zone) - pfn;  in shrink_zone_span()
    [all …]

/openbmc/linux/mm/mm_init.c
    49  struct zone *zone;  in mminit_verify_zonelist() local
    61  zone = &pgdat->node_zones[zoneid];  in mminit_verify_zonelist()
    62  if (!populated_zone(zone))  in mminit_verify_zonelist()
    68  zone->name);  in mminit_verify_zonelist()
    71  for_each_zone_zonelist(zone, z, zonelist, zoneid)  in mminit_verify_zonelist()
    72  pr_cont("%d:%s ", zone_to_nid(zone), zone->name);  in mminit_verify_zonelist()
   565  unsigned long zone, int nid)  in __init_single_page() argument
   568  set_page_links(page, zone, nid, pfn);  in __init_single_page()
   577  if (!is_highmem_idx(zone))  in __init_single_page()
   717  struct zone *zone = &pgdat->node_zones[zid];  in init_reserved_page() local
    [all …]

/openbmc/phosphor-pid-control/test/pid_zone_unittest.cpp
     6  #include "pid/zone.hpp"
    62  int64_t zone = 1;  in TEST()
    83  DbusPidZone p(zone, minThermalOutput, failSafePercent, cycleTime, m,
   120  zone = std::make_unique<DbusPidZone>(
   150  std::unique_ptr<DbusPidZone> zone;  in TEST_F()
   157  EXPECT_EQ(zoneId, zone->getZoneID());  in TEST_F()
   162  // Verifies that the zone starts in manual mode. Verifies that one can set  in TEST_F()
   164  EXPECT_FALSE(zone->getManualMode());  in TEST_F()
   166  zone->setManualMode(true);  in TEST_F()
   167  EXPECT_TRUE(zone  in TEST_F()
    58  int64_t zone = 1;  TEST() local
   146  std::unique_ptr<DbusPidZone> zone;  member in pid_control::__anon416c5d230111::PidZoneTest
    [all...]

/openbmc/phosphor-pid-control/pid/pidloop.cpp
    36  static void processThermals(std::shared_ptr<ZoneInterface> zone)  in processThermals() argument
    39  zone->updateSensors();  in processThermals()
    41  zone->clearSetPoints();  in processThermals()
    42  zone->clearRPMCeilings();  in processThermals()
    44  zone->processThermals();  in processThermals()
    46  zone->determineMaxSetPointRequest();  in processThermals()
    49  void pidControlLoop(std::shared_ptr<ZoneInterface> zone,  in pidControlLoop() argument
    62  zone->initializeLog();  in pidControlLoop()
    65  zone->initializeCache();  in pidControlLoop()
    66  processThermals(zone);  in pidControlLoop()
    [all …]

/openbmc/phosphor-pid-control/pid/builder.cpp
    43  static std::string getControlPath(int64_t zone)  in getControlPath() argument
    45  return std::string(objectPath) + std::to_string(zone);  in getControlPath()
    48  static std::string getPidControlPath(int64_t zone, std::string pidname)  in getPidControlPath() argument
    50  return std::string(objectPath) + std::to_string(zone) + "/" + pidname;  in getPidControlPath()
    76  auto zone = std::make_shared<DbusPidZone>(  in buildZones() local
    82  std::cerr << "Zone Id: " << zone->getZoneID() << "\n";  in buildZones()
    99  zone->addFanInput(i.name, i.missingIsAcceptable);  in buildZones()
   103  zone.get(), name, splitNames(inputs), info.pidInfo);  in buildZones()
   104  zone->addFanPID(std::move(pid));  in buildZones()
   105  zone->addPidFailSafePercent(splitNames(inputs),  in buildZones()
    [all …]

/openbmc/linux/tools/power/cpupower/lib/powercap.c
   124  static int sysfs_powercap_get64_val(struct powercap_zone *zone,  in sysfs_powercap_get64_val() argument
   132  strcat(file, zone->sys_name);  in sysfs_powercap_get64_val()
   146  int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val)  in powercap_get_max_energy_range_uj() argument
   148  return sysfs_powercap_get64_val(zone, GET_MAX_ENERGY_RANGE_UJ, val);  in powercap_get_max_energy_range_uj()
   151  int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val)  in powercap_get_energy_uj() argument
   153  return sysfs_powercap_get64_val(zone, GET_ENERGY_UJ, val);  in powercap_get_energy_uj()
   156  int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val)  in powercap_get_max_power_range_uw() argument
   158  return sysfs_powercap_get64_val(zone, GET_MAX_POWER_RANGE_UW, val);  in powercap_get_max_power_range_uw()
   161  int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val)  in powercap_get_power_uw() argument
   163  return sysfs_powercap_get64_val(zone, GET_POWER_UW, val);  in powercap_get_power_uw()
    [all …]

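These cpupower wrappers all funnel into one helper that reads a decimal 64-bit value from a powercap zone's sysfs attribute. A runnable standalone sketch of the same idea is below; the zone path used in main() is only an example of where such zones typically appear (e.g. /sys/class/powercap/intel-rapl:0), and the function name is illustrative.

    #include <inttypes.h>
    #include <stdio.h>

    static int powercap_read_u64(const char *zone_path, const char *attr,
                                 uint64_t *val)
    {
        char path[512];
        FILE *f;
        int ret = -1;

        snprintf(path, sizeof(path), "%s/%s", zone_path, attr);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%" SCNu64, val) == 1)   /* attributes hold one decimal value */
            ret = 0;
        fclose(f);
        return ret;
    }

    int main(void)
    {
        uint64_t energy;

        if (powercap_read_u64("/sys/class/powercap/intel-rapl:0",
                              "energy_uj", &energy) == 0)
            printf("energy_uj = %" PRIu64 "\n", energy);
        return 0;
    }

Note that the library also exposes max_energy_range_uj, which callers need because the energy counter wraps around at that value.
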
/openbmc/linux/include/linux/memory_hotplug.h
    11  struct zone;
   132  static inline unsigned zone_span_seqbegin(struct zone *zone)  in zone_span_seqbegin() argument
   134  return read_seqbegin(&zone->span_seqlock);  in zone_span_seqbegin()
   136  static inline int zone_span_seqretry(struct zone *zone, unsigned iv)  in zone_span_seqretry() argument
   138  return read_seqretry(&zone->span_seqlock, iv);  in zone_span_seqretry()
   140  static inline void zone_span_writelock(struct zone *zone)  in zone_span_writelock() argument
   142  write_seqlock(&zone->span_seqlock);  in zone_span_writelock()
   144  static inline void zone_span_writeunlock(struct zone *zone)  in zone_span_writeunlock() argument
   146  write_sequnlock(&zone->span_seqlock);  in zone_span_writeunlock()
   148  static inline void zone_seqlock_init(struct zone *zone)  in zone_seqlock_init() argument
    [all …]

/openbmc/linux/include/linux/mmzone.h
   815  struct zone {  struct
  1008  static inline unsigned long zone_managed_pages(struct zone *zone)  in zone_managed_pages() argument
  1010  return (unsigned long)atomic_long_read(&zone->managed_pages);  in zone_managed_pages()
  1013  static inline unsigned long zone_cma_pages(struct zone *zone)  in zone_cma_pages() argument
  1016  return zone->cma_pages;  in zone_cma_pages()
  1022  static inline unsigned long zone_end_pfn(const struct zone *zone)  in zone_end_pfn() argument
  1024  return zone->zone_start_pfn + zone->spanned_pages;  in zone_end_pfn()
  1027  static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)  in zone_spans_pfn() argument
  1029  return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);  in zone_spans_pfn()
  1032  static inline bool zone_is_initialized(struct zone *zone)  in zone_is_initialized() argument
    [all …]

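The mmzone.h helpers above reduce to simple PFN arithmetic: a zone covers the half-open range [zone_start_pfn, zone_start_pfn + spanned_pages). A pared-down, runnable sketch with an illustrative struct (not the real struct zone):

    #include <stdbool.h>
    #include <stdio.h>

    struct zone_range {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    static unsigned long zone_end_pfn(const struct zone_range *zone)
    {
        return zone->zone_start_pfn + zone->spanned_pages;  /* first PFN past the zone */
    }

    static bool zone_spans_pfn(const struct zone_range *zone, unsigned long pfn)
    {
        return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
    }

    int main(void)
    {
        struct zone_range z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x800 };

        printf("end=%lx spans(0x17ff)=%d spans(0x1800)=%d\n",
               zone_end_pfn(&z),
               zone_spans_pfn(&z, 0x17ff), zone_spans_pfn(&z, 0x1800));
        return 0;
    }
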
/openbmc/linux/include/linux/vmstat.h
   145  static inline void zone_numa_event_add(long x, struct zone *zone,  in zone_numa_event_add() argument
   148  atomic_long_add(x, &zone->vm_numa_event[item]);  in zone_numa_event_add()
   152  static inline unsigned long zone_numa_event_state(struct zone *zone,  in zone_numa_event_state() argument
   155  return atomic_long_read(&zone->vm_numa_event[item]);  in zone_numa_event_state()
   165  static inline void zone_page_state_add(long x, struct zone *zone,  in zone_page_state_add() argument
   168  atomic_long_add(x, &zone->vm_stat[item]);  in zone_page_state_add()
   207  static inline unsigned long zone_page_state(struct zone *zone,  in zone_page_state() argument
   210  long x = atomic_long_read(&zone->vm_stat[item]);  in zone_page_state()
   224  static inline unsigned long zone_page_state_snapshot(struct zone *zone,  in zone_page_state_snapshot() argument
   227  long x = atomic_long_read(&zone->vm_stat[item]);  in zone_page_state_snapshot()
    [all …]

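zone_page_state() reads a signed global counter that per-CPU deltas can leave momentarily negative, so readers clamp the result at zero before reporting it as an unsigned count. A small sketch of that read pattern under illustrative names:

    #include <stdatomic.h>

    enum zone_stat_item { NR_FREE_PAGES, NR_ZONE_STAT_ITEMS };

    struct zone_counters {
        atomic_long vm_stat[NR_ZONE_STAT_ITEMS];
    };

    static unsigned long zone_page_state(struct zone_counters *z,
                                         enum zone_stat_item item)
    {
        long x = atomic_load(&z->vm_stat[item]);

        if (x < 0)          /* unsynced per-CPU deltas can make the sum dip below 0 */
            x = 0;
        return (unsigned long)x;
    }
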
/openbmc/phosphor-fan-presence/control/actions.cpp
    18  control::Zone& zone, const Group& group) {  in call_actions_based_on_timer() argument
    21  auto it = zone.getTimerEvents().find(__func__);  in call_actions_based_on_timer()
    22  if (it != zone.getTimerEvents().end())  in call_actions_based_on_timer()
    25  auto timerIter = zone.findTimer(group, actions, timers);  in call_actions_based_on_timer()
    29  zone.addTimer(__func__, group, actions, tConf);  in call_actions_based_on_timer()
    37  zone.getTimerEvents().erase(it);  in call_actions_based_on_timer()
    44  zone.addTimer(__func__, group, actions, tConf);  in call_actions_based_on_timer()
    54  void default_floor_on_missing_owner(Zone& zone, const Group& group)  in default_floor_on_missing_owner() argument
    57  zone.setServices(&group);  in default_floor_on_missing_owner()
    58  auto services = zone.getGroupServices(&group);  in default_floor_on_missing_owner()
    [all …]

/openbmc/phosphor-fan-presence/control/actions.hpp
    43  void default_floor_on_missing_owner(Zone& zone, const Group& group);
    70  void set_request_speed_base_with_max(Zone& zone, const Group& group);
    89  state = std::forward<T>(state)](auto& zone, auto& group) {  in count_state_before_speed() argument
    95  if (zone.template getPropertyValue<T>(  in count_state_before_speed()
   108  zone.setSpeed(speed);  in count_state_before_speed()
   113  zone.setActiveAllow(&group, !(numAtState >= count));  in count_state_before_speed()
   132  return [val_to_speed = std::move(val_to_speed)](control::Zone& zone,  in set_floor_from_average_sensor_value()
   134  auto speed = zone.getDefFloor();  in set_floor_from_average_sensor_value()
   140  [&zone, &count](T sum, const auto& entry) {  in set_floor_from_average_sensor_value()
   143  return sum + zone.template getPropertyValue<T>(  in set_floor_from_average_sensor_value()
    [all …]

/openbmc/phosphor-fan-presence/control/preconditions.cpp
    24  return [pg = std::move(pg), sse = std::move(sse)](auto& zone, auto& group) {  in property_states_match() argument
    27  std::all_of(pg.begin(), pg.end(), [&zone](const auto& entry) {  in property_states_match()
    30  return zone.getPropValueVariant(  in property_states_match()
    49  std::for_each(sse.begin(), sse.end(), [&zone](const auto& entry) {  in property_states_match()
    50  zone.initEvent(entry);  in property_states_match()
    59  std::for_each(sse.begin(), sse.end(), [&zone](const auto& entry) {  in property_states_match()
    60  zone.removeEvent(entry);  in property_states_match()
    62  zone.setFullSpeed();  in property_states_match()
    65  zone.setActiveAllow(&group, precondState);  in property_states_match()
    71  return [sse = std::move(sse)](auto& zone, auto& group) {  in services_missing_owner() argument
    [all …]

/openbmc/linux/drivers/md/dm-zoned-metadata.c
   137  struct dm_zone *zone;  member
   221  static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)  in dmz_dev_zone_id() argument
   223  if (WARN_ON(!zone))  in dmz_dev_zone_id()
   226  return zone->id - zone->dev->zone_offset;  in dmz_dev_zone_id()
   229  sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)  in dmz_start_sect() argument
   231  unsigned int zone_id = dmz_dev_zone_id(zmd, zone);  in dmz_start_sect()
   236  sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)  in dmz_start_block() argument
   238  unsigned int zone_id = dmz_dev_zone_id(zmd, zone);  in dmz_start_block()
   311  struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);  in dmz_insert() local
   313  if (!zone)  in dmz_insert()
    [all …]

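dmz_dev_zone_id() converts a zone's global id into a device-relative index by subtracting the device's zone offset, and dmz_start_sect() then derives the zone's first sector from that index. A sketch of that arithmetic with simplified, illustrative types (the shift amount is an example, not the driver's stored value):

    #include <stdint.h>
    #include <stdio.h>

    struct dmz_dev_lite  { unsigned int zone_offset; };
    struct dm_zone_lite  { unsigned int id; struct dmz_dev_lite *dev; };

    static unsigned int dev_zone_id(const struct dm_zone_lite *zone)
    {
        return zone->id - zone->dev->zone_offset;
    }

    static uint64_t zone_start_sect(const struct dm_zone_lite *zone,
                                    unsigned int zone_nr_sectors_shift)
    {
        /* start sector = device-relative index * sectors per zone */
        return (uint64_t)dev_zone_id(zone) << zone_nr_sectors_shift;
    }

    int main(void)
    {
        struct dmz_dev_lite dev = { .zone_offset = 16 };
        struct dm_zone_lite z = { .id = 20, .dev = &dev };

        /* 256 MiB zones of 512-byte sectors give a shift of 19 */
        printf("dev id %u starts at sector %llu\n", dev_zone_id(&z),
               (unsigned long long)zone_start_sect(&z, 19));
        return 0;
    }
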
/openbmc/linux/include/net/netfilter/nf_conntrack_zones.h
    12  return &ct->zone;  in nf_ct_zone()
    19  nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)  in nf_ct_zone_init() argument
    21  zone->id = id;  in nf_ct_zone_init()
    22  zone->flags = flags;  in nf_ct_zone_init()
    23  zone->dir = dir;  in nf_ct_zone_init()
    25  return zone;  in nf_ct_zone_init()
    36  if (tmpl->zone.flags & NF_CT_FLAG_MARK)  in nf_ct_zone_tmpl()
    37  return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);  in nf_ct_zone_tmpl()
    43  const struct nf_conntrack_zone *zone)  in nf_ct_zone_add() argument
    46  ct->zone = *zone;  in nf_ct_zone_add()
    [all …]

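nf_ct_zone_init() is a plain field-fill initializer: set the zone id, direction and flags, and hand the zone back so it can be used inline. A standalone mirror with the types simplified to plain integers:

    #include <stdint.h>

    struct ct_zone {
        uint16_t id;
        uint8_t  flags;
        uint8_t  dir;
    };

    static struct ct_zone *ct_zone_init(struct ct_zone *zone, uint16_t id,
                                        uint8_t dir, uint8_t flags)
    {
        zone->id    = id;
        zone->flags = flags;
        zone->dir   = dir;
        return zone;            /* returned so callers can use it inline */
    }

In the template path shown at line 37, the same initializer is fed skb->mark as the id when the template's flags say the zone id comes from the packet mark.
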
/openbmc/linux/kernel/power/snapshot.c
   404  struct mem_zone_bm_rtree *zone;  member
   466  static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,  in add_rtree_block() argument
   473  block_nr = zone->blocks;  in add_rtree_block()
   483  for (i = zone->levels; i < levels_needed; i++) {  in add_rtree_block()
   485  &zone->nodes);  in add_rtree_block()
   489  node->data[0] = (unsigned long)zone->rtree;  in add_rtree_block()
   490  zone->rtree = node;  in add_rtree_block()
   491  zone->levels += 1;  in add_rtree_block()
   495  block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);  in add_rtree_block()
   500  node = zone->rtree;  in add_rtree_block()
    [all …]

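add_rtree_block() grows the per-zone radix tree by adding interior levels whenever the next block index needs more of them, with each level resolving a fixed number of index bits. A sketch of just that "how many levels does this block number need" step; the constant is illustrative, not the kernel's BM_RTREE_LEVEL_SHIFT value.

    #define LEVEL_BITS 9   /* e.g. 512 slots per radix-tree node */

    static unsigned int rtree_levels_needed(unsigned long block_nr)
    {
        unsigned int levels = 0;

        while (block_nr) {               /* one level per LEVEL_BITS of index */
            levels++;
            block_nr >>= LEVEL_BITS;
        }
        return levels;                   /* block 0 needs no interior levels */
    }

When this comes back larger than zone->levels, the snippet's loop at line 483 pushes new root nodes on top of the existing tree, one per missing level.
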
/openbmc/linux/fs/adfs/map.c
   159  static int scan_map(struct adfs_sb_info *asb, unsigned int zone,  in scan_map() argument
   166  dm = asb->s_map + zone;  in scan_map()
   167  zone = asb->s_map_size;  in scan_map()
   168  dm_end = asb->s_map + zone;  in scan_map()
   179  } while (--zone > 0);  in scan_map()
   202  unsigned int zone;  in adfs_map_statfs() local
   205  zone = asb->s_map_size;  in adfs_map_statfs()
   209  } while (--zone > 0);  in adfs_map_statfs()
   220  unsigned int zone, mapoff;  in adfs_map_lookup() local
   228  zone = asb->s_map_size >> 1;  in adfs_map_lookup()
    [all …]

/openbmc/linux/virt/kvm/coalesced_mmio.c
    36  if (addr < dev->zone.addr)  in coalesced_mmio_in_range()
    38  if (addr + len > dev->zone.addr + dev->zone.size)  in coalesced_mmio_in_range()
    89  ring->coalesced_mmio[insert].pio = dev->zone.pio;  in coalesced_mmio_write()
   138  struct kvm_coalesced_mmio_zone *zone)  in kvm_vm_ioctl_register_coalesced_mmio() argument
   143  if (zone->pio != 1 && zone->pio != 0)  in kvm_vm_ioctl_register_coalesced_mmio()
   153  dev->zone = *zone;  in kvm_vm_ioctl_register_coalesced_mmio()
   157  zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,  in kvm_vm_ioctl_register_coalesced_mmio()
   158  zone->addr, zone->size, &dev->dev);  in kvm_vm_ioctl_register_coalesced_mmio()
   174  struct kvm_coalesced_mmio_zone *zone)  in kvm_vm_ioctl_unregister_coalesced_mmio() argument
   179  if (zone->pio != 1 && zone->pio != 0)  in kvm_vm_ioctl_unregister_coalesced_mmio()
    [all …]

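coalesced_mmio_in_range() accepts an access only if [addr, addr+len) lies entirely inside the registered zone [zone.addr, zone.addr+zone.size). A simplified standalone model of that containment test, written to avoid the addr+len overflow rather than copying the kernel's exact checks:

    #include <stdbool.h>
    #include <stdint.h>

    struct mmio_zone {
        uint64_t addr;
        uint64_t size;
    };

    static bool mmio_in_range(const struct mmio_zone *zone,
                              uint64_t addr, uint64_t len)
    {
        if (addr < zone->addr)
            return false;
        /* equivalent to addr + len <= zone->addr + zone->size, overflow-safe */
        if (len > zone->size || addr - zone->addr > zone->size - len)
            return false;
        return true;
    }
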
/openbmc/linux/drivers/net/ethernet/mellanox/mlx4/alloc.c
   250  struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);  in mlx4_zone_add_one() local
   252  if (NULL == zone)  in mlx4_zone_add_one()
   255  zone->flags = flags;  in mlx4_zone_add_one()
   256  zone->bitmap = bitmap;  in mlx4_zone_add_one()
   257  zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;  in mlx4_zone_add_one()
   258  zone->priority = priority;  in mlx4_zone_add_one()
   259  zone->offset = offset;  in mlx4_zone_add_one()
   263  zone->uid = zone_alloc->last_uid++;  in mlx4_zone_add_one()
   264  zone->allocator = zone_alloc;  in mlx4_zone_add_one()
   274  list_add_tail(&zone->prio_list, &it->prio_list);  in mlx4_zone_add_one()
    [all …]

/openbmc/linux/include/trace/events/compaction.h
   194  TP_PROTO(struct zone *zone,
   198  TP_ARGS(zone, order, ret),
   208  __entry->nid = zone_to_nid(zone);
   209  __entry->idx = zone_idx(zone);
   223  TP_PROTO(struct zone *zone,
   227  TP_ARGS(zone, order, ret)
   232  TP_PROTO(struct zone *zone,
   236  TP_ARGS(zone, order, ret)
   241  TP_PROTO(struct zone *zone, int order),
   243  TP_ARGS(zone, order),
    [all …]