/openbmc/linux/drivers/gpu/drm/i915/gvt/ |
scheduler.c
    191   workload->engine->name, workload->ctx_desc.lrca, in populate_shadow_context()
    499   workload); in intel_gvt_scan_and_shadow_workload()
    784   ret = workload->prepare(workload); in prepare_workload()
    806   workload->engine->name, workload); in dispatch_workload()
    836   workload->engine->name, workload->req); in dispatch_workload()
    1112  ring_id, workload, workload->status); in complete_current_workload()
    1135  workload->complete(workload); in complete_current_workload()
    1169  if (workload) in workload_thread()
    1213  workload, workload->status); in workload_thread()
    1548  if (!workload) in alloc_workload()
    [all …]
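
The scheduler.c hits above trace a workload through prepare_workload(), dispatch_workload() and complete_current_workload(), with workload->prepare() and workload->complete() invoked as callbacks. A minimal standalone sketch of that prepare/complete callback pattern; the struct layout and names below are simplified stand-ins, not the real struct intel_vgpu_workload:

    #include <stdio.h>

    struct demo_workload {
            int status;
            int (*prepare)(struct demo_workload *wl);
            void (*complete)(struct demo_workload *wl);
    };

    static int demo_prepare(struct demo_workload *wl)
    {
            /* Shadow and pin resources here; non-zero status aborts dispatch. */
            wl->status = 0;
            return 0;
    }

    static void demo_complete(struct demo_workload *wl)
    {
            printf("workload %p finished, status %d\n", (void *)wl, wl->status);
    }

    /* Rough shape of a scheduler thread body: prepare, dispatch to the
     * engine, then hand the result back through the complete() callback. */
    static void run_one_workload(struct demo_workload *wl)
    {
            if (wl->prepare && wl->prepare(wl))
                    return;         /* preparation failed, nothing to dispatch */

            /* ... submit to hardware and wait for the request here ... */

            if (wl->complete)
                    wl->complete(wl);
    }

    int main(void)
    {
            struct demo_workload wl = {
                    .prepare = demo_prepare,
                    .complete = demo_complete,
            };

            run_one_workload(&wl);
            return 0;
    }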
|
execlist.c
    372  struct intel_vgpu *vgpu = workload->vgpu; in prepare_execlist_workload()
    377  if (!workload->emulate_schedule_in) in prepare_execlist_workload()
    394  struct intel_vgpu *vgpu = workload->vgpu; in complete_execlist_workload()
    397  &s->execlist[workload->engine->id]; in complete_execlist_workload()
    404  workload, workload->status); in complete_execlist_workload()
    406  if (workload->status || vgpu->resetting_eng & workload->engine->mask) in complete_execlist_workload()
    414  this_desc = &workload->ctx_desc; in complete_execlist_workload()
    436  struct intel_vgpu_workload *workload = NULL; in submit_context() local
    439  if (IS_ERR(workload)) in submit_context()
    440  return PTR_ERR(workload); in submit_context()
    [all …]
|
cmd_parser.c
    2844  gma_head = workload->rb_start + workload->rb_head; in scan_workload()
    2845  gma_tail = workload->rb_start + workload->rb_tail; in scan_workload()
    2856  s.workload = workload; in scan_workload()
    2866  ret = command_scan(&s, workload->rb_head, workload->rb_tail, in scan_workload()
    2867  workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl)); in scan_workload()
    2903  s.workload = workload; in scan_wa_ctx()
    2927  workload->rb_len = (workload->rb_tail + guest_rb_size - in shadow_workload_ring_buffer()
    2930  gma_head = workload->rb_start + workload->rb_head; in shadow_workload_ring_buffer()
    2931  gma_tail = workload->rb_start + workload->rb_tail; in shadow_workload_ring_buffer()
    2945  s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len; in shadow_workload_ring_buffer()
    [all …]
|
cmd_parser.h
    50  int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
    56  int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload);
|
trace.h
    231  void *workload, const char *cmd_name),
    234  buf_addr_type, workload, cmd_name),
    243  __field(void*, workload)
    255  __entry->workload = workload;
    271  __entry->workload)
|
scheduler.h
    139  void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
    166  void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
|
/openbmc/linux/Documentation/admin-guide/ |
workload-tracing.rst
    42   is a workload that provides full coverage of a workload then the method
    45   workload.
    106  perf bench (all) workload
    116  Stress-ng netdev stressor workload
    129  paxtest kiddie workload
    144  by a workload. It can be used:
    317  Tracing perf bench all workload
    324  **System Calls made by the workload**
    434  **System Calls made by the workload**
    521  Tracing paxtest kiddie workload
    [all …]
|
/openbmc/linux/tools/perf/tests/ |
perf-record.c
    114  err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); in test__PERF_RECORD()
    126  if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { in test__PERF_RECORD()
    212  if ((pid_t)sample.pid != evlist->workload.pid) { in test__PERF_RECORD()
    214  name, evlist->workload.pid, sample.pid); in test__PERF_RECORD()
    218  if ((pid_t)sample.tid != evlist->workload.pid) { in test__PERF_RECORD()
    220  name, evlist->workload.pid, sample.tid); in test__PERF_RECORD()
    229  (pid_t)event->comm.pid != evlist->workload.pid) { in test__PERF_RECORD()
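
Several of the test__PERF_RECORD hits deal with pinning the forked workload to one CPU with sched_setaffinity() and then verifying every sample carries that pid/tid. A minimal sketch of just the pinning step, assuming the workload pid is already known (pin_workload_to_cpu is a hypothetical helper, not part of perf):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Pin an already-forked workload process to one CPU so that all
     * samples it generates are attributable to that CPU. */
    static int pin_workload_to_cpu(pid_t workload_pid, int cpu)
    {
            cpu_set_t mask;

            CPU_ZERO(&mask);
            CPU_SET(cpu, &mask);

            if (sched_setaffinity(workload_pid, sizeof(mask), &mask) < 0) {
                    perror("sched_setaffinity");
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            /* In the real test the pid comes from evlist->workload.pid;
             * here we simply pin ourselves to CPU 0 as a demonstration. */
            return pin_workload_to_cpu(getpid(), 0);
    }

sched_setaffinity() acts on the target pid directly, so the test can restrict the child after fork without any cooperation from the workload itself.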
|
builtin-test.c
    523  const char *workload = NULL; in cmd_test() local
    530  OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"), in cmd_test()
    548  if (workload) in cmd_test()
    549  return run_workload(workload, argc, argv); in cmd_test()
|
/openbmc/linux/Documentation/accel/qaic/ |
aic100.rst
    84   one workload, AIC100 is limited to 16 concurrent workloads. Workload
    94   workload is assigned a single DMA Bridge channel. The DMA Bridge exposes
    117  1. Compile the workload into an ELF targeting the NSP(s)
    123  workload.
    124  5. Once the workload is no longer required, make a request to the QSM to
    125  deactivate the workload, thus putting the NSPs back into an idle state.
    381  the data into the memory of the workload when the workload is ready to process
    444  Activate a workload onto NSPs. The host must provide memory to be
    448  Deactivate an active workload and return the NSPs to idle.
    478  have multiple users, each with their own workload running. If the workload of
    [all …]
|
qaic.rst
    15   if the workload is particularly quick, and the host is responsive. If the host
    19   workload's ability to process inputs. The lprnet (license plate reader network)
    20   workload is known to trigger this condition, and can generate in excess of 100k
    29   sleep for a time to see if the workload will generate more activity. The IRQ
    36   workload throughput performance (within run to run noise variation).
    82   or receive data from a workload. The call will return a GEM handle that
    93   get sent where to a workload. This requires a set of DMA transfers for the
    132  workload should be allowed to interface with the DBC.
|
/openbmc/linux/tools/perf/Documentation/ |
perf-sched.txt
    18  of an arbitrary workload.
    21  and other scheduling properties of the workload.
    23  'perf sched script' to see a detailed trace of the workload that
    26  'perf sched replay' to simulate the workload that was recorded
    28  that mimic the workload based on the events in the trace. These
    30  of the workload as it occurred when it was recorded - and can repeat
    34  workload captured via perf sched record. Columns stand for
|
/openbmc/linux/Documentation/admin-guide/mm/ |
idle_page_tracking.rst
    9   accessed by a workload and which are idle. This information can be useful for
    10  estimating the workload's working set size, which, in turn, can be taken into
    11  account when configuring the workload parameters, setting memory cgroup limits,
    12  or deciding where to place the workload within a compute cluster.
    51  workload one should:
    53  1. Mark all the workload's pages as idle by setting corresponding bits in
    55  ``/proc/pid/pagemap`` if the workload is represented by a process, or by
    56  filtering out alien pages using ``/proc/kpagecgroup`` in case the workload
    59  2. Wait until the workload accesses its working set.
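
The idle_page_tracking.rst hits outline the mark / wait / re-read cycle against /sys/kernel/mm/page_idle/bitmap. A rough C sketch of that cycle for one PFN range, assuming the PFNs were already resolved from /proc/<pid>/pagemap; error handling is trimmed, and the whole-word writes can also mark neighbouring PFNs idle:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define PAGE_IDLE_BITMAP "/sys/kernel/mm/page_idle/bitmap"

    /* Bit (pfn % 64) of the u64 word at byte offset (pfn / 64) * 8 is the
     * idle flag for that page frame; I/O must use whole, aligned words. */

    static int mark_range_idle(uint64_t first_pfn, uint64_t last_pfn)
    {
            uint64_t ones = ~0ULL, word;
            int fd = open(PAGE_IDLE_BITMAP, O_WRONLY);

            if (fd < 0)
                    return -1;
            for (word = first_pfn / 64; word <= last_pfn / 64; word++) {
                    /* Only set bits take effect on write, so writing all ones
                     * marks every PFN covered by this word as idle. */
                    if (pwrite(fd, &ones, sizeof(ones), word * 8) != sizeof(ones))
                            break;
            }
            close(fd);
            return 0;
    }

    static long count_still_idle(uint64_t first_pfn, uint64_t last_pfn)
    {
            uint64_t pfn, word;
            long idle = 0;
            int fd = open(PAGE_IDLE_BITMAP, O_RDONLY);

            if (fd < 0)
                    return -1;
            for (pfn = first_pfn; pfn <= last_pfn; pfn++) {
                    if (pread(fd, &word, sizeof(word), (pfn / 64) * 8) != sizeof(word))
                            break;
                    if (word & (1ULL << (pfn % 64)))
                            idle++;   /* not referenced since it was marked idle */
            }
            close(fd);
            return idle;
    }

    int main(void)
    {
            /* Hypothetical PFN range; a real tool derives it from pagemap. */
            uint64_t first = 0x10000, last = 0x10400;

            mark_range_idle(first, last);
            sleep(60);      /* let the workload touch its working set */
            printf("%ld pages still idle\n", count_still_idle(first, last));
            return 0;
    }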
|
/openbmc/linux/tools/perf/tests/shell/ |
stat_metrics_values.sh
    20  workload="perf bench futex hash -r 2 -s"
    25  $PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
|
test_intel_pt.sh
    23   workload="${temp_dir}/workload"
    63   cat << _end_of_file_ | /usr/bin/cc -o "${workload}" -xc - -pthread && have_workload=true
    254  $workload &
    256  $workload &
|
/openbmc/linux/tools/perf/bench/ |
find-bit-bench.c
    34  static noinline void workload(int val) in workload() function
    82  workload(bit); in do_for_each_set_bit()
    97  workload(bit); in do_for_each_set_bit()
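
In find-bit-bench.c, workload() exists so the for_each_set_bit() loops being timed have a side effect the compiler cannot delete. A standalone sketch of that trick (the accumulator variable is an assumption for illustration, not copied from the benchmark):

    #include <stdio.h>

    static int accumulator;

    /* noinline keeps the call (and its side effect) inside the measured
     * loop, preventing the compiler from optimising the iteration away. */
    static __attribute__((noinline)) void workload(int val)
    {
            accumulator += val;
    }

    int main(void)
    {
            unsigned long bits = 0xf0f0f0f0UL;

            /* Stand-in for for_each_set_bit(): call workload() per set bit. */
            for (int bit = 0; bit < 32; bit++)
                    if (bits & (1UL << bit))
                            workload(bit);

            printf("accumulator = %d\n", accumulator);
            return 0;
    }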
|
/openbmc/linux/tools/perf/tests/shell/lib/ |
perf_metric_validation.py
    10   …name, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics='… argument
    19   self.workloads = [x for x in workload.split(",") if x]
    360  def _run_perf(self, metric, workload: str):
    363  wl = workload.split()
    371  def collect_perf(self, workload: str):
    392  else: wl = workload
    400  workload = self.workloads[self.wlidx]
    402  data = self._run_perf(metric, workload)
    562  datafname=datafile, fullrulefname=fullrule, workload=args.wl,
|
/openbmc/linux/Documentation/filesystems/nfs/ |
knfsd-stats.rst
    54  Depending on the NFS workload patterns and various network stack
    58  However this is a more accurate and less workload-dependent measure
    74  pool for the NFS workload (the workload is thread-limited), in which
    76  performance of the NFS workload.
    93  threads configured than can be used by the NFS workload. This is
    99  slow; the idle timeout is 60 minutes. Unless the NFS workload
|
/openbmc/linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ |
pp_psm.c
    275  long workload[1]; in psm_adjust_power_state_dynamic() local
    300  workload[0] = hwmgr->workload_setting[index]; in psm_adjust_power_state_dynamic()
    302  if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode) in psm_adjust_power_state_dynamic()
    303  hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0); in psm_adjust_power_state_dynamic()
|
/openbmc/linux/Documentation/tools/rtla/ |
common_timerlat_options.rst
    32  Set timerlat to run without a workload, and then dispatches user-space workloads
    33  to wait on the timerlat_fd. Once the workload is awakes, it goes to sleep again
|
/openbmc/linux/Documentation/admin-guide/mm/damon/ |
start.rst
    50   with your real workload. The last line asks ``damo`` to record the access
    123  >=60 seconds in your workload to be swapped out. ::
    127  <pid of your workload>
|
/openbmc/linux/Documentation/scheduler/ |
sched-capacity.rst
    72   With a workload that periodically does a fixed amount of work, you will get an
    103  Executing the same workload as described in 1.3.1, which each CPU running at its
    111  workload on CPU1
    151  One issue that needs to be taken into account is that a workload's duty cycle is
    153  periodic workload at a given frequency F::
    162  Now, consider running the *same* workload at frequency F/2::
    184  identical workload on CPUs of different capacity values will yield different
    192  Executing a given periodic workload on each CPU at their maximum frequency would
    383  workload on CPU0
    390  workload on CPU1
    [all …]
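
The sched-capacity.rst hits around lines 151-162 contrast one periodic workload run at frequency F and at F/2. Spelling the relation out as a small worked sketch (C here stands for the cycles of work needed per period P; the symbols are chosen for this note, not quoted from the document):

    t_{\mathrm{busy}}(F)   = \frac{C}{F}, \qquad \mathrm{duty\ cycle}(F)   = \frac{C}{F \cdot P}
    t_{\mathrm{busy}}(F/2) = \frac{2C}{F}, \qquad \mathrm{duty\ cycle}(F/2) = \frac{2C}{F \cdot P} = 2 \cdot \mathrm{duty\ cycle}(F)

Halving the frequency doubles the fraction of each period spent busy, which is why the document goes on to argue that utilization signals need to be made frequency-invariant before they can be compared across CPUs.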
|
/openbmc/linux/Documentation/translations/zh_CN/scheduler/ |
sched-capacity.rst
    108  workload on CPU1
    339  workload on CPU0
    346  workload on CPU1
|
/openbmc/linux/Documentation/admin-guide/pm/ |
intel-speed-select.rst
    10   variety of diverse workload requirements.
    82   This feature allows configuration of a server dynamically based on workload
    216  workload, disable turbo::
    220  Then runs a busy workload on all CPUs, for example::
    540  the user control base frequency. If some critical workload threads demand
    568  Before enabling Intel(R) SST-BF and measuring its impact on a workload
    569  performance, execute some workload and measure performance and get a baseline
    588  Below, the workload is measuring average scheduler wakeup latency, so a lower
    688  With this configuration, if the same workload is executed by pinning the
    689  workload to high priority CPUs (CPU 5 and 6 in this case)::
    [all …]
|
/openbmc/openbmc/poky/meta/recipes-extended/stress-ng/ |
stress-ng_0.17.06.bb
    2  DESCRIPTION = "Deliberately simple workload generator for POSIX systems. It \
|