
Searched refs:rq (Results 1 – 25 of 664) sorted by relevance


/openbmc/linux/drivers/gpu/drm/i915/
i915_request.c
115 struct i915_request *rq = to_request(fence); in i915_fence_release() local
117 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && in i915_fence_release()
118 rq->guc_prio != GUC_PRIO_FINI); in i915_fence_release()
120 i915_request_free_capture_list(fetch_and_zero(&rq->capture_list)); in i915_fence_release()
121 if (rq->batch_res) { in i915_fence_release()
122 i915_vma_resource_put(rq->batch_res); in i915_fence_release()
123 rq->batch_res = NULL; in i915_fence_release()
133 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
134 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
167 if (is_power_of_2(rq->execution_mask) && in i915_fence_release()
[all …]
i915_request.h
64 #define RQ_TRACE(rq, fmt, ...) do { \ argument
65 const struct i915_request *rq__ = (rq); \
378 void __i915_request_skip(struct i915_request *rq);
379 bool i915_request_set_error_once(struct i915_request *rq, int error);
380 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
383 void __i915_request_queue(struct i915_request *rq,
385 void __i915_request_queue_bh(struct i915_request *rq);
387 bool i915_request_retire(struct i915_request *rq);
388 void i915_request_retire_upto(struct i915_request *rq);
400 i915_request_get(struct i915_request *rq) in i915_request_get() argument
[all …]
/openbmc/linux/kernel/sched/
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
13 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
15 return READ_ONCE(rq->avg_thermal.load_avg); in thermal_load_avg()
19 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) in update_thermal_load_avg() argument
24 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
64 static inline u64 rq_clock_pelt(struct rq *rq) in rq_clock_pelt() argument
[all …]
sched.h
102 struct rq;
116 extern void calc_global_load_tick(struct rq *this_rq);
117 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
119 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
614 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
695 struct rq *rq; member
896 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
949 struct rq;
952 void (*func)(struct rq *rq);
962 struct rq { struct
[all …]
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
15 if (rq) { in rq_sched_info_arrive()
16 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
17 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
27 if (rq) in rq_sched_info_depart()
28 rq->rq_cpu_time += delta; in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
34 if (rq) in rq_sched_info_dequeue()
35 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeue()
[all …]
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
21 return sched_stop_runnable(rq); in balance_stop()
26 wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt_stop() argument
31 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
33 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
36 static struct task_struct *pick_task_stop(struct rq *rq) in pick_task_stop() argument
38 if (!sched_stop_runnable(rq)) in pick_task_stop()
41 return rq->stop; in pick_task_stop()
44 static struct task_struct *pick_next_task_stop(struct rq *rq) in pick_next_task_stop() argument
46 struct task_struct *p = pick_task_stop(rq); in pick_next_task_stop()
[all …]
deadline.c
62 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
64 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
70 struct rq *rq = task_rq(p); in dl_rq_of_se() local
72 return &rq->dl; in dl_rq_of_se()
177 struct rq *rq = cpu_rq(i); in __dl_update() local
179 rq->dl.extra_bw += bw; in __dl_update()
313 struct rq *rq; in dl_change_utilization() local
320 rq = task_rq(p); in dl_change_utilization()
322 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
334 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
[all …]
rt.c
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
181 return rt_rq->rq; in rq_of_rt_rq()
189 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
193 return rt_rq->rq; in rq_of_rt_se()
222 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
226 rt_rq->rq = rq; in init_tg_rt_entry()
236 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
294 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
296 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
299 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
[all …]
core.c
118 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
239 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
241 rq->core->core_task_seq++; in sched_core_enqueue()
246 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
249 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
251 rq->core->core_task_seq++; in sched_core_dequeue()
254 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
263 if (!(flags & DEQUEUE_SAVE) && rq in sched_core_dequeue()
299 sched_core_find(struct rq * rq,unsigned long cookie) sched_core_find() argument
453 sched_core_enqueue(struct rq * rq,struct task_struct * p) sched_core_enqueue() argument
455 sched_core_dequeue(struct rq * rq,struct task_struct * p,int flags) sched_core_dequeue() argument
551 raw_spin_rq_lock_nested(struct rq * rq,int subclass) raw_spin_rq_lock_nested() argument
576 raw_spin_rq_trylock(struct rq * rq) raw_spin_rq_trylock() argument
600 raw_spin_rq_unlock(struct rq * rq) raw_spin_rq_unlock() argument
630 struct rq *rq; __task_rq_lock() local
655 struct rq *rq; task_rq_lock() local
694 update_rq_clock_task(struct rq * rq,s64 delta) update_rq_clock_task() argument
751 update_rq_clock(struct rq * rq) update_rq_clock() argument
778 hrtick_clear(struct rq * rq) hrtick_clear() argument
790 struct rq *rq = container_of(timer, struct rq, hrtick_timer); hrtick() local
805 __hrtick_restart(struct rq * rq) __hrtick_restart() argument
818 struct rq *rq = arg; __hrtick_start() local
831 hrtick_start(struct rq * rq,u64 delay) hrtick_start() argument
855 hrtick_start(struct rq * rq,u64 delay) hrtick_start() argument
868 hrtick_rq_init(struct rq * rq) hrtick_rq_init() argument
877 hrtick_clear(struct rq * rq) hrtick_clear() argument
881 hrtick_rq_init(struct rq * rq) hrtick_rq_init() argument
1042 resched_curr(struct rq * rq) resched_curr() argument
1068 struct rq *rq = cpu_rq(cpu); resched_cpu() local
1131 struct rq *rq = cpu_rq(cpu); wake_up_idle_cpu() local
1175 struct rq *rq = info; nohz_csd_func() local
1195 __need_bw_check(struct rq * rq,struct task_struct * p) __need_bw_check() argument
1209 sched_can_stop_tick(struct rq * rq) sched_can_stop_tick() argument
1415 uclamp_idle_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value) uclamp_idle_value() argument
1431 uclamp_idle_reset(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value) uclamp_idle_reset() argument
1442 uclamp_rq_max_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value) uclamp_rq_max_value() argument
1482 struct rq *rq; uclamp_update_util_min_rt_default() local
1564 uclamp_rq_inc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id) uclamp_rq_inc_id() argument
1602 uclamp_rq_dec_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id) uclamp_rq_dec_id() argument
1668 uclamp_rq_inc(struct rq * rq,struct task_struct * p) uclamp_rq_inc() argument
1692 uclamp_rq_dec(struct rq * rq,struct task_struct * p) uclamp_rq_dec() argument
1712 uclamp_rq_reinc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id) uclamp_rq_reinc_id() argument
1734 struct rq *rq; uclamp_update_active() local
2011 init_uclamp_rq(struct rq * rq) init_uclamp_rq() argument
2051 uclamp_rq_inc(struct rq * rq,struct task_struct * p) uclamp_rq_inc() argument
2052 uclamp_rq_dec(struct rq * rq,struct task_struct * p) uclamp_rq_dec() argument
2089 enqueue_task(struct rq * rq,struct task_struct * p,int flags) enqueue_task() argument
2106 dequeue_task(struct rq * rq,struct task_struct * p,int flags) dequeue_task() argument
2123 activate_task(struct rq * rq,struct task_struct * p,int flags) activate_task() argument
2135 deactivate_task(struct rq * rq,struct task_struct * p,int flags) deactivate_task() argument
2206 check_class_changed(struct rq * rq,struct task_struct * p,const struct sched_class * prev_class,int oldprio) check_class_changed() argument
2219 wakeup_preempt(struct rq * rq,struct task_struct * p,int flags) wakeup_preempt() argument
2288 struct rq *rq; wait_task_inactive() local
2390 migrate_disable_switch(struct rq * rq,struct task_struct * p) migrate_disable_switch() argument
2460 rq_has_pinned_tasks(struct rq * rq) rq_has_pinned_tasks() argument
2514 move_queued_task(struct rq * rq,struct rq_flags * rf,struct task_struct * p,int new_cpu) move_queued_task() argument
2560 __migrate_task(struct rq * rq,struct rq_flags * rf,struct task_struct * p,int dest_cpu) __migrate_task() argument
2582 struct rq *rq = this_rq(); migration_cpu_stop() local
2685 struct rq *lowest_rq = NULL, *rq = this_rq(); push_cpu_stop() local
2750 struct rq *rq = task_rq(p); __do_set_cpus_allowed() local
2961 affine_move_task(struct rq * rq,struct task_struct * p,struct rq_flags * rf,int dest_cpu,unsigned int flags) affine_move_task() argument
3110 __set_cpus_allowed_ptr_locked(struct task_struct * p,struct affinity_context * ctx,struct rq * rq,struct rq_flags * rf) __set_cpus_allowed_ptr_locked() argument
3200 struct rq *rq; __set_cpus_allowed_ptr() local
3244 struct rq *rq; restrict_cpus_allowed_ptr() local
3706 migrate_disable_switch(struct rq * rq,struct task_struct * p) migrate_disable_switch() argument
3708 rq_has_pinned_tasks(struct rq * rq) rq_has_pinned_tasks() argument
3723 struct rq *rq; ttwu_stat() local
3769 ttwu_do_activate(struct rq * rq,struct task_struct * p,int wake_flags,struct rq_flags * rf) ttwu_do_activate() argument
3850 struct rq *rq; ttwu_runnable() local
3875 struct rq *rq = this_rq(); sched_ttwu_pending() local
3933 struct rq *rq = cpu_rq(cpu); __ttwu_queue_wakelist() local
3943 struct rq *rq = cpu_rq(cpu); wake_up_if_idle() local
4023 struct rq *rq = cpu_rq(cpu); ttwu_queue() local
4407 struct rq *rq = NULL; task_call_func() local
4454 struct rq *rq = cpu_rq(cpu); cpu_curr_snapshot() local
4851 struct rq *rq; wake_up_new_task() local
5010 do_balance_callbacks(struct rq * rq,struct balance_callback * head) do_balance_callbacks() argument
5046 __splice_balance_callbacks(struct rq * rq,bool split) __splice_balance_callbacks() argument
5070 splice_balance_callbacks(struct rq * rq) splice_balance_callbacks() argument
5075 __balance_callbacks(struct rq * rq) __balance_callbacks() argument
5080 balance_callbacks(struct rq * rq,struct balance_callback * head) balance_callbacks() argument
5093 __balance_callbacks(struct rq * rq) __balance_callbacks() argument
5097 splice_balance_callbacks(struct rq * rq) splice_balance_callbacks() argument
5102 balance_callbacks(struct rq * rq,struct balance_callback * head) balance_callbacks() argument
5109 prepare_lock_switch(struct rq * rq,struct task_struct * next,struct rq_flags * rf) prepare_lock_switch() argument
5125 finish_lock_switch(struct rq * rq) finish_lock_switch() argument
5179 prepare_task_switch(struct rq * rq,struct task_struct * prev,struct task_struct * next) prepare_task_switch() argument
5214 struct rq *rq = this_rq(); finish_task_switch() local
5324 context_switch(struct rq * rq,struct task_struct * prev,struct task_struct * next,struct rq_flags * rf) context_switch() argument
5547 struct rq *rq; task_sched_runtime() local
5584 cpu_resched_latency(struct rq * rq) cpu_resched_latency() argument
5629 cpu_resched_latency(struct rq * rq) cpu_resched_latency() argument
5639 struct rq *rq = cpu_rq(cpu); scheduler_tick() local
5723 struct rq *rq = cpu_rq(cpu); sched_tick_remote() local
5965 put_prev_task_balance(struct rq * rq,struct task_struct * prev,struct rq_flags * rf) put_prev_task_balance() argument
5991 __pick_next_task(struct rq * rq,struct task_struct * prev,struct rq_flags * rf) __pick_next_task() argument
6049 pick_task(struct rq * rq) pick_task() argument
6068 pick_next_task(struct rq * rq,struct task_struct * prev,struct rq_flags * rf) pick_next_task() argument
6367 sched_core_balance(struct rq * rq) sched_core_balance() argument
6389 queue_core_balance(struct rq * rq) queue_core_balance() argument
6411 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; sched_core_cpu_starting() local
6450 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; sched_core_cpu_deactivate() local
6500 struct rq *rq = cpu_rq(cpu); sched_core_cpu_dying() local
6513 pick_next_task(struct rq * rq,struct task_struct * prev,struct rq_flags * rf) pick_next_task() argument
6583 struct rq *rq; __schedule() local
7071 struct rq *rq; rt_mutex_setprio() local
7194 struct rq *rq; set_user_nice() local
7329 struct rq *rq = cpu_rq(cpu); idle_cpu() local
7376 struct rq *rq = cpu_rq(cpu); sched_core_idle_cpu() local
7412 struct rq *rq = cpu_rq(cpu); effective_cpu_util() local
7633 struct rq *rq; __sched_setscheduler() local
8533 struct rq *rq; do_sched_yield() local
8936 struct rq *rq, *p_rq; yield_to() local
9091 struct rq *rq; sched_rr_get_interval() local
9260 struct rq *rq = cpu_rq(cpu); init_idle() local
9382 struct rq *rq; sched_setnuma() local
9426 struct rq *rq = this_rq(); __balance_push_cpu_stop() local
9456 balance_push(struct rq * rq) balance_push() argument
9521 struct rq *rq = cpu_rq(cpu); balance_push_set() local
9542 struct rq *rq = this_rq(); balance_hotplug_wait() local
9551 balance_push(struct rq * rq) balance_push() argument
9565 set_rq_online(struct rq * rq) set_rq_online() argument
9580 set_rq_offline(struct rq * rq) set_rq_offline() argument
9596 sched_set_rq_online(struct rq * rq,int cpu) sched_set_rq_online() argument
9608 sched_set_rq_offline(struct rq * rq,int cpu) sched_set_rq_offline() argument
9688 struct rq *rq = cpu_rq(cpu); sched_cpu_activate() local
9724 struct rq *rq = cpu_rq(cpu); sched_cpu_deactivate() local
9785 struct rq *rq = cpu_rq(cpu); sched_rq_cpu_starting() local
9827 calc_load_migrate(struct rq * rq) calc_load_migrate() argument
9835 dump_rq_tasks(struct rq * rq,const char * loglvl) dump_rq_tasks() argument
9856 struct rq *rq = cpu_rq(cpu); sched_cpu_dying() local
10000 struct rq *rq; sched_init() local
10529 struct rq *rq; sched_move_task() local
10928 struct rq *rq = cfs_rq->rq; tg_set_cfs_bandwidth() local
11598 call_trace_sched_update_nr_running(struct rq * rq,int count) call_trace_sched_update_nr_running() argument
11880 struct rq *rq = cpu_rq(cpu); sched_mm_cid_remote_clear() local
11938 struct rq *rq = cpu_rq(cpu); sched_mm_cid_remote_clear_old() local
12040 task_tick_mm_cid(struct rq * rq,struct task_struct * curr) task_tick_mm_cid() argument
12059 struct rq *rq; sched_mm_cid_exit_signals() local
12083 struct rq *rq; sched_mm_cid_before_execve() local
12107 struct rq *rq; sched_mm_cid_after_execve() local
[all …]
/openbmc/linux/drivers/scsi/fnic/
vnic_rq.c
15 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
18 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
22 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
23 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
30 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
33 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
34 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
36 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
39 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
47 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
93 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
96 return rq->ring.desc_avail; in vnic_rq_desc_avail()
99 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
102 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
105 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
107 return rq->to_use->desc; in vnic_rq_next_desc()
110 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
112 return rq->to_use->index; in vnic_rq_next_index()
115 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
117 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
/openbmc/linux/drivers/net/ethernet/cisco/enic/
vnic_rq.c
18 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
21 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
25 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
26 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
31 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
34 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
35 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
37 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
40 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
48 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
vnic_rq.h
84 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
87 return rq->ring.desc_avail; in vnic_rq_desc_avail()
90 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
93 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
96 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
98 return rq->to_use->desc; in vnic_rq_next_desc()
101 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
103 return rq->to_use->index; in vnic_rq_next_index()
106 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
111 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
/openbmc/linux/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
13 int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_rcs() argument
42 if (GRAPHICS_VER(rq->i915) == 9) in gen8_emit_flush_rcs()
46 if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0)) in gen8_emit_flush_rcs()
58 cs = intel_ring_begin(rq, len); in gen8_emit_flush_rcs()
74 intel_ring_advance(rq, cs); in gen8_emit_flush_rcs()
79 int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_xcs() argument
83 cs = intel_ring_begin(rq, 4); in gen8_emit_flush_xcs()
99 if (rq->engine->class == VIDEO_DECODE_CLASS) in gen8_emit_flush_xcs()
107 intel_ring_advance(rq, cs); in gen8_emit_flush_xcs()
112 int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen11_emit_flush_rcs() argument
[all …]
gen6_engine_cs.c
55 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
58 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
62 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
72 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
74 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
84 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
89 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
92 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
98 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
130 cs = intel_ring_begin(rq, 4); in gen6_emit_flush_rcs()
[all …]
selftest_execlists.c
28 static bool is_active(struct i915_request *rq) in is_active() argument
30 if (i915_request_is_active(rq)) in is_active()
33 if (i915_request_on_hold(rq)) in is_active()
36 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) in is_active()
43 struct i915_request *rq, in wait_for_submit() argument
53 if (i915_request_completed(rq)) /* that was quick! */ in wait_for_submit()
58 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
69 struct i915_request *rq, in wait_for_reset() argument
81 if (i915_request_completed(rq)) in wait_for_reset()
84 if (READ_ONCE(rq->fence.error)) in wait_for_reset()
[all …]
intel_breadcrumbs.c
106 check_signal_order(struct intel_context *ce, struct i915_request *rq) in check_signal_order() argument
108 if (rq->context != ce) in check_signal_order()
111 if (!list_is_last(&rq->signal_link, &ce->signals) && in check_signal_order()
112 i915_seqno_passed(rq->fence.seqno, in check_signal_order()
113 list_next_entry(rq, signal_link)->fence.seqno)) in check_signal_order()
116 if (!list_is_first(&rq->signal_link, &ce->signals) && in check_signal_order()
117 i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno, in check_signal_order()
118 rq->fence.seqno)) in check_signal_order()
207 struct i915_request *rq; in signal_irq_work() local
209 list_for_each_entry_rcu(rq, &ce->signals, signal_link) { in signal_irq_work()
[all …]
/openbmc/linux/include/linux/
blk-mq.h
198 static inline bool blk_rq_is_passthrough(struct request *rq) in blk_rq_is_passthrough() argument
200 return blk_op_is_passthrough(rq->cmd_flags); in blk_rq_is_passthrough()
208 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) argument
210 #define rq_dma_dir(rq) \ argument
211 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
213 #define rq_list_add(listptr, rq) do { \ argument
214 (rq)->rq_next = *(listptr); \
215 *(listptr) = rq; \
218 #define rq_list_add_tail(lastpptr, rq) do { \ argument
219 (rq)->rq_next = NULL; \
[all …]
/openbmc/linux/fs/erofs/
decompressor.c
20 struct z_erofs_decompress_req *rq; member
68 struct z_erofs_decompress_req *rq = ctx->rq; in z_erofs_lz4_prepare_dstpages() local
73 EROFS_SB(rq->sb)->lz4.max_distance_pages; in z_erofs_lz4_prepare_dstpages()
79 struct page *const page = rq->out[i]; in z_erofs_lz4_prepare_dstpages()
86 if (!rq->fillgaps && test_bit(j, bounced)) { in z_erofs_lz4_prepare_dstpages()
89 availables[top++] = rq->out[i - lz4_max_distance_pages]; in z_erofs_lz4_prepare_dstpages()
119 rq->out[i] = victim; in z_erofs_lz4_prepare_dstpages()
128 struct z_erofs_decompress_req *rq = ctx->rq; in z_erofs_lz4_handle_overlap() local
133 if (rq->inplace_io) { in z_erofs_lz4_handle_overlap()
135 if (rq->partial_decoding || !may_inplace || in z_erofs_lz4_handle_overlap()
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
19 int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) in mlx5e_xsk_alloc_rx_mpwqe() argument
21 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix); in mlx5e_xsk_alloc_rx_mpwqe()
22 struct mlx5e_icosq *icosq = rq->icosq; in mlx5e_xsk_alloc_rx_mpwqe()
30 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) in mlx5e_xsk_alloc_rx_mpwqe()
35 batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs, in mlx5e_xsk_alloc_rx_mpwqe()
36 rq->mpwqe.pages_per_wqe); in mlx5e_xsk_alloc_rx_mpwqe()
44 for (; batch < rq->mpwqe.pages_per_wqe; batch++) { in mlx5e_xsk_alloc_rx_mpwqe()
45 xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool); in mlx5e_xsk_alloc_rx_mpwqe()
50 pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs); in mlx5e_xsk_alloc_rx_mpwqe()
52 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe)); in mlx5e_xsk_alloc_rx_mpwqe()
[all …]
/openbmc/linux/drivers/scsi/esas2r/
esas2r_disc.c
49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
[all …]
esas2r_vda.c
59 static void clear_vda_request(struct esas2r_request *rq);
62 struct esas2r_request *rq);
67 struct esas2r_request *rq, in esas2r_process_vda_ioctl() argument
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
95 rq->vrq->scsi.function = vi->function; in esas2r_process_vda_ioctl()
96 rq->interrupt_cb = esas2r_complete_vda_ioctl; in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
112 rq->vrq->flash.length = cpu_to_le32(datalen); in esas2r_process_vda_ioctl()
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; in esas2r_process_vda_ioctl()
115 memcpy(rq->vrq->flash.data.file.file_name, in esas2r_process_vda_ioctl()
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq, in mlx5e_read_enhanced_title_slot() argument
92 struct mlx5e_cq_decomp *cqd = &rq->cqd; in mlx5e_read_enhanced_title_slot()
97 if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state))) in mlx5e_read_enhanced_title_slot()
100 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) in mlx5e_read_enhanced_title_slot()
105 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1); in mlx5e_read_enhanced_title_slot()
[all …]
/openbmc/linux/block/
blk-flush.c
103 static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq) in blk_flush_policy() argument
107 if (blk_rq_sectors(rq)) in blk_flush_policy()
111 if (rq->cmd_flags & REQ_PREFLUSH) in blk_flush_policy()
114 (rq->cmd_flags & REQ_FUA)) in blk_flush_policy()
120 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
122 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
125 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
132 rq->bio = rq->biotail; in blk_flush_restore_request()
135 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
136 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
[all …]
blk-mq.c
48 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
49 static void blk_mq_request_bypass_insert(struct request *rq,
92 static bool blk_mq_check_inflight(struct request *rq, void *priv) in blk_mq_check_inflight() argument
96 if (rq->part && blk_do_io_stat(rq) && in blk_mq_check_inflight()
97 (!mi->part->bd_partno || rq->part == mi->part) && in blk_mq_check_inflight()
98 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_inflight()
99 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_inflight()
316 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
318 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
320 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
[all …]