/openbmc/linux/drivers/gpu/drm/scheduler/

  sched_entity.c
       86  spin_lock_init(&entity->rq_lock);              in drm_sched_entity_init()
      114  spin_lock(&entity->rq_lock);                   in drm_sched_entity_modify_sched()
      117  spin_unlock(&entity->rq_lock);                 in drm_sched_entity_modify_sched()
      225  spin_lock(&entity->rq_lock);                   in drm_sched_entity_kill()
      228  spin_unlock(&entity->rq_lock);                 in drm_sched_entity_kill()
      377  spin_lock(&entity->rq_lock);                   in drm_sched_entity_set_priority()
      379  spin_unlock(&entity->rq_lock);                 in drm_sched_entity_set_priority()
      536  spin_lock(&entity->rq_lock);                   in drm_sched_entity_select_rq()
      543  spin_unlock(&entity->rq_lock);                 in drm_sched_entity_select_rq()
      581  spin_lock(&entity->rq_lock);                   in drm_sched_entity_push_job()
      [all …]

  sched_main.c
      105  spin_lock(&entity->rq_lock);                   in drm_sched_rq_update_fifo()
      116  spin_unlock(&entity->rq_lock);                 in drm_sched_rq_update_fifo()

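The drm/scheduler hits above all follow one pattern: entity->rq_lock is a plain spinlock taken around short updates of the entity's run-queue pointer and priority. A minimal sketch of that pattern, assuming a simplified toy_entity type (the real struct drm_sched_entity carries many more fields); it is not the driver's actual code:

    #include <linux/spinlock.h>

    /* Simplified stand-in for struct drm_sched_entity (assumption). */
    struct toy_entity {
            spinlock_t rq_lock;     /* protects rq and priority below */
            void *rq;
            int priority;
    };

    static void toy_entity_init(struct toy_entity *entity)
    {
            spin_lock_init(&entity->rq_lock);       /* cf. drm_sched_entity_init() */
            entity->rq = NULL;
            entity->priority = 0;
    }

    /* Same lock/update/unlock shape as the drm_sched_entity_set_priority() hit. */
    static void toy_entity_set_priority(struct toy_entity *entity, int prio)
    {
            spin_lock(&entity->rq_lock);
            entity->priority = prio;
            spin_unlock(&entity->rq_lock);
    }

A spinlock is appropriate here because each critical section is only a few pointer and integer writes and must not sleep.
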
/openbmc/qemu/include/hw/virtio/

  virtio-blk.h
       57  QemuMutex rq_lock;                             (struct member)

/openbmc/linux/drivers/net/ethernet/intel/ice/

  ice_controlq.c
      544  mutex_lock(&cq->rq_lock);                      in ice_shutdown_rq()
      566  mutex_unlock(&cq->rq_lock);                    in ice_shutdown_rq()
      790  mutex_init(&cq->rq_lock);                      in ice_init_ctrlq_locks()
      828  mutex_destroy(&cq->rq_lock);                   in ice_destroy_ctrlq_locks()
     1173  mutex_lock(&cq->rq_lock);                      in ice_clean_rq_elem()
     1241  mutex_unlock(&cq->rq_lock);                    in ice_clean_rq_elem()

  ice_controlq.h
       95  struct mutex rq_lock;  /* Receive queue lock */  (struct member)

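In the ice hits, rq_lock is a struct mutex with a full lifecycle: mutex_init() when the control-queue locks are set up, mutex_lock()/mutex_unlock() around receive-queue cleanup and shutdown, and mutex_destroy() on teardown. A hedged sketch of that lifecycle, using a hypothetical toy_ctrlq type rather than the driver's real control-queue structure:

    #include <linux/mutex.h>

    /* Hypothetical stand-in for the driver's control-queue info structure. */
    struct toy_ctrlq {
            struct mutex rq_lock;   /* receive queue lock */
            unsigned int rq_clean_count;
    };

    static void toy_init_ctrlq_locks(struct toy_ctrlq *cq)
    {
            mutex_init(&cq->rq_lock);               /* cf. ice_init_ctrlq_locks() */
    }

    static void toy_clean_rq_elem(struct toy_ctrlq *cq)
    {
            mutex_lock(&cq->rq_lock);               /* cf. ice_clean_rq_elem() */
            cq->rq_clean_count++;                   /* placeholder for descriptor cleanup */
            mutex_unlock(&cq->rq_lock);
    }

    static void toy_destroy_ctrlq_locks(struct toy_ctrlq *cq)
    {
            mutex_destroy(&cq->rq_lock);            /* cf. ice_destroy_ctrlq_locks() */
    }

A mutex rather than a spinlock fits these paths, presumably because they run in process context where sleeping under the lock is allowed.
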
/openbmc/qemu/hw/block/

  virtio-blk.c
       87  WITH_QEMU_LOCK_GUARD(&s->rq_lock) {            in virtio_blk_handle_rw_error()
     1072  WITH_QEMU_LOCK_GUARD(&s->rq_lock) {            in virtio_blk_dma_restart_cb()
     1116  WITH_QEMU_LOCK_GUARD(&s->rq_lock) {            in virtio_blk_reset()
     1305  WITH_QEMU_LOCK_GUARD(&s->rq_lock) {            in virtio_blk_save_device()
     1346  WITH_QEMU_LOCK_GUARD(&s->rq_lock) {            in virtio_blk_load_device()
     1904  qemu_mutex_init(&s->rq_lock);                  in virtio_blk_device_realize()
     1962  qemu_mutex_destroy(&s->rq_lock);               in virtio_blk_device_unrealize()

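In the QEMU hits, rq_lock is a QemuMutex: initialized in realize, destroyed in unrealize, and otherwise taken only through the WITH_QEMU_LOCK_GUARD() scope guard, which releases the mutex automatically when the guarded block exits. A minimal sketch of that pattern against QEMU's lockable API; the ToyState type and its fields are assumptions, not virtio-blk's real VirtIOBlock layout:

    #include "qemu/osdep.h"
    #include "qemu/lockable.h"

    /* Assumed stand-in for the device state; VirtIOBlock keeps much more here. */
    typedef struct ToyState {
        QemuMutex rq_lock;          /* protects the queued-request list */
        void *rq;
    } ToyState;

    static void toy_realize(ToyState *s)
    {
        qemu_mutex_init(&s->rq_lock);           /* cf. virtio_blk_device_realize() */
    }

    static void toy_reset(ToyState *s)
    {
        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {     /* cf. virtio_blk_reset() */
            s->rq = NULL;                       /* mutex dropped automatically at '}' */
        }
    }

    static void toy_unrealize(ToyState *s)
    {
        qemu_mutex_destroy(&s->rq_lock);        /* cf. virtio_blk_device_unrealize() */
    }

The guard form avoids the missed-unlock-on-early-return bugs that paired qemu_mutex_lock()/qemu_mutex_unlock() calls invite.
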
/openbmc/linux/include/drm/

  gpu_scheduler.h
      148  spinlock_t rq_lock;                            (struct member)

/openbmc/linux/drivers/infiniband/hw/bnxt_re/

  ib_verbs.h
       87  spinlock_t rq_lock;  /* protect rq */          (struct member)

  ib_verbs.c
     1572  spin_lock_init(&qp->rq_lock);                  in bnxt_re_create_qp()
     2883  spin_lock_irqsave(&qp->rq_lock, flags);        in bnxt_re_post_recv()
     2926  spin_unlock_irqrestore(&qp->rq_lock, flags);   in bnxt_re_post_recv()

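Both RDMA providers in these results (bnxt_re here and siw further down) follow the same shape: rq_lock is a spinlock initialized at QP creation and taken with spin_lock_irqsave()/spin_unlock_irqrestore() in the post-receive verb. A hedged sketch of that shape, with a hypothetical toy_qp instead of either driver's real QP structure:

    #include <linux/spinlock.h>

    /* Hypothetical QP stand-in; real QPs carry receive queues, not a counter. */
    struct toy_qp {
            spinlock_t rq_lock;     /* protects the receive queue */
            unsigned int rq_posted;
    };

    static void toy_create_qp(struct toy_qp *qp)
    {
            spin_lock_init(&qp->rq_lock);   /* cf. bnxt_re_create_qp() / siw_create_qp() */
            qp->rq_posted = 0;
    }

    static int toy_post_recv(struct toy_qp *qp)
    {
            unsigned long flags;

            spin_lock_irqsave(&qp->rq_lock, flags);         /* cf. bnxt_re_post_recv() */
            qp->rq_posted++;                /* placeholder for queueing the work request */
            spin_unlock_irqrestore(&qp->rq_lock, flags);
            return 0;
    }

The irqsave variant is the conservative choice when the interrupt state of the caller is not known at that point in the verb path.
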
/openbmc/linux/kernel/sched/

  sched.h
     1679  rq_lock(struct rq *rq, struct rq_flags *rf)    in rq_lock()  (function)
     1710  DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
     1711      rq_lock(_T->lock, &_T->rf),
     1733  rq_lock(rq, rf);                               in this_rq_lock_irq()

  core.c
      795  rq_lock(rq, &rf);                              in hrtick()
      821  rq_lock(rq, &rf);                              in __hrtick_start()
     2525  rq_lock(rq, rf);                               in move_queued_task()
     2599  rq_lock(rq, &rf);                              in migration_cpu_stop()
     4029  rq_lock(rq, &rf);                              in ttwu_queue()
     5650  rq_lock(rq, &rf);                              in scheduler_tick()
     6613  rq_lock(rq, &rf);                              in __schedule()
     9431  rq_lock(rq, &rf);                              in __balance_push_cpu_stop()

  fair.c
     5946  rq_lock(rq, &rf);                              in __cfsb_csd_unthrottle()
     9216  rq_lock(rq, &rf);                              in attach_one_task()
     9232  rq_lock(env->dst_rq, &rf);                     in attach_tasks()
    12646  rq_lock(rq, &rf);                              in task_fork_fair()

  deadline.c
     1888  rq_lock(rq, &rf);                              in migrate_task_rq_dl()

  rt.c
      943  rq_lock(rq, &rf);                              in do_sched_rt_period_timer()

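Unlike the driver hits, the kernel/sched entries are not a lock field: rq_lock() is a helper defined in kernel/sched/sched.h that takes the runqueue lock and records pin state in a struct rq_flags, and the DEFINE_LOCK_GUARD_1() hit wraps it so callers can use the guard()/scoped_guard() forms. A rough sketch of the calling convention only, with toy_ stand-in types and the body reduced to the raw spinlock operation; the real helper also does runqueue pinning and lockdep bookkeeping:

    #include <linux/spinlock.h>

    /* Simplified stand-ins: the real struct rq and struct rq_flags hold much more. */
    struct toy_rq {
            raw_spinlock_t lock;
            unsigned long nr_running;
    };

    struct toy_rq_flags {
            unsigned long flags;    /* unused here; kept only for the shape */
    };

    /* Same calling convention as the rq_lock(rq, &rf) hits above,
     * minus pinning and lockdep annotations. */
    static inline void toy_rq_lock(struct toy_rq *rq, struct toy_rq_flags *rf)
    {
            raw_spin_lock(&rq->lock);
    }

    static inline void toy_rq_unlock(struct toy_rq *rq, struct toy_rq_flags *rf)
    {
            raw_spin_unlock(&rq->lock);
    }

    /* Typical caller shape, as in the scheduler_tick() or hrtick() hits. */
    static void toy_tick(struct toy_rq *rq)
    {
            struct toy_rq_flags rf;

            toy_rq_lock(rq, &rf);
            rq->nr_running += 0;    /* placeholder for per-tick accounting */
            toy_rq_unlock(rq, &rf);
    }
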
/openbmc/linux/drivers/infiniband/sw/siw/

  siw_verbs.c
      356  spin_lock_init(&qp->rq_lock);                  in siw_create_qp()
     1061  spin_lock_irqsave(&qp->rq_lock, flags);        in siw_post_receive()
     1089  spin_unlock_irqrestore(&qp->rq_lock, flags);   in siw_post_receive()

  siw.h
      451  spinlock_t rq_lock;                            (struct member)