/openbmc/linux/drivers/gpu/drm/i915/gt/uc/

intel_gsc_uc.c
    141  gsc->wq = alloc_ordered_workqueue("i915_gsc", 0);  in intel_gsc_uc_init_early()
    142  if (!gsc->wq) {  in intel_gsc_uc_init_early()
    261  if (gsc->wq) {  in intel_gsc_uc_fini()
    262  destroy_workqueue(gsc->wq);  in intel_gsc_uc_fini()
    263  gsc->wq = NULL;  in intel_gsc_uc_fini()
    317  queue_work(gsc->wq, &gsc->work);  in intel_gsc_uc_load_start()
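These intel_gsc_uc.c hits trace the full lifetime of a driver-private ordered workqueue: allocate in init, queue work onto it, destroy and clear the pointer in fini. A minimal sketch of that pattern, using hypothetical names (my_dev, my_work_fn) rather than the GSC internals:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    struct my_dev {
            struct workqueue_struct *wq;
            struct work_struct work;
    };

    static void my_work_fn(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, work);
            /* deferred processing for dev goes here */
    }

    static int my_dev_init(struct my_dev *dev)
    {
            /* ordered queue: work items run one at a time, in order */
            dev->wq = alloc_ordered_workqueue("my_dev", 0);
            if (!dev->wq)
                    return -ENOMEM;
            INIT_WORK(&dev->work, my_work_fn);
            return 0;
    }

    static void my_dev_load_start(struct my_dev *dev)
    {
            queue_work(dev->wq, &dev->work);
    }

    static void my_dev_fini(struct my_dev *dev)
    {
            if (dev->wq) {
                    destroy_workqueue(dev->wq);  /* drains pending work first */
                    dev->wq = NULL;              /* guards against double destroy */
            }
    }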

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/

en_tx.c
    363  pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);  in mlx5e_tx_flush()
    370  wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);  in mlx5e_tx_flush()
    381  struct mlx5_wq_cyc *wq = &sq->wq;  in mlx5e_txwqe_complete()  local
    423  mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);  in mlx5e_txwqe_complete()
    582  pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);  in mlx5e_tx_mpwqe_session_complete()
    805  cqe = mlx5_cqwq_get_cqe(&cq->wq);  in mlx5e_poll_tx_cq()
    829  mlx5_cqwq_pop(&cq->wq);  in mlx5e_poll_tx_cq()
    836  ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);  in mlx5e_poll_tx_cq()
    869  queue_work(cq->priv->wq, &sq->recover_work);  in mlx5e_poll_tx_cq()
    878  mlx5_cqwq_update_db_record(&cq->wq);  in mlx5e_poll_tx_cq()
    [all …]
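The mlx5 send queue here is a power-of-two ring addressed by free-running counters: the producer counter sq->pc only ever increments, and mlx5_wq_cyc_ctr2ix() masks it down to a slot index; the completion path at line 836 applies the same mapping to its consumer counter sqcc. A self-contained userspace sketch of that counter-to-index scheme (simplified: the real mlx5_wq_cyc keeps the size mask inside the queue structure):

    #include <stdint.h>
    #include <stdio.h>

    #define WQ_SIZE 64u             /* ring slots, must be a power of two */

    /* Map a free-running counter to a ring slot (the ctr2ix idiom). */
    static uint16_t wq_cyc_ctr2ix(uint16_t ctr)
    {
            return ctr & (WQ_SIZE - 1);
    }

    int main(void)
    {
            uint16_t pc = 60;       /* producer counter, never reset */

            /* post 8 WQEs; the slot index wraps from 63 back to 0 */
            for (int i = 0; i < 8; i++) {
                    printf("pc=%u -> slot %u\n", pc, wq_cyc_ctr2ix(pc));
                    pc++;
            }
            return 0;
    }

Because the counters are never reset, producer minus consumer also gives the number of outstanding entries directly, without a separate fill count.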

events.c
     57  struct workqueue_struct *wq;  member
    318  queue_work(events->wq, &events->pcie_core_work);  in pcie_core()
    355  events->wq = create_singlethread_workqueue("mlx5_events");  in mlx5_events_init()
    356  if (!events->wq) {  in mlx5_events_init()
    368  destroy_workqueue(dev->priv.events->wq);  in mlx5_events_cleanup()
    391  flush_workqueue(events->wq);  in mlx5_events_stop()
    447  queue_work(dev->priv.events->wq, work);  in mlx5_events_work_enqueue()

en_txrx.c
     75  struct mlx5_wq_cyc *wq = &sq->wq;  in mlx5e_trigger_irq()  local
     77  u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);  in mlx5e_trigger_irq()
     84  nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_trigger_irq()
     85  mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);  in mlx5e_trigger_irq()

fw_reset.c
     21  struct workqueue_struct *wq;  member
    277  queue_work(fw_reset->wq, &fw_reset->reset_reload_work);  in poll_sync_reset()
    619  queue_work(fw_reset->wq, &fw_reset->reset_request_work);  in mlx5_sync_reset_events_handle()
    622  queue_work(fw_reset->wq, &fw_reset->reset_unload_work);  in mlx5_sync_reset_events_handle()
    625  queue_work(fw_reset->wq, &fw_reset->reset_now_work);  in mlx5_sync_reset_events_handle()
    628  queue_work(fw_reset->wq, &fw_reset->reset_abort_work);  in mlx5_sync_reset_events_handle()
    643  queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);  in fw_reset_event_notifier()
    735  fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");  in mlx5_fw_reset_init()
    736  if (!fw_reset->wq) {  in mlx5_fw_reset_init()
    748  destroy_workqueue(fw_reset->wq);  in mlx5_fw_reset_init()
    [all …]

/openbmc/linux/drivers/scsi/snic/

vnic_dev.c
     23  struct vnic_wq wq;  member
    365  &dc2c->wq,  in svnic_dev_init_devcmd2()
    371  fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);  in svnic_dev_init_devcmd2()
    382  vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);  in svnic_dev_init_devcmd2()
    383  svnic_wq_enable(&dc2c->wq);  in svnic_dev_init_devcmd2()
    392  dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;  in svnic_dev_init_devcmd2()
    393  dc2c->wq_ctrl = dc2c->wq.ctrl;  in svnic_dev_init_devcmd2()
    410  svnic_wq_disable(&dc2c->wq);  in svnic_dev_init_devcmd2()
    411  svnic_wq_free(&dc2c->wq);  in svnic_dev_init_devcmd2()
    428  svnic_wq_disable(&dc2c->wq);  in vnic_dev_deinit_devcmd2()
    [all …]

snic_res.h
     49  snic_queue_wq_eth_desc(struct vnic_wq *wq,  in snic_queue_wq_eth_desc()  argument
     57  struct wq_enet_desc *desc = svnic_wq_next_desc(wq);  in snic_queue_wq_eth_desc()
     72  svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);  in snic_queue_wq_eth_desc()

/openbmc/linux/fs/

userfaultfd.c
    108  wait_queue_entry_t wq;  member
    164  uwq = container_of(wq, struct userfaultfd_wait_queue, wq);  in userfaultfd_wake_function()
    190  list_del_init(&wq->entry);  in userfaultfd_wake_function()
    520  uwq.wq.private = current;  in handle_userfault()
    585  list_del(&uwq.wq.entry);  in handle_userfault()
    969  wait_queue_entry_t *wq;  in find_userfault_in()  local
    978  wq = list_last_entry(&wqh->head, typeof(*wq), entry);  in find_userfault_in()
    979  uwq = container_of(wq, struct userfaultfd_wait_queue, wq);  in find_userfault_in()
   1104  list_del(&uwq->wq.entry);  in userfaultfd_ctx_read()
   1173  wq.entry);  in userfaultfd_ctx_read()
    [all …]
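userfaultfd embeds a wait_queue_entry_t inside its own userfaultfd_wait_queue struct, stores the waiting task in .private (line 520), and recovers the enclosing struct with container_of() in the wake callback (line 164). A hedged sketch of that embedding pattern; my_waiter, my_wake_function, and the address match are illustrative, not the userfaultfd logic:

    #include <linux/wait.h>
    #include <linux/sched.h>

    struct my_waiter {
            unsigned long address;          /* illustrative payload */
            wait_queue_entry_t wq;          /* embedded entry, as in userfaultfd */
    };

    static int my_wake_function(wait_queue_entry_t *wq, unsigned int mode,
                                int flags, void *key)
    {
            /* recover the enclosing struct from the embedded member */
            struct my_waiter *waiter = container_of(wq, struct my_waiter, wq);

            if (key && waiter->address != (unsigned long)key)
                    return 0;               /* not our waiter, keep scanning */

            wake_up_state(wq->private, mode);
            list_del_init(&wq->entry);      /* dequeue so we wake only once */
            return 1;
    }

    static void my_wait_setup(struct my_waiter *waiter, unsigned long address)
    {
            waiter->address = address;
            init_waitqueue_func_entry(&waiter->wq, my_wake_function);
            waiter->wq.private = current;   /* task to wake, as at line 520 */
    }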

/openbmc/linux/drivers/net/ethernet/cisco/enic/

enic.h
    163  ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];  member
    224  static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)  in enic_cq_wq()  argument
    226  return enic->rq_count + wq;  in enic_cq_wq()
    236  unsigned int wq)  in enic_msix_wq_intr()  argument
    238  return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;  in enic_msix_wq_intr()

/openbmc/linux/drivers/dma/idxd/

compat.c
     49  struct idxd_wq *wq = confdev_to_wq(dev);  in bind_store()  local
     51  if (is_idxd_wq_kernel(wq))  in bind_store()
     53  else if (is_idxd_wq_user(wq))  in bind_store()

/openbmc/linux/drivers/iio/adc/

berlin2-adc.c
     77  wait_queue_head_t wq;  member
    128  ret = wait_event_interruptible_timeout(priv->wq, priv->data_available,  in berlin2_adc_read()
    179  ret = wait_event_interruptible_timeout(priv->wq, priv->data_available,  in berlin2_adc_tsen_read()
    255  wake_up_interruptible(&priv->wq);  in berlin2_adc_irq()
    275  wake_up_interruptible(&priv->wq);  in berlin2_adc_tsen_irq()
    331  init_waitqueue_head(&priv->wq);  in berlin2_adc_probe()
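berlin2-adc shows the classic waitqueue handshake between a sleeping reader and an interrupt handler: the reader blocks on a condition with a timeout, and the IRQ sets the flag and wakes it. A compact sketch of the handshake under assumed names (my_adc, the 1000 ms timeout):

    #include <linux/wait.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct my_adc {
            wait_queue_head_t wq;           /* init_waitqueue_head() in probe */
            bool data_available;
            int data;
    };

    static int my_adc_read(struct my_adc *priv)
    {
            long ret;

            priv->data_available = false;
            /* ... trigger the conversion in hardware ... */

            ret = wait_event_interruptible_timeout(priv->wq,
                                                   priv->data_available,
                                                   msecs_to_jiffies(1000));
            if (ret == 0)
                    return -ETIMEDOUT;      /* condition never became true */
            if (ret < 0)
                    return ret;             /* interrupted by a signal */
            return priv->data;
    }

    static irqreturn_t my_adc_irq(int irq, void *dev_id)
    {
            struct my_adc *priv = dev_id;

            priv->data = 0;                 /* ... read the hardware FIFO ... */
            priv->data_available = true;
            wake_up_interruptible(&priv->wq);
            return IRQ_HANDLED;
    }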

/openbmc/linux/drivers/infiniband/hw/mthca/

mthca_cq.c
    484  struct mthca_wq *wq;  in mthca_poll_one()  local
    534  wq = &(*cur_qp)->sq;  in mthca_poll_one()
    536  >> wq->wqe_shift);  in mthca_poll_one()
    542  wq = NULL;  in mthca_poll_one()
    548  wq = &(*cur_qp)->rq;  in mthca_poll_one()
    557  wqe_index = wq->max - 1;  in mthca_poll_one()
    561  if (wq) {  in mthca_poll_one()
    562  if (wq->last_comp < wqe_index)  in mthca_poll_one()
    563  wq->tail += wqe_index - wq->last_comp;  in mthca_poll_one()
    565  wq->tail += wqe_index + wq->max - wq->last_comp;  in mthca_poll_one()
    [all …]
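The mthca completion path (lines 562-565) advances the consumer tail by the distance from the last completed WQE index to the current one, adding wq->max when the index has wrapped around the ring. A standalone sketch of that wraparound arithmetic:

    #include <stdio.h>

    /* Distance travelled from last_comp to wqe_index around a ring of
     * 'max' slots; the tail advances by that many completed WQEs. */
    static unsigned int tail_advance(unsigned int tail, unsigned int last_comp,
                                     unsigned int wqe_index, unsigned int max)
    {
            if (last_comp < wqe_index)
                    tail += wqe_index - last_comp;          /* no wrap */
            else
                    tail += wqe_index + max - last_comp;    /* wrapped past 0 */
            return tail;
    }

    int main(void)
    {
            printf("%u\n", tail_advance(100, 3, 7, 16));    /* 104: slots 4..7 */
            printf("%u\n", tail_advance(100, 14, 2, 16));   /* 104: 15, 0, 1, 2 */
            return 0;
    }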

/openbmc/linux/drivers/infiniband/hw/mlx5/

qp.c
    196  wq->offset, wq->wqe_cnt,  in mlx5_ib_read_user_wqe_sq()
    222  wq->wqe_cnt, wq->wqe_shift,  in mlx5_ib_read_user_wqe_sq()
    258  wq->offset, wq->wqe_cnt,  in mlx5_ib_read_user_wqe_rq()
   1394  wq = MLX5_ADDR_OF(sqc, sqc, wq);  in create_raw_packet_qp_sq()
   1401  MLX5_SET(wq, wq, log_wq_pg_sz,  in create_raw_packet_qp_sq()
   1480  wq = MLX5_ADDR_OF(rqc, rqc, wq);  in create_raw_packet_qp_rq()
   1488  MLX5_SET(wq, wq, log_wq_pg_sz,  in create_raw_packet_qp_rq()
   5227  wq = MLX5_ADDR_OF(rqc, rqc, wq);  in create_rq()
   5228  MLX5_SET(wq, wq, wq_type,  in create_rq()
   5251  MLX5_SET(wq, wq, log_wqe_stride_size,  in create_rq()
    [all …]

/openbmc/linux/drivers/staging/ks7010/

ks7010_sdio.c
    252  queue_delayed_work(priv->wq, &priv->rw_dwork, 0);  in _ks_wlan_hw_power_save()
    274  queue_delayed_work(priv->wq, &priv->rw_dwork, 1);  in _ks_wlan_hw_power_save()
    279  queue_delayed_work(priv->wq, &priv->rw_dwork, 1);  in ks_wlan_hw_power_save()
    375  queue_delayed_work(priv->wq, &priv->rw_dwork, 0);  in tx_device_task()
    403  queue_delayed_work(priv->wq, &priv->rw_dwork, 0);  in ks_wlan_hw_tx()
    596  queue_delayed_work(priv->wq,  in ks_sdio_interrupt()
    607  queue_delayed_work(priv->wq, &priv->rw_dwork, 0);  in ks_sdio_interrupt()
   1022  if (!priv->wq) {  in ks7010_sdio_probe()
   1037  destroy_workqueue(priv->wq);  in ks7010_sdio_probe()
   1105  if (priv->wq)  in ks7010_sdio_remove()
    [all …]

/openbmc/linux/drivers/net/ethernet/microsoft/mana/

gdma_main.c
   1007  u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;  in mana_gd_wq_avail_space()
   1008  u32 wq_size = wq->queue_size;  in mana_gd_wq_avail_space()
   1021  return wq->queue_mem_ptr + offset;  in mana_gd_get_wqe_ptr()
   1075  base_ptr = wq->queue_mem_ptr;  in mana_gd_write_sgl()
   1076  end_ptr = base_ptr + wq->queue_size;  in mana_gd_write_sgl()
   1104  if (wq->type == GDMA_RQ) {  in mana_gd_post_work_request()
   1125  if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {  in mana_gd_post_work_request()
   1126  gc = wq->gdma_dev->gdma_context;  in mana_gd_post_work_request()
   1134  wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);  in mana_gd_post_work_request()
   1137  if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)  in mana_gd_post_work_request()
    [all …]
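mana_gd_wq_avail_space() (lines 1007-1008) relies on head and tail being free-running u32 counters: unsigned subtraction yields the used space correctly even after head wraps past UINT32_MAX. A standalone sketch of the computation (WQE_BU_SIZE is a stand-in for GDMA_WQE_BU_SIZE):

    #include <stdint.h>
    #include <stdio.h>

    #define WQE_BU_SIZE 32u         /* stand-in for GDMA_WQE_BU_SIZE */

    static uint32_t wq_avail_space(uint32_t head, uint32_t tail,
                                   uint32_t queue_size)
    {
            /* modulo-2^32 subtraction: correct across counter wraparound */
            uint32_t used = (head - tail) * WQE_BU_SIZE;

            return queue_size - used;
    }

    int main(void)
    {
            /* head has wrapped past UINT32_MAX; 4 basic units in flight */
            printf("%u\n", wq_avail_space(2u, 0xFFFFFFFEu, 1024u)); /* 896 */
            return 0;
    }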

/openbmc/linux/drivers/usb/chipidea/

otg.c
    246  ci->wq = create_freezable_workqueue("ci_otg");  in ci_hdrc_otg_init()
    247  if (!ci->wq) {  in ci_hdrc_otg_init()
    264  if (ci->wq)  in ci_hdrc_otg_destroy()
    265  destroy_workqueue(ci->wq);  in ci_hdrc_otg_destroy()

/openbmc/linux/drivers/net/ethernet/cavium/liquidio/

response_manager.c
     42  oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);  in octeon_setup_response_list()
     43  if (!oct->dma_comp_wq.wq) {  in octeon_setup_response_list()
     60  destroy_workqueue(oct->dma_comp_wq.wq);  in octeon_delete_response_list()
    236  queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));  in oct_poll_req_completion()

/openbmc/linux/drivers/nfc/

virtual_ncidev.c
     28  struct wait_queue_head wq;  member
     70  wake_up_interruptible(&vdev->wq);  in virtual_nci_send()
     91  if (wait_event_interruptible(vdev->wq, vdev->send_buff))  in virtual_ncidev_read()
    153  init_waitqueue_head(&vdev->wq);  in virtual_ncidev_open()

/openbmc/linux/drivers/char/tpm/

tpm_ibmvtpm.h
     29  wait_queue_head_t wq;  member
     41  wait_queue_head_t wq;  member

/openbmc/linux/drivers/md/

dm-zoned-reclaim.c
     18  struct workqueue_struct *wq;  member
    514  mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);  in dmz_reclaim_work()
    577  zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s_%d", WQ_MEM_RECLAIM,  in dmz_ctr_reclaim()
    579  if (!zrc->wq) {  in dmz_ctr_reclaim()
    585  queue_delayed_work(zrc->wq, &zrc->work, 0);  in dmz_ctr_reclaim()
    602  destroy_workqueue(zrc->wq);  in dmz_dtr_reclaim()
    620  queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);  in dmz_resume_reclaim()
    639  mod_delayed_work(zrc->wq, &zrc->work, 0);  in dmz_schedule_reclaim()
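dm-zoned mixes queue_delayed_work() with mod_delayed_work(), and the difference matters: queue_delayed_work() is a no-op when the work is already pending, while mod_delayed_work() re-arms the timer, which is how dmz_schedule_reclaim() (line 639) forces an immediate run. A sketch of that pairing with hypothetical names (my_reclaim, MY_IDLE_PERIOD):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    #define MY_IDLE_PERIOD (10 * HZ)        /* hypothetical idle delay */

    struct my_reclaim {
            struct workqueue_struct *wq;
            struct delayed_work work;
    };

    static void my_reclaim_work(struct work_struct *work)
    {
            struct my_reclaim *zrc =
                    container_of(to_delayed_work(work), struct my_reclaim, work);

            /* ... do one reclaim pass, then re-arm for the idle period ... */
            mod_delayed_work(zrc->wq, &zrc->work, MY_IDLE_PERIOD);
    }

    static int my_reclaim_init(struct my_reclaim *zrc)
    {
            zrc->wq = alloc_ordered_workqueue("my_rwq", WQ_MEM_RECLAIM);
            if (!zrc->wq)
                    return -ENOMEM;
            INIT_DELAYED_WORK(&zrc->work, my_reclaim_work);
            queue_delayed_work(zrc->wq, &zrc->work, 0);
            return 0;
    }

    /* force a run now even if a delayed run is already queued */
    static void my_schedule_reclaim(struct my_reclaim *zrc)
    {
            mod_delayed_work(zrc->wq, &zrc->work, 0);
    }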

/openbmc/linux/drivers/most/

most_cdev.c
     31  wait_queue_head_t wq;  member
    196  if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))  in comp_write()
    248  if (wait_event_interruptible(c->wq,  in comp_read()
    286  poll_wait(filp, &c->wq, wait);  in comp_poll()
    335  wake_up_interruptible(&c->wq);  in comp_disconnect_channel()
    373  wake_up_interruptible(&c->wq);  in comp_rx_completion()
    397  wake_up_interruptible(&c->wq);  in comp_tx_completion()
    453  init_waitqueue_head(&c->wq);  in comp_probe()
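most_cdev.c reuses one wait_queue_head both for blocking reads/writes and for poll support: poll_wait() registers the caller on c->wq without sleeping, and the same wake_up_interruptible() calls that unblock readers also make poll/epoll re-evaluate readiness. A sketch of such a .poll handler (the comp_channel fields here are illustrative, not the real struct):

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    struct comp_channel {
            wait_queue_head_t wq;
            bool has_data;                  /* illustrative readiness flags */
            bool has_room;
    };

    static __poll_t comp_poll(struct file *filp, poll_table *wait)
    {
            struct comp_channel *c = filp->private_data;
            __poll_t mask = 0;

            /* register on the waitqueue; never blocks */
            poll_wait(filp, &c->wq, wait);

            if (c->has_data)
                    mask |= EPOLLIN | EPOLLRDNORM;
            if (c->has_room)
                    mask |= EPOLLOUT | EPOLLWRNORM;
            return mask;
    }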

/openbmc/linux/drivers/media/i2c/

saa7110.c
     50  wait_queue_head_t wq;  member
    186  prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE);  in determine_norm()
    188  finish_wait(&decoder->wq, &wait);  in determine_norm()
    221  prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE);  in determine_norm()
    223  finish_wait(&decoder->wq, &wait);  in determine_norm()
    401  init_waitqueue_head(&decoder->wq);  in saa7110_probe()

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/lag/

mp.c
    100  flush_workqueue(mp->wq);  in mlx5_lag_fib_event_flush()
    333  queue_work(mp->wq, &fib_work->work);  in mlx5_lag_fib_event()
    359  mp->wq = create_singlethread_workqueue("mlx5_lag_mp");  in mlx5_lag_mp_init()
    360  if (!mp->wq)  in mlx5_lag_mp_init()
    367  destroy_workqueue(mp->wq);  in mlx5_lag_mp_init()
    382  destroy_workqueue(mp->wq);  in mlx5_lag_mp_cleanup()

/openbmc/linux/drivers/gpu/drm/i915/display/

intel_display_driver.c
    226  i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);  in intel_display_driver_probe_noirq()
    227  i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |  in intel_display_driver_probe_noirq()
    417  flush_workqueue(i915->display.wq.flip);  in intel_display_driver_remove()
    418  flush_workqueue(i915->display.wq.modeset);  in intel_display_driver_remove()
    459  destroy_workqueue(i915->display.wq.flip);  in intel_display_driver_remove_noirq()
    460  destroy_workqueue(i915->display.wq.modeset);  in intel_display_driver_remove_noirq()

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/

conn.h
     56  struct mlx5_cqwq wq;  member
     66  struct mlx5_wq_qp wq;  member