Lines Matching refs:udev

106 	struct tcmu_dev *udev;  member
305 struct tcmu_dev *udev = nl_cmd->udev; in tcmu_fail_netlink_cmd() local
313 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); in tcmu_fail_netlink_cmd()
374 struct tcmu_dev *udev = NULL; in tcmu_genl_cmd_done() local
389 if (nl_cmd->udev->se_dev.dev_index == dev_id) { in tcmu_genl_cmd_done()
390 udev = nl_cmd->udev; in tcmu_genl_cmd_done()
395 if (!udev) { in tcmu_genl_cmd_done()
404 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, in tcmu_genl_cmd_done()
409 udev->name, completed_cmd, nl_cmd->cmd); in tcmu_genl_cmd_done()
499 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in tcmu_cmd_free_data() local
503 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); in tcmu_cmd_free_data()
506 static inline int tcmu_get_empty_block(struct tcmu_dev *udev, in tcmu_get_empty_block() argument
510 XA_STATE(xas, &udev->data_pages, 0); in tcmu_get_empty_block()
515 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); in tcmu_get_empty_block()
516 if (dbi == udev->dbi_thresh) in tcmu_get_empty_block()
519 dpi = dbi * udev->data_pages_per_blk; in tcmu_get_empty_block()
533 if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) { in tcmu_get_empty_block()
542 if (i && dbi > udev->dbi_max) in tcmu_get_empty_block()
543 udev->dbi_max = dbi; in tcmu_get_empty_block()
545 set_bit(dbi, udev->data_bitmap); in tcmu_get_empty_block()
554 static int tcmu_get_empty_blocks(struct tcmu_dev *udev, in tcmu_get_empty_blocks() argument
560 uint32_t blk_size = udev->data_blk_size; in tcmu_get_empty_blocks()
564 dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, in tcmu_get_empty_blocks()
596 static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in new_block_to_iov() argument
603 len = min_t(int, len, udev->data_blk_size); in new_block_to_iov()
615 (udev->data_off + dbi * udev->data_blk_size); in new_block_to_iov()
622 static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in tcmu_setup_iovs() argument
629 for (; data_length > 0; data_length -= udev->data_blk_size) in tcmu_setup_iovs()
630 dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); in tcmu_setup_iovs()
636 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_alloc_cmd() local
645 tcmu_cmd->tcmu_dev = udev; in tcmu_alloc_cmd()
702 static inline void tcmu_copy_data(struct tcmu_dev *udev, in tcmu_copy_data() argument
724 dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, in tcmu_copy_data()
730 if (page_cnt > udev->data_pages_per_blk) in tcmu_copy_data()
731 page_cnt = udev->data_pages_per_blk; in tcmu_copy_data()
733 dpi = dbi * udev->data_pages_per_blk; in tcmu_copy_data()
736 page = xa_load(&udev->data_pages, dpi); in tcmu_copy_data()
774 static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, in scatter_data_area() argument
779 tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, in scatter_data_area()
783 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, in gather_data_area() argument
806 tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg, in gather_data_area()
820 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) in is_ring_space_avail() argument
822 struct tcmu_mailbox *mb = udev->mb_addr; in is_ring_space_avail()
828 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in is_ring_space_avail()
834 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) in is_ring_space_avail()
837 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); in is_ring_space_avail()
839 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); in is_ring_space_avail()
842 udev->cmdr_last_cleaned, udev->cmdr_size); in is_ring_space_avail()
854 static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, in tcmu_alloc_data_space() argument
863 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); in tcmu_alloc_data_space()
866 (udev->max_blocks - udev->dbi_thresh) + space; in tcmu_alloc_data_space()
870 blocks_left * udev->data_blk_size, in tcmu_alloc_data_space()
871 cmd->dbi_cnt * udev->data_blk_size); in tcmu_alloc_data_space()
875 udev->dbi_thresh += cmd->dbi_cnt; in tcmu_alloc_data_space()
876 if (udev->dbi_thresh > udev->max_blocks) in tcmu_alloc_data_space()
877 udev->dbi_thresh = udev->max_blocks; in tcmu_alloc_data_space()
880 iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); in tcmu_alloc_data_space()
885 ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); in tcmu_alloc_data_space()
931 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in add_to_qfull_queue() local
938 if (!udev->qfull_time_out) in add_to_qfull_queue()
940 else if (udev->qfull_time_out > 0) in add_to_qfull_queue()
941 tmo = udev->qfull_time_out; in add_to_qfull_queue()
942 else if (udev->cmd_time_out) in add_to_qfull_queue()
943 tmo = udev->cmd_time_out; in add_to_qfull_queue()
947 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); in add_to_qfull_queue()
949 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); in add_to_qfull_queue()
951 tcmu_cmd, udev->name); in add_to_qfull_queue()
955 static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size) in ring_insert_padding() argument
958 struct tcmu_mailbox *mb = udev->mb_addr; in ring_insert_padding()
959 uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in ring_insert_padding()
962 if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) { in ring_insert_padding()
963 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); in ring_insert_padding()
965 hdr = udev->cmdr + cmd_head; in ring_insert_padding()
973 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); in ring_insert_padding()
976 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in ring_insert_padding()
986 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_unplug_device() local
988 clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags); in tcmu_unplug_device()
989 uio_event_notify(&udev->uio_info); in tcmu_unplug_device()
994 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_plug_device() local
996 if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) in tcmu_plug_device()
997 return &udev->se_plug; in tcmu_plug_device()
1014 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in queue_cmd_ring() local
1017 struct tcmu_mailbox *mb = udev->mb_addr; in queue_cmd_ring()
1023 uint32_t blk_size = udev->data_blk_size; in queue_cmd_ring()
1029 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { in queue_cmd_ring()
1034 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { in queue_cmd_ring()
1039 if (!list_empty(&udev->qfull_queue)) in queue_cmd_ring()
1042 if (data_length > (size_t)udev->max_blocks * blk_size) { in queue_cmd_ring()
1044 data_length, (size_t)udev->max_blocks * blk_size); in queue_cmd_ring()
1049 iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); in queue_cmd_ring()
1060 if (command_size > (udev->cmdr_size / 2)) { in queue_cmd_ring()
1062 command_size, udev->cmdr_size); in queue_cmd_ring()
1068 if (!is_ring_space_avail(udev, command_size)) in queue_cmd_ring()
1075 if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff), in queue_cmd_ring()
1086 tcmu_cmd, udev->name); in queue_cmd_ring()
1088 cmd_head = ring_insert_padding(udev, command_size); in queue_cmd_ring()
1090 entry = udev->cmdr + cmd_head; in queue_cmd_ring()
1100 scatter_data_area(udev, tcmu_cmd, &iov); in queue_cmd_ring()
1102 tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); in queue_cmd_ring()
1109 tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); in queue_cmd_ring()
1113 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); in queue_cmd_ring()
1125 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); in queue_cmd_ring()
1128 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); in queue_cmd_ring()
1130 if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) in queue_cmd_ring()
1131 uio_event_notify(&udev->uio_info); in queue_cmd_ring()
1158 queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr) in queue_tmr_ring() argument
1163 struct tcmu_mailbox *mb = udev->mb_addr; in queue_tmr_ring()
1166 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) in queue_tmr_ring()
1172 if (!list_empty(&udev->tmr_queue) || in queue_tmr_ring()
1173 !is_ring_space_avail(udev, cmd_size)) { in queue_tmr_ring()
1174 list_add_tail(&tmr->queue_entry, &udev->tmr_queue); in queue_tmr_ring()
1176 tmr, udev->name); in queue_tmr_ring()
1180 cmd_head = ring_insert_padding(udev, cmd_size); in queue_tmr_ring()
1182 entry = udev->cmdr + cmd_head; in queue_tmr_ring()
1191 UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); in queue_tmr_ring()
1194 uio_event_notify(&udev->uio_info); in queue_tmr_ring()
1206 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_queue_cmd() local
1215 mutex_lock(&udev->cmdr_lock); in tcmu_queue_cmd()
1222 mutex_unlock(&udev->cmdr_lock); in tcmu_queue_cmd()
1263 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_tmr_notify() local
1265 mutex_lock(&udev->cmdr_lock); in tcmu_tmr_notify()
1279 cmd, udev->name); in tcmu_tmr_notify()
1288 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in tcmu_tmr_notify()
1290 if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)) in tcmu_tmr_notify()
1294 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); in tcmu_tmr_notify()
1314 queue_tmr_ring(udev, tmr); in tcmu_tmr_notify()
1317 mutex_unlock(&udev->cmdr_lock); in tcmu_tmr_notify()
1324 struct tcmu_dev *udev = cmd->tcmu_dev; in tcmu_handle_completion() local
1340 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_handle_completion()
1373 gather_data_area(udev, cmd, true, read_len); in tcmu_handle_completion()
1375 gather_data_area(udev, cmd, false, read_len); in tcmu_handle_completion()
1411 static int tcmu_run_tmr_queue(struct tcmu_dev *udev) in tcmu_run_tmr_queue() argument
1416 if (list_empty(&udev->tmr_queue)) in tcmu_run_tmr_queue()
1419 pr_debug("running %s's tmr queue\n", udev->name); in tcmu_run_tmr_queue()
1421 list_splice_init(&udev->tmr_queue, &tmrs); in tcmu_run_tmr_queue()
1427 tmr, udev->name); in tcmu_run_tmr_queue()
1429 if (queue_tmr_ring(udev, tmr)) { in tcmu_run_tmr_queue()
1435 list_splice_tail(&tmrs, &udev->tmr_queue); in tcmu_run_tmr_queue()
1443 static bool tcmu_handle_completions(struct tcmu_dev *udev) in tcmu_handle_completions() argument
1449 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { in tcmu_handle_completions()
1454 mb = udev->mb_addr; in tcmu_handle_completions()
1457 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { in tcmu_handle_completions()
1459 struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned; in tcmu_handle_completions()
1466 size_t ring_left = head_to_end(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1467 udev->cmdr_size); in tcmu_handle_completions()
1475 UPDATE_HEAD(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1477 udev->cmdr_size); in tcmu_handle_completions()
1484 cmd = xa_load(&udev->commands, entry->hdr.cmd_id); in tcmu_handle_completions()
1486 cmd = xa_erase(&udev->commands, entry->hdr.cmd_id); in tcmu_handle_completions()
1490 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_handle_completions()
1497 UPDATE_HEAD(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1499 udev->cmdr_size); in tcmu_handle_completions()
1502 free_space = tcmu_run_tmr_queue(udev); in tcmu_handle_completions()
1505 xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { in tcmu_handle_completions()
1512 if (udev->cmd_time_out) in tcmu_handle_completions()
1513 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); in tcmu_handle_completions()
1555 static void tcmu_device_timedout(struct tcmu_dev *udev) in tcmu_device_timedout() argument
1558 if (list_empty(&udev->timedout_entry)) in tcmu_device_timedout()
1559 list_add_tail(&udev->timedout_entry, &timed_out_udevs); in tcmu_device_timedout()
1567 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); in tcmu_cmd_timedout() local
1569 pr_debug("%s cmd timeout has expired\n", udev->name); in tcmu_cmd_timedout()
1570 tcmu_device_timedout(udev); in tcmu_cmd_timedout()
1575 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); in tcmu_qfull_timedout() local
1577 pr_debug("%s qfull timeout has expired\n", udev->name); in tcmu_qfull_timedout()
1578 tcmu_device_timedout(udev); in tcmu_qfull_timedout()
1603 struct tcmu_dev *udev; in tcmu_alloc_device() local
1605 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); in tcmu_alloc_device()
1606 if (!udev) in tcmu_alloc_device()
1608 kref_init(&udev->kref); in tcmu_alloc_device()
1610 udev->name = kstrdup(name, GFP_KERNEL); in tcmu_alloc_device()
1611 if (!udev->name) { in tcmu_alloc_device()
1612 kfree(udev); in tcmu_alloc_device()
1616 udev->hba = hba; in tcmu_alloc_device()
1617 udev->cmd_time_out = TCMU_TIME_OUT; in tcmu_alloc_device()
1618 udev->qfull_time_out = -1; in tcmu_alloc_device()
1620 udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; in tcmu_alloc_device()
1621 udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; in tcmu_alloc_device()
1622 udev->cmdr_size = CMDR_SIZE_DEF; in tcmu_alloc_device()
1623 udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); in tcmu_alloc_device()
1625 mutex_init(&udev->cmdr_lock); in tcmu_alloc_device()
1627 INIT_LIST_HEAD(&udev->node); in tcmu_alloc_device()
1628 INIT_LIST_HEAD(&udev->timedout_entry); in tcmu_alloc_device()
1629 INIT_LIST_HEAD(&udev->qfull_queue); in tcmu_alloc_device()
1630 INIT_LIST_HEAD(&udev->tmr_queue); in tcmu_alloc_device()
1631 INIT_LIST_HEAD(&udev->inflight_queue); in tcmu_alloc_device()
1632 xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1); in tcmu_alloc_device()
1634 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); in tcmu_alloc_device()
1635 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); in tcmu_alloc_device()
1637 xa_init(&udev->data_pages); in tcmu_alloc_device()
1639 return &udev->se_dev; in tcmu_alloc_device()
1645 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_dev_call_rcu() local
1647 kfree(udev->uio_info.name); in tcmu_dev_call_rcu()
1648 kfree(udev->name); in tcmu_dev_call_rcu()
1649 kfree(udev); in tcmu_dev_call_rcu()
1662 static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first, in tcmu_blocks_release() argument
1669 first = first * udev->data_pages_per_blk; in tcmu_blocks_release()
1670 last = (last + 1) * udev->data_pages_per_blk - 1; in tcmu_blocks_release()
1671 xa_for_each_range(&udev->data_pages, dpi, page, first, last) { in tcmu_blocks_release()
1672 xa_erase(&udev->data_pages, dpi); in tcmu_blocks_release()
1702 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) in tcmu_remove_all_queued_tmr() argument
1706 list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { in tcmu_remove_all_queued_tmr()
1714 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); in tcmu_dev_kref_release() local
1715 struct se_device *dev = &udev->se_dev; in tcmu_dev_kref_release()
1720 vfree(udev->mb_addr); in tcmu_dev_kref_release()
1721 udev->mb_addr = NULL; in tcmu_dev_kref_release()
1724 if (!list_empty(&udev->timedout_entry)) in tcmu_dev_kref_release()
1725 list_del(&udev->timedout_entry); in tcmu_dev_kref_release()
1729 mutex_lock(&udev->cmdr_lock); in tcmu_dev_kref_release()
1730 xa_for_each(&udev->commands, i, cmd) { in tcmu_dev_kref_release()
1735 tcmu_remove_all_queued_tmr(udev); in tcmu_dev_kref_release()
1736 if (!list_empty(&udev->qfull_queue)) in tcmu_dev_kref_release()
1738 xa_destroy(&udev->commands); in tcmu_dev_kref_release()
1741 tcmu_blocks_release(udev, 0, udev->dbi_max); in tcmu_dev_kref_release()
1742 bitmap_free(udev->data_bitmap); in tcmu_dev_kref_release()
1743 mutex_unlock(&udev->cmdr_lock); in tcmu_dev_kref_release()
1750 static void run_qfull_queue(struct tcmu_dev *udev, bool fail) in run_qfull_queue() argument
1757 if (list_empty(&udev->qfull_queue)) in run_qfull_queue()
1760 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); in run_qfull_queue()
1762 list_splice_init(&udev->qfull_queue, &cmds); in run_qfull_queue()
1768 tcmu_cmd, udev->name); in run_qfull_queue()
1787 tcmu_cmd, udev->name, scsi_ret); in run_qfull_queue()
1802 list_splice_tail(&cmds, &udev->qfull_queue); in run_qfull_queue()
1807 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in run_qfull_queue()
1812 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_irqcontrol() local
1814 mutex_lock(&udev->cmdr_lock); in tcmu_irqcontrol()
1815 if (tcmu_handle_completions(udev)) in tcmu_irqcontrol()
1816 run_qfull_queue(udev, false); in tcmu_irqcontrol()
1817 mutex_unlock(&udev->cmdr_lock); in tcmu_irqcontrol()
1828 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_find_mem_index() local
1829 struct uio_info *info = &udev->uio_info; in tcmu_find_mem_index()
1839 static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) in tcmu_try_get_data_page() argument
1843 mutex_lock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1844 page = xa_load(&udev->data_pages, dpi); in tcmu_try_get_data_page()
1848 mutex_unlock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1857 dpi, udev->name); in tcmu_try_get_data_page()
1858 mutex_unlock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1865 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_vma_open() local
1869 kref_get(&udev->kref); in tcmu_vma_open()
1874 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_vma_close() local
1879 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_vma_close()
1884 struct tcmu_dev *udev = vmf->vma->vm_private_data; in tcmu_vma_fault() local
1885 struct uio_info *info = &udev->uio_info; in tcmu_vma_fault()
1901 if (offset < udev->data_off) { in tcmu_vma_fault()
1910 dpi = (offset - udev->data_off) / PAGE_SIZE; in tcmu_vma_fault()
1911 page = tcmu_try_get_data_page(udev, dpi); in tcmu_vma_fault()
1929 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_mmap() local
1934 vma->vm_private_data = udev; in tcmu_mmap()
1937 if (vma_pages(vma) != udev->mmap_pages) in tcmu_mmap()
1947 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_open() local
1950 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) in tcmu_open()
1953 udev->inode = inode; in tcmu_open()
1962 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); in tcmu_release() local
1967 mutex_lock(&udev->cmdr_lock); in tcmu_release()
1969 xa_for_each(&udev->commands, i, cmd) { in tcmu_release()
1980 cmd->cmd_id, udev->name); in tcmu_release()
1983 xa_erase(&udev->commands, i); in tcmu_release()
1991 if (freed && list_empty(&udev->tmr_queue)) in tcmu_release()
1992 run_qfull_queue(udev, false); in tcmu_release()
1994 mutex_unlock(&udev->cmdr_lock); in tcmu_release()
1996 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); in tcmu_release()
2003 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) in tcmu_init_genl_cmd_reply() argument
2005 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_init_genl_cmd_reply()
2010 if (udev->nl_reply_supported <= 0) in tcmu_init_genl_cmd_reply()
2018 udev->name); in tcmu_init_genl_cmd_reply()
2025 nl_cmd->cmd, udev->name); in tcmu_init_genl_cmd_reply()
2031 nl_cmd->udev = udev; in tcmu_init_genl_cmd_reply()
2041 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev) in tcmu_destroy_genl_cmd_reply() argument
2043 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_destroy_genl_cmd_reply()
2048 if (udev->nl_reply_supported <= 0) in tcmu_destroy_genl_cmd_reply()
2059 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) in tcmu_wait_genl_cmd_reply() argument
2061 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_wait_genl_cmd_reply()
2067 if (udev->nl_reply_supported <= 0) in tcmu_wait_genl_cmd_reply()
2081 static int tcmu_netlink_event_init(struct tcmu_dev *udev, in tcmu_netlink_event_init() argument
2097 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); in tcmu_netlink_event_init()
2101 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); in tcmu_netlink_event_init()
2105 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); in tcmu_netlink_event_init()
2118 static int tcmu_netlink_event_send(struct tcmu_dev *udev, in tcmu_netlink_event_send() argument
2126 ret = tcmu_init_genl_cmd_reply(udev, cmd); in tcmu_netlink_event_send()
2138 return tcmu_wait_genl_cmd_reply(udev); in tcmu_netlink_event_send()
2140 tcmu_destroy_genl_cmd_reply(udev); in tcmu_netlink_event_send()
2145 static int tcmu_send_dev_add_event(struct tcmu_dev *udev) in tcmu_send_dev_add_event() argument
2151 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb, in tcmu_send_dev_add_event()
2155 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb, in tcmu_send_dev_add_event()
2159 static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) in tcmu_send_dev_remove_event() argument
2165 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE, in tcmu_send_dev_remove_event()
2169 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, in tcmu_send_dev_remove_event()
2173 static int tcmu_update_uio_info(struct tcmu_dev *udev) in tcmu_update_uio_info() argument
2175 struct tcmu_hba *hba = udev->hba->hba_ptr; in tcmu_update_uio_info()
2179 info = &udev->uio_info; in tcmu_update_uio_info()
2181 if (udev->dev_config[0]) in tcmu_update_uio_info()
2183 udev->name, udev->dev_config); in tcmu_update_uio_info()
2186 udev->name); in tcmu_update_uio_info()
2199 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_configure_device() local
2205 ret = tcmu_update_uio_info(udev); in tcmu_configure_device()
2209 info = &udev->uio_info; in tcmu_configure_device()
2211 mutex_lock(&udev->cmdr_lock); in tcmu_configure_device()
2212 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); in tcmu_configure_device()
2213 mutex_unlock(&udev->cmdr_lock); in tcmu_configure_device()
2214 if (!udev->data_bitmap) { in tcmu_configure_device()
2219 mb = vzalloc(udev->cmdr_size + CMDR_OFF); in tcmu_configure_device()
2226 udev->mb_addr = mb; in tcmu_configure_device()
2227 udev->cmdr = (void *)mb + CMDR_OFF; in tcmu_configure_device()
2228 udev->data_off = udev->cmdr_size + CMDR_OFF; in tcmu_configure_device()
2229 data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT; in tcmu_configure_device()
2230 udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT; in tcmu_configure_device()
2231 udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE; in tcmu_configure_device()
2232 udev->dbi_thresh = 0; /* Default in Idle state */ in tcmu_configure_device()
2241 mb->cmdr_size = udev->cmdr_size; in tcmu_configure_device()
2243 WARN_ON(!PAGE_ALIGNED(udev->data_off)); in tcmu_configure_device()
2249 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; in tcmu_configure_device()
2250 info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF; in tcmu_configure_device()
2277 if (udev->nl_reply_supported >= 0) in tcmu_configure_device()
2278 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; in tcmu_configure_device()
2284 kref_get(&udev->kref); in tcmu_configure_device()
2286 ret = tcmu_send_dev_add_event(udev); in tcmu_configure_device()
2291 list_add(&udev->node, &root_udev); in tcmu_configure_device()
2297 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_configure_device()
2298 uio_unregister_device(&udev->uio_info); in tcmu_configure_device()
2300 vfree(udev->mb_addr); in tcmu_configure_device()
2301 udev->mb_addr = NULL; in tcmu_configure_device()
2303 bitmap_free(udev->data_bitmap); in tcmu_configure_device()
2304 udev->data_bitmap = NULL; in tcmu_configure_device()
2314 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_free_device() local
2317 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_free_device()
2322 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_destroy_device() local
2324 del_timer_sync(&udev->cmd_timer); in tcmu_destroy_device()
2325 del_timer_sync(&udev->qfull_timer); in tcmu_destroy_device()
2328 list_del(&udev->node); in tcmu_destroy_device()
2331 tcmu_send_dev_remove_event(udev); in tcmu_destroy_device()
2333 uio_unregister_device(&udev->uio_info); in tcmu_destroy_device()
2336 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_destroy_device()
2339 static void tcmu_unblock_dev(struct tcmu_dev *udev) in tcmu_unblock_dev() argument
2341 mutex_lock(&udev->cmdr_lock); in tcmu_unblock_dev()
2342 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); in tcmu_unblock_dev()
2343 mutex_unlock(&udev->cmdr_lock); in tcmu_unblock_dev()
2346 static void tcmu_block_dev(struct tcmu_dev *udev) in tcmu_block_dev() argument
2348 mutex_lock(&udev->cmdr_lock); in tcmu_block_dev()
2350 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) in tcmu_block_dev()
2354 tcmu_handle_completions(udev); in tcmu_block_dev()
2356 run_qfull_queue(udev, true); in tcmu_block_dev()
2359 mutex_unlock(&udev->cmdr_lock); in tcmu_block_dev()
2362 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) in tcmu_reset_ring() argument
2368 mutex_lock(&udev->cmdr_lock); in tcmu_reset_ring()
2370 xa_for_each(&udev->commands, i, cmd) { in tcmu_reset_ring()
2372 cmd->cmd_id, udev->name, in tcmu_reset_ring()
2378 xa_erase(&udev->commands, i); in tcmu_reset_ring()
2400 mb = udev->mb_addr; in tcmu_reset_ring()
2402 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, in tcmu_reset_ring()
2405 udev->cmdr_last_cleaned = 0; in tcmu_reset_ring()
2409 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_reset_ring()
2411 del_timer(&udev->cmd_timer); in tcmu_reset_ring()
2420 tcmu_remove_all_queued_tmr(udev); in tcmu_reset_ring()
2422 run_qfull_queue(udev, false); in tcmu_reset_ring()
2424 mutex_unlock(&udev->cmdr_lock); in tcmu_reset_ring()
2465 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg) in tcmu_set_max_blocks_param() argument
2468 uint32_t pages_per_blk = udev->data_pages_per_blk; in tcmu_set_max_blocks_param()
2491 mutex_lock(&udev->cmdr_lock); in tcmu_set_max_blocks_param()
2492 if (udev->data_bitmap) { in tcmu_set_max_blocks_param()
2498 udev->data_area_mb = val; in tcmu_set_max_blocks_param()
2499 udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk; in tcmu_set_max_blocks_param()
2502 mutex_unlock(&udev->cmdr_lock); in tcmu_set_max_blocks_param()
2506 static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg) in tcmu_set_data_pages_per_blk() argument
2517 if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) { in tcmu_set_data_pages_per_blk()
2519 val, udev->data_area_mb, in tcmu_set_data_pages_per_blk()
2520 TCMU_MBS_TO_PAGES(udev->data_area_mb)); in tcmu_set_data_pages_per_blk()
2524 mutex_lock(&udev->cmdr_lock); in tcmu_set_data_pages_per_blk()
2525 if (udev->data_bitmap) { in tcmu_set_data_pages_per_blk()
2531 udev->data_pages_per_blk = val; in tcmu_set_data_pages_per_blk()
2532 udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val; in tcmu_set_data_pages_per_blk()
2535 mutex_unlock(&udev->cmdr_lock); in tcmu_set_data_pages_per_blk()
2539 static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg) in tcmu_set_cmd_ring_size() argument
2555 mutex_lock(&udev->cmdr_lock); in tcmu_set_cmd_ring_size()
2556 if (udev->data_bitmap) { in tcmu_set_cmd_ring_size()
2562 udev->cmdr_size = (val << 20) - CMDR_OFF; in tcmu_set_cmd_ring_size()
2566 udev->cmdr_size = CMDR_SIZE_DEF; in tcmu_set_cmd_ring_size()
2570 mutex_unlock(&udev->cmdr_lock); in tcmu_set_cmd_ring_size()
2577 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_set_configfs_dev_params() local
2595 if (match_strlcpy(udev->dev_config, &args[0], in tcmu_set_configfs_dev_params()
2600 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); in tcmu_set_configfs_dev_params()
2603 ret = match_u64(&args[0], &udev->dev_size); in tcmu_set_configfs_dev_params()
2617 ret = match_int(&args[0], &udev->nl_reply_supported); in tcmu_set_configfs_dev_params()
2623 ret = tcmu_set_max_blocks_param(udev, &args[0]); in tcmu_set_configfs_dev_params()
2626 ret = tcmu_set_data_pages_per_blk(udev, &args[0]); in tcmu_set_configfs_dev_params()
2629 ret = tcmu_set_cmd_ring_size(udev, &args[0]); in tcmu_set_configfs_dev_params()
2645 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_show_configfs_dev_params() local
2649 udev->dev_config[0] ? udev->dev_config : "NULL"); in tcmu_show_configfs_dev_params()
2650 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); in tcmu_show_configfs_dev_params()
2651 bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb); in tcmu_show_configfs_dev_params()
2652 bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk); in tcmu_show_configfs_dev_params()
2654 (udev->cmdr_size + CMDR_OFF) >> 20); in tcmu_show_configfs_dev_params()
2661 struct tcmu_dev *udev = TCMU_DEV(dev); in tcmu_get_blocks() local
2663 return div_u64(udev->dev_size - dev->dev_attrib.block_size, in tcmu_get_blocks()
2677 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_cmd_time_out_show() local
2679 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); in tcmu_cmd_time_out_show()
2687 struct tcmu_dev *udev = container_of(da->da_dev, in tcmu_cmd_time_out_store() local
2701 udev->cmd_time_out = val * MSEC_PER_SEC; in tcmu_cmd_time_out_store()
2710 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_qfull_time_out_show() local
2712 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? in tcmu_qfull_time_out_show()
2713 udev->qfull_time_out : in tcmu_qfull_time_out_show()
2714 udev->qfull_time_out / MSEC_PER_SEC); in tcmu_qfull_time_out_show()
2722 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_qfull_time_out_store() local
2731 udev->qfull_time_out = val * MSEC_PER_SEC; in tcmu_qfull_time_out_store()
2733 udev->qfull_time_out = val; in tcmu_qfull_time_out_store()
2746 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_max_data_area_mb_show() local
2748 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb); in tcmu_max_data_area_mb_show()
2757 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_data_pages_per_blk_show() local
2759 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk); in tcmu_data_pages_per_blk_show()
2767 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_cmd_ring_size_mb_show() local
2770 (udev->cmdr_size + CMDR_OFF) >> 20); in tcmu_cmd_ring_size_mb_show()
2778 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_config_show() local
2780 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); in tcmu_dev_config_show()
2783 static int tcmu_send_dev_config_event(struct tcmu_dev *udev, in tcmu_send_dev_config_event() argument
2790 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_config_event()
2799 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_config_event()
2809 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_config_store() local
2817 if (target_dev_configured(&udev->se_dev)) { in tcmu_dev_config_store()
2818 ret = tcmu_send_dev_config_event(udev, page); in tcmu_dev_config_store()
2823 strscpy(udev->dev_config, page, TCMU_CONFIG_LEN); in tcmu_dev_config_store()
2825 ret = tcmu_update_uio_info(udev); in tcmu_dev_config_store()
2830 strscpy(udev->dev_config, page, TCMU_CONFIG_LEN); in tcmu_dev_config_store()
2840 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_size_show() local
2842 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); in tcmu_dev_size_show()
2845 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) in tcmu_send_dev_size_event() argument
2851 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_size_event()
2861 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_dev_size_event()
2870 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_size_store() local
2879 if (target_dev_configured(&udev->se_dev)) { in tcmu_dev_size_store()
2880 ret = tcmu_send_dev_size_event(udev, val); in tcmu_dev_size_store()
2886 udev->dev_size = val; in tcmu_dev_size_store()
2896 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_nl_reply_supported_show() local
2898 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); in tcmu_nl_reply_supported_show()
2906 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_nl_reply_supported_store() local
2914 udev->nl_reply_supported = val; in tcmu_nl_reply_supported_store()
2928 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) in tcmu_send_emulate_write_cache() argument
2934 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_emulate_write_cache()
2943 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, in tcmu_send_emulate_write_cache()
2952 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_emulate_write_cache_store() local
2961 if (target_dev_configured(&udev->se_dev)) { in tcmu_emulate_write_cache_store()
2962 ret = tcmu_send_emulate_write_cache(udev, val); in tcmu_emulate_write_cache_store()
2978 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_tmr_notification_show() local
2981 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)); in tcmu_tmr_notification_show()
2989 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_tmr_notification_store() local
3000 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); in tcmu_tmr_notification_store()
3002 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); in tcmu_tmr_notification_store()
3012 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_block_dev_show() local
3014 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) in tcmu_block_dev_show()
3026 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_block_dev_store() local
3030 if (!target_dev_configured(&udev->se_dev)) { in tcmu_block_dev_store()
3045 tcmu_unblock_dev(udev); in tcmu_block_dev_store()
3047 tcmu_block_dev(udev); in tcmu_block_dev_store()
3058 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_reset_ring_store() local
3062 if (!target_dev_configured(&udev->se_dev)) { in tcmu_reset_ring_store()
3076 tcmu_reset_ring(udev, val); in tcmu_reset_ring_store()
3087 struct tcmu_dev *udev = TCMU_DEV(se_dev); in tcmu_free_kept_buf_store() local
3092 if (!target_dev_configured(&udev->se_dev)) { in tcmu_free_kept_buf_store()
3101 mutex_lock(&udev->cmdr_lock); in tcmu_free_kept_buf_store()
3104 XA_STATE(xas, &udev->commands, cmd_id); in tcmu_free_kept_buf_store()
3131 if (list_empty(&udev->tmr_queue)) in tcmu_free_kept_buf_store()
3132 run_qfull_queue(udev, false); in tcmu_free_kept_buf_store()
3135 mutex_unlock(&udev->cmdr_lock); in tcmu_free_kept_buf_store()
3188 struct tcmu_dev *udev; in find_free_blocks() local
3197 list_for_each_entry(udev, &root_udev, node) { in find_free_blocks()
3198 mutex_lock(&udev->cmdr_lock); in find_free_blocks()
3200 if (!target_dev_configured(&udev->se_dev)) { in find_free_blocks()
3201 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3206 if (tcmu_handle_completions(udev)) in find_free_blocks()
3207 run_qfull_queue(udev, false); in find_free_blocks()
3210 if (!udev->dbi_thresh) { in find_free_blocks()
3211 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3215 end = udev->dbi_max + 1; in find_free_blocks()
3216 block = find_last_bit(udev->data_bitmap, end); in find_free_blocks()
3217 if (block == udev->dbi_max) { in find_free_blocks()
3222 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3226 udev->dbi_thresh = start = 0; in find_free_blocks()
3227 udev->dbi_max = 0; in find_free_blocks()
3229 udev->dbi_thresh = start = block + 1; in find_free_blocks()
3230 udev->dbi_max = block; in find_free_blocks()
3243 pages_freed = tcmu_blocks_release(udev, start, end - 1); in find_free_blocks()
3246 off = udev->data_off + (loff_t)start * udev->data_blk_size; in find_free_blocks()
3247 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); in find_free_blocks()
3249 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3255 total_blocks_freed, udev->name); in find_free_blocks()
3265 struct tcmu_dev *udev, *tmp_dev; in check_timedout_devices() local
3272 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) { in check_timedout_devices()
3273 list_del_init(&udev->timedout_entry); in check_timedout_devices()
3276 mutex_lock(&udev->cmdr_lock); in check_timedout_devices()
3282 if (udev->cmd_time_out) { in check_timedout_devices()
3284 &udev->inflight_queue, in check_timedout_devices()
3288 tcmu_set_next_deadline(&udev->inflight_queue, in check_timedout_devices()
3289 &udev->cmd_timer); in check_timedout_devices()
3291 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, in check_timedout_devices()
3295 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in check_timedout_devices()
3297 mutex_unlock(&udev->cmdr_lock); in check_timedout_devices()
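
The hits in is_ring_space_avail(), ring_insert_padding() and the UPDATE_HEAD() calls above (around lines 820-850 and 955-976) all revolve around the same circular command-ring accounting: entries are contiguous, so when one would not fit before the physical end of the ring a PAD entry is written first and those padding bytes count against the free space. The standalone userspace sketch below models that check; the helper names (head_to_end, spc_free) mirror the driver, but the bodies here are an illustrative assumption, not the kernel implementation itself.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Bytes from the current head to the physical end of the ring. */
static size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

static size_t spc_used(size_t head, size_t tail, size_t size)
{
	return (head >= tail) ? head - tail : size - (tail - head);
}

/* One byte stays unused so a full ring is distinguishable from an empty one. */
static size_t spc_free(size_t head, size_t tail, size_t size)
{
	return size - spc_used(head, tail, size) - 1;
}

/*
 * Model of the is_ring_space_avail() check: if the entry does not fit before
 * the wrap point, a PAD entry is inserted first, so the effective requirement
 * grows by the bytes left at the end of the ring.
 */
static int ring_space_avail(size_t cmd_head, size_t last_cleaned,
			    size_t cmdr_size, size_t cmd_size)
{
	size_t cmd_needed;

	if (head_to_end(cmd_head, cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, cmdr_size);

	return spc_free(cmd_head, last_cleaned, cmdr_size) >= cmd_needed;
}

int main(void)
{
	/* 64-byte ring, head at 56, last cleaned at 40: a 16-byte entry needs
	 * 16 + 8 padding bytes = 24, and 47 bytes are free, so it fits. */
	assert(ring_space_avail(56, 40, 64, 16));

	/* Same head, but the handler has only cleaned up to offset 0:
	 * 7 bytes free < 24 needed, so the command would be queued instead. */
	assert(!ring_space_avail(56, 0, 64, 16));

	printf("ring space model ok\n");
	return 0;
}

In the driver itself these helpers operate on udev->cmdr_size and udev->cmdr_last_cleaned, with the head taken from the shared mailbox (mb->cmd_head), as the hits in is_ring_space_avail() and ring_insert_padding() show.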