1f7a7a5c2SJack Wang // SPDX-License-Identifier: GPL-2.0-or-later 2f7a7a5c2SJack Wang /* 3f7a7a5c2SJack Wang * RDMA Network Block Driver 4f7a7a5c2SJack Wang * 5f7a7a5c2SJack Wang * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved. 6f7a7a5c2SJack Wang * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved. 7f7a7a5c2SJack Wang * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved. 8f7a7a5c2SJack Wang */ 9f7a7a5c2SJack Wang 10f7a7a5c2SJack Wang #undef pr_fmt 11f7a7a5c2SJack Wang #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt 12f7a7a5c2SJack Wang 13f7a7a5c2SJack Wang #include <linux/module.h> 14f7a7a5c2SJack Wang #include <linux/blkdev.h> 15f7a7a5c2SJack Wang #include <linux/hdreg.h> 16f7a7a5c2SJack Wang #include <linux/scatterlist.h> 17f7a7a5c2SJack Wang #include <linux/idr.h> 18f7a7a5c2SJack Wang 19f7a7a5c2SJack Wang #include "rnbd-clt.h" 20f7a7a5c2SJack Wang 21f7a7a5c2SJack Wang MODULE_DESCRIPTION("RDMA Network Block Device Client"); 22f7a7a5c2SJack Wang MODULE_LICENSE("GPL"); 23f7a7a5c2SJack Wang 24f7a7a5c2SJack Wang static int rnbd_client_major; 25f7a7a5c2SJack Wang static DEFINE_IDA(index_ida); 26f7a7a5c2SJack Wang static DEFINE_MUTEX(ida_lock); 27f7a7a5c2SJack Wang static DEFINE_MUTEX(sess_lock); 28f7a7a5c2SJack Wang static LIST_HEAD(sess_list); 29f7a7a5c2SJack Wang 30f7a7a5c2SJack Wang /* 31f7a7a5c2SJack Wang * Maximum number of partitions an instance can have. 32f7a7a5c2SJack Wang * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself) 33f7a7a5c2SJack Wang */ 34f7a7a5c2SJack Wang #define RNBD_PART_BITS 6 35f7a7a5c2SJack Wang 36f7a7a5c2SJack Wang static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess) 37f7a7a5c2SJack Wang { 38f7a7a5c2SJack Wang return refcount_inc_not_zero(&sess->refcount); 39f7a7a5c2SJack Wang } 40f7a7a5c2SJack Wang 41f7a7a5c2SJack Wang static void free_sess(struct rnbd_clt_session *sess); 42f7a7a5c2SJack Wang 43f7a7a5c2SJack Wang static void rnbd_clt_put_sess(struct rnbd_clt_session *sess) 44f7a7a5c2SJack Wang { 45f7a7a5c2SJack Wang might_sleep(); 46f7a7a5c2SJack Wang 47f7a7a5c2SJack Wang if (refcount_dec_and_test(&sess->refcount)) 48f7a7a5c2SJack Wang free_sess(sess); 49f7a7a5c2SJack Wang } 50f7a7a5c2SJack Wang 51f7a7a5c2SJack Wang static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev) 52f7a7a5c2SJack Wang { 53f7a7a5c2SJack Wang might_sleep(); 54f7a7a5c2SJack Wang 55f7a7a5c2SJack Wang if (!refcount_dec_and_test(&dev->refcount)) 56f7a7a5c2SJack Wang return; 57f7a7a5c2SJack Wang 58f7a7a5c2SJack Wang mutex_lock(&ida_lock); 59f7a7a5c2SJack Wang ida_simple_remove(&index_ida, dev->clt_device_id); 60f7a7a5c2SJack Wang mutex_unlock(&ida_lock); 61f7a7a5c2SJack Wang kfree(dev->hw_queues); 6264e8a6ecSMd Haris Iqbal kfree(dev->pathname); 63f7a7a5c2SJack Wang rnbd_clt_put_sess(dev->sess); 64f7a7a5c2SJack Wang mutex_destroy(&dev->lock); 65f7a7a5c2SJack Wang kfree(dev); 66f7a7a5c2SJack Wang } 67f7a7a5c2SJack Wang 68f7a7a5c2SJack Wang static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev) 69f7a7a5c2SJack Wang { 70f7a7a5c2SJack Wang return refcount_inc_not_zero(&dev->refcount); 71f7a7a5c2SJack Wang } 72f7a7a5c2SJack Wang 73f7a7a5c2SJack Wang static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev, 74f7a7a5c2SJack Wang const struct rnbd_msg_open_rsp *rsp) 75f7a7a5c2SJack Wang { 76f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 77f7a7a5c2SJack Wang 78f7a7a5c2SJack Wang if (!rsp->logical_block_size) 79f7a7a5c2SJack Wang return -EINVAL; 80f7a7a5c2SJack Wang 81f7a7a5c2SJack Wang dev->device_id = le32_to_cpu(rsp->device_id); 82f7a7a5c2SJack Wang dev->nsectors = le64_to_cpu(rsp->nsectors); 83f7a7a5c2SJack Wang dev->logical_block_size = le16_to_cpu(rsp->logical_block_size); 84f7a7a5c2SJack Wang dev->physical_block_size = le16_to_cpu(rsp->physical_block_size); 85f7a7a5c2SJack Wang dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors); 86f7a7a5c2SJack Wang dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors); 87f7a7a5c2SJack Wang dev->discard_granularity = le32_to_cpu(rsp->discard_granularity); 88f7a7a5c2SJack Wang dev->discard_alignment = le32_to_cpu(rsp->discard_alignment); 89f7a7a5c2SJack Wang dev->secure_discard = le16_to_cpu(rsp->secure_discard); 90f7a7a5c2SJack Wang dev->rotational = rsp->rotational; 91512c781fSGioh Kim dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK); 92512c781fSGioh Kim dev->fua = !!(rsp->cache_policy & RNBD_FUA); 93f7a7a5c2SJack Wang 94f7a7a5c2SJack Wang dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE; 95f7a7a5c2SJack Wang dev->max_segments = BMAX_SEGMENTS; 96f7a7a5c2SJack Wang 97f7a7a5c2SJack Wang return 0; 98f7a7a5c2SJack Wang } 99f7a7a5c2SJack Wang 100f7a7a5c2SJack Wang static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev, 101f7a7a5c2SJack Wang size_t new_nsectors) 102f7a7a5c2SJack Wang { 103f7a7a5c2SJack Wang rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n", 104f7a7a5c2SJack Wang dev->nsectors, new_nsectors); 105f7a7a5c2SJack Wang dev->nsectors = new_nsectors; 106230272b4SChristoph Hellwig set_capacity_and_notify(dev->gd, dev->nsectors); 107659e56baSChristoph Hellwig return 0; 108f7a7a5c2SJack Wang } 109f7a7a5c2SJack Wang 110f7a7a5c2SJack Wang static int process_msg_open_rsp(struct rnbd_clt_dev *dev, 111f7a7a5c2SJack Wang struct rnbd_msg_open_rsp *rsp) 112f7a7a5c2SJack Wang { 113f7a7a5c2SJack Wang int err = 0; 114f7a7a5c2SJack Wang 115f7a7a5c2SJack Wang mutex_lock(&dev->lock); 116f7a7a5c2SJack Wang if (dev->dev_state == DEV_STATE_UNMAPPED) { 117f7a7a5c2SJack Wang rnbd_clt_info(dev, 118f7a7a5c2SJack Wang "Ignoring Open-Response message from server for unmapped device\n"); 119f7a7a5c2SJack Wang err = -ENOENT; 120f7a7a5c2SJack Wang goto out; 121f7a7a5c2SJack Wang } 122f7a7a5c2SJack Wang if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) { 123f7a7a5c2SJack Wang u64 nsectors = le64_to_cpu(rsp->nsectors); 124f7a7a5c2SJack Wang 125f7a7a5c2SJack Wang /* 126f7a7a5c2SJack Wang * If the device was remapped and the size changed in the 127f7a7a5c2SJack Wang * meantime we need to revalidate it 128f7a7a5c2SJack Wang */ 129f7a7a5c2SJack Wang if (dev->nsectors != nsectors) 130f7a7a5c2SJack Wang rnbd_clt_change_capacity(dev, nsectors); 131f7a7a5c2SJack Wang rnbd_clt_info(dev, "Device online, device remapped successfully\n"); 132f7a7a5c2SJack Wang } 133f7a7a5c2SJack Wang err = rnbd_clt_set_dev_attr(dev, rsp); 134f7a7a5c2SJack Wang if (err) 135f7a7a5c2SJack Wang goto out; 136f7a7a5c2SJack Wang dev->dev_state = DEV_STATE_MAPPED; 137f7a7a5c2SJack Wang 138f7a7a5c2SJack Wang out: 139f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 140f7a7a5c2SJack Wang 141f7a7a5c2SJack Wang return err; 142f7a7a5c2SJack Wang } 143f7a7a5c2SJack Wang 144f7a7a5c2SJack Wang int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize) 145f7a7a5c2SJack Wang { 146f7a7a5c2SJack Wang int ret = 0; 147f7a7a5c2SJack Wang 148f7a7a5c2SJack Wang mutex_lock(&dev->lock); 149f7a7a5c2SJack Wang if (dev->dev_state != DEV_STATE_MAPPED) { 150f7a7a5c2SJack Wang pr_err("Failed to set new size of the device, device is not opened\n"); 151f7a7a5c2SJack Wang ret = -ENOENT; 152f7a7a5c2SJack Wang goto out; 153f7a7a5c2SJack Wang } 154f7a7a5c2SJack Wang ret = rnbd_clt_change_capacity(dev, newsize); 155f7a7a5c2SJack Wang 156f7a7a5c2SJack Wang out: 157f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 158f7a7a5c2SJack Wang 159f7a7a5c2SJack Wang return ret; 160f7a7a5c2SJack Wang } 161f7a7a5c2SJack Wang 162f7a7a5c2SJack Wang static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q) 163f7a7a5c2SJack Wang { 164f7a7a5c2SJack Wang if (WARN_ON(!q->hctx)) 165f7a7a5c2SJack Wang return; 166f7a7a5c2SJack Wang 167f7a7a5c2SJack Wang /* We can come here from interrupt, thus async=true */ 168f7a7a5c2SJack Wang blk_mq_run_hw_queue(q->hctx, true); 169f7a7a5c2SJack Wang } 170f7a7a5c2SJack Wang 171f7a7a5c2SJack Wang enum { 172f7a7a5c2SJack Wang RNBD_DELAY_IFBUSY = -1, 173f7a7a5c2SJack Wang }; 174f7a7a5c2SJack Wang 175f7a7a5c2SJack Wang /** 176f7a7a5c2SJack Wang * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun 177f7a7a5c2SJack Wang * @sess: Session to find a queue for 178f7a7a5c2SJack Wang * @cpu: Cpu to start the search from 179f7a7a5c2SJack Wang * 180f7a7a5c2SJack Wang * Description: 181f7a7a5c2SJack Wang * Each CPU has a list of HW queues, which needs to be rerun. If a list 182f7a7a5c2SJack Wang * is not empty - it is marked with a bit. This function finds first 183f7a7a5c2SJack Wang * set bit in a bitmap and returns corresponding CPU list. 184f7a7a5c2SJack Wang */ 185f7a7a5c2SJack Wang static struct rnbd_cpu_qlist * 186f7a7a5c2SJack Wang rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu) 187f7a7a5c2SJack Wang { 188f7a7a5c2SJack Wang int bit; 189f7a7a5c2SJack Wang 190f7a7a5c2SJack Wang /* Search from cpu to nr_cpu_ids */ 191f7a7a5c2SJack Wang bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu); 192f7a7a5c2SJack Wang if (bit < nr_cpu_ids) { 193f7a7a5c2SJack Wang return per_cpu_ptr(sess->cpu_queues, bit); 194f7a7a5c2SJack Wang } else if (cpu != 0) { 195f7a7a5c2SJack Wang /* Search from 0 to cpu */ 196f7a7a5c2SJack Wang bit = find_next_bit(sess->cpu_queues_bm, cpu, 0); 197f7a7a5c2SJack Wang if (bit < cpu) 198f7a7a5c2SJack Wang return per_cpu_ptr(sess->cpu_queues, bit); 199f7a7a5c2SJack Wang } 200f7a7a5c2SJack Wang 201f7a7a5c2SJack Wang return NULL; 202f7a7a5c2SJack Wang } 203f7a7a5c2SJack Wang 204f7a7a5c2SJack Wang static inline int nxt_cpu(int cpu) 205f7a7a5c2SJack Wang { 206f7a7a5c2SJack Wang return (cpu + 1) % nr_cpu_ids; 207f7a7a5c2SJack Wang } 208f7a7a5c2SJack Wang 209f7a7a5c2SJack Wang /** 210f7a7a5c2SJack Wang * rnbd_rerun_if_needed() - rerun next queue marked as stopped 211f7a7a5c2SJack Wang * @sess: Session to rerun a queue on 212f7a7a5c2SJack Wang * 213f7a7a5c2SJack Wang * Description: 214f7a7a5c2SJack Wang * Each CPU has it's own list of HW queues, which should be rerun. 215f7a7a5c2SJack Wang * Function finds such list with HW queues, takes a list lock, picks up 216f7a7a5c2SJack Wang * the first HW queue out of the list and requeues it. 217f7a7a5c2SJack Wang * 218f7a7a5c2SJack Wang * Return: 219f7a7a5c2SJack Wang * True if the queue was requeued, false otherwise. 220f7a7a5c2SJack Wang * 221f7a7a5c2SJack Wang * Context: 222f7a7a5c2SJack Wang * Does not matter. 223f7a7a5c2SJack Wang */ 224f7a7a5c2SJack Wang static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess) 225f7a7a5c2SJack Wang { 226f7a7a5c2SJack Wang struct rnbd_queue *q = NULL; 227f7a7a5c2SJack Wang struct rnbd_cpu_qlist *cpu_q; 228f7a7a5c2SJack Wang unsigned long flags; 229f7a7a5c2SJack Wang int *cpup; 230f7a7a5c2SJack Wang 231f7a7a5c2SJack Wang /* 232f7a7a5c2SJack Wang * To keep fairness and not to let other queues starve we always 233f7a7a5c2SJack Wang * try to wake up someone else in round-robin manner. That of course 234f7a7a5c2SJack Wang * increases latency but queues always have a chance to be executed. 235f7a7a5c2SJack Wang */ 236f7a7a5c2SJack Wang cpup = get_cpu_ptr(sess->cpu_rr); 237f7a7a5c2SJack Wang for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q; 238f7a7a5c2SJack Wang cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) { 239f7a7a5c2SJack Wang if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags)) 240f7a7a5c2SJack Wang continue; 241f7a7a5c2SJack Wang if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm))) 242f7a7a5c2SJack Wang goto unlock; 243f7a7a5c2SJack Wang q = list_first_entry_or_null(&cpu_q->requeue_list, 244f7a7a5c2SJack Wang typeof(*q), requeue_list); 245f7a7a5c2SJack Wang if (WARN_ON(!q)) 246f7a7a5c2SJack Wang goto clear_bit; 247f7a7a5c2SJack Wang list_del_init(&q->requeue_list); 248f7a7a5c2SJack Wang clear_bit_unlock(0, &q->in_list); 249f7a7a5c2SJack Wang 250f7a7a5c2SJack Wang if (list_empty(&cpu_q->requeue_list)) { 251f7a7a5c2SJack Wang /* Clear bit if nothing is left */ 252f7a7a5c2SJack Wang clear_bit: 253f7a7a5c2SJack Wang clear_bit(cpu_q->cpu, sess->cpu_queues_bm); 254f7a7a5c2SJack Wang } 255f7a7a5c2SJack Wang unlock: 256f7a7a5c2SJack Wang spin_unlock_irqrestore(&cpu_q->requeue_lock, flags); 257f7a7a5c2SJack Wang 258f7a7a5c2SJack Wang if (q) 259f7a7a5c2SJack Wang break; 260f7a7a5c2SJack Wang } 261f7a7a5c2SJack Wang 262f7a7a5c2SJack Wang /** 263f7a7a5c2SJack Wang * Saves the CPU that is going to be requeued on the per-cpu var. Just 264f7a7a5c2SJack Wang * incrementing it doesn't work because rnbd_get_cpu_qlist() will 265f7a7a5c2SJack Wang * always return the first CPU with something on the queue list when the 266f7a7a5c2SJack Wang * value stored on the var is greater than the last CPU with something 267f7a7a5c2SJack Wang * on the list. 268f7a7a5c2SJack Wang */ 269f7a7a5c2SJack Wang if (cpu_q) 270f7a7a5c2SJack Wang *cpup = cpu_q->cpu; 271f7a7a5c2SJack Wang put_cpu_var(sess->cpu_rr); 272f7a7a5c2SJack Wang 273f7a7a5c2SJack Wang if (q) 274f7a7a5c2SJack Wang rnbd_clt_dev_requeue(q); 275f7a7a5c2SJack Wang 276f7a7a5c2SJack Wang return q; 277f7a7a5c2SJack Wang } 278f7a7a5c2SJack Wang 279f7a7a5c2SJack Wang /** 280f7a7a5c2SJack Wang * rnbd_rerun_all_if_idle() - rerun all queues left in the list if 281f7a7a5c2SJack Wang * session is idling (there are no requests 282f7a7a5c2SJack Wang * in-flight). 283f7a7a5c2SJack Wang * @sess: Session to rerun the queues on 284f7a7a5c2SJack Wang * 285f7a7a5c2SJack Wang * Description: 286f7a7a5c2SJack Wang * This function tries to rerun all stopped queues if there are no 287f7a7a5c2SJack Wang * requests in-flight anymore. This function tries to solve an obvious 288f7a7a5c2SJack Wang * problem, when number of tags < than number of queues (hctx), which 289f7a7a5c2SJack Wang * are stopped and put to sleep. If last permit, which has been just put, 290f7a7a5c2SJack Wang * does not wake up all left queues (hctxs), IO requests hang forever. 291f7a7a5c2SJack Wang * 292f7a7a5c2SJack Wang * That can happen when all number of permits, say N, have been exhausted 293f7a7a5c2SJack Wang * from one CPU, and we have many block devices per session, say M. 294f7a7a5c2SJack Wang * Each block device has it's own queue (hctx) for each CPU, so eventually 295f7a7a5c2SJack Wang * we can put that number of queues (hctxs) to sleep: M x nr_cpu_ids. 296f7a7a5c2SJack Wang * If number of permits N < M x nr_cpu_ids finally we will get an IO hang. 297f7a7a5c2SJack Wang * 298f7a7a5c2SJack Wang * To avoid this hang last caller of rnbd_put_permit() (last caller is the 299f7a7a5c2SJack Wang * one who observes sess->busy == 0) must wake up all remaining queues. 300f7a7a5c2SJack Wang * 301f7a7a5c2SJack Wang * Context: 302f7a7a5c2SJack Wang * Does not matter. 303f7a7a5c2SJack Wang */ 304f7a7a5c2SJack Wang static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess) 305f7a7a5c2SJack Wang { 306f7a7a5c2SJack Wang bool requeued; 307f7a7a5c2SJack Wang 308f7a7a5c2SJack Wang do { 309f7a7a5c2SJack Wang requeued = rnbd_rerun_if_needed(sess); 310f7a7a5c2SJack Wang } while (atomic_read(&sess->busy) == 0 && requeued); 311f7a7a5c2SJack Wang } 312f7a7a5c2SJack Wang 313f7a7a5c2SJack Wang static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess, 314f7a7a5c2SJack Wang enum rtrs_clt_con_type con_type, 315f7a7a5c2SJack Wang int wait) 316f7a7a5c2SJack Wang { 317f7a7a5c2SJack Wang struct rtrs_permit *permit; 318f7a7a5c2SJack Wang 319f7a7a5c2SJack Wang permit = rtrs_clt_get_permit(sess->rtrs, con_type, 320f7a7a5c2SJack Wang wait ? RTRS_PERMIT_WAIT : 321f7a7a5c2SJack Wang RTRS_PERMIT_NOWAIT); 322f7a7a5c2SJack Wang if (likely(permit)) 323f7a7a5c2SJack Wang /* We have a subtle rare case here, when all permits can be 324f7a7a5c2SJack Wang * consumed before busy counter increased. This is safe, 325f7a7a5c2SJack Wang * because loser will get NULL as a permit, observe 0 busy 326f7a7a5c2SJack Wang * counter and immediately restart the queue himself. 327f7a7a5c2SJack Wang */ 328f7a7a5c2SJack Wang atomic_inc(&sess->busy); 329f7a7a5c2SJack Wang 330f7a7a5c2SJack Wang return permit; 331f7a7a5c2SJack Wang } 332f7a7a5c2SJack Wang 333f7a7a5c2SJack Wang static void rnbd_put_permit(struct rnbd_clt_session *sess, 334f7a7a5c2SJack Wang struct rtrs_permit *permit) 335f7a7a5c2SJack Wang { 336f7a7a5c2SJack Wang rtrs_clt_put_permit(sess->rtrs, permit); 337f7a7a5c2SJack Wang atomic_dec(&sess->busy); 338f7a7a5c2SJack Wang /* Paired with rnbd_clt_dev_add_to_requeue(). Decrement first 339f7a7a5c2SJack Wang * and then check queue bits. 340f7a7a5c2SJack Wang */ 341f7a7a5c2SJack Wang smp_mb__after_atomic(); 342f7a7a5c2SJack Wang rnbd_rerun_all_if_idle(sess); 343f7a7a5c2SJack Wang } 344f7a7a5c2SJack Wang 345f7a7a5c2SJack Wang static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess, 346f7a7a5c2SJack Wang enum rtrs_clt_con_type con_type, 347f7a7a5c2SJack Wang int wait) 348f7a7a5c2SJack Wang { 349f7a7a5c2SJack Wang struct rnbd_iu *iu; 350f7a7a5c2SJack Wang struct rtrs_permit *permit; 351f7a7a5c2SJack Wang 352f7a7a5c2SJack Wang permit = rnbd_get_permit(sess, con_type, 353f7a7a5c2SJack Wang wait ? RTRS_PERMIT_WAIT : 354f7a7a5c2SJack Wang RTRS_PERMIT_NOWAIT); 355f7a7a5c2SJack Wang if (unlikely(!permit)) 356f7a7a5c2SJack Wang return NULL; 357f7a7a5c2SJack Wang iu = rtrs_permit_to_pdu(permit); 358f7a7a5c2SJack Wang iu->permit = permit; 359f7a7a5c2SJack Wang /* 360f7a7a5c2SJack Wang * 1st reference is dropped after finishing sending a "user" message, 361f7a7a5c2SJack Wang * 2nd reference is dropped after confirmation with the response is 362f7a7a5c2SJack Wang * returned. 363f7a7a5c2SJack Wang * 1st and 2nd can happen in any order, so the rnbd_iu should be 3643877ece0SJack Wang * released (rtrs_permit returned to rtrs) only after both 365f7a7a5c2SJack Wang * are finished. 366f7a7a5c2SJack Wang */ 367f7a7a5c2SJack Wang atomic_set(&iu->refcount, 2); 368f7a7a5c2SJack Wang init_waitqueue_head(&iu->comp.wait); 369f7a7a5c2SJack Wang iu->comp.errno = INT_MAX; 370f7a7a5c2SJack Wang 371f7a7a5c2SJack Wang return iu; 372f7a7a5c2SJack Wang } 373f7a7a5c2SJack Wang 374f7a7a5c2SJack Wang static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu) 375f7a7a5c2SJack Wang { 376f7a7a5c2SJack Wang if (atomic_dec_and_test(&iu->refcount)) 377f7a7a5c2SJack Wang rnbd_put_permit(sess, iu->permit); 378f7a7a5c2SJack Wang } 379f7a7a5c2SJack Wang 380f7a7a5c2SJack Wang static void rnbd_softirq_done_fn(struct request *rq) 381f7a7a5c2SJack Wang { 382f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = rq->rq_disk->private_data; 383f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 384f7a7a5c2SJack Wang struct rnbd_iu *iu; 385f7a7a5c2SJack Wang 386f7a7a5c2SJack Wang iu = blk_mq_rq_to_pdu(rq); 387*5a1328d0SGioh Kim sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT); 388f7a7a5c2SJack Wang rnbd_put_permit(sess, iu->permit); 389f7a7a5c2SJack Wang blk_mq_end_request(rq, errno_to_blk_status(iu->errno)); 390f7a7a5c2SJack Wang } 391f7a7a5c2SJack Wang 392f7a7a5c2SJack Wang static void msg_io_conf(void *priv, int errno) 393f7a7a5c2SJack Wang { 394f7a7a5c2SJack Wang struct rnbd_iu *iu = priv; 395f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = iu->dev; 396f7a7a5c2SJack Wang struct request *rq = iu->rq; 397f7a7a5c2SJack Wang int rw = rq_data_dir(rq); 398f7a7a5c2SJack Wang 399f7a7a5c2SJack Wang iu->errno = errno; 400f7a7a5c2SJack Wang 401f7a7a5c2SJack Wang blk_mq_complete_request(rq); 402f7a7a5c2SJack Wang 403f7a7a5c2SJack Wang if (errno) 404f7a7a5c2SJack Wang rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n", 405f7a7a5c2SJack Wang rw == READ ? "read" : "write", errno); 406f7a7a5c2SJack Wang } 407f7a7a5c2SJack Wang 408f7a7a5c2SJack Wang static void wake_up_iu_comp(struct rnbd_iu *iu, int errno) 409f7a7a5c2SJack Wang { 410f7a7a5c2SJack Wang iu->comp.errno = errno; 411f7a7a5c2SJack Wang wake_up(&iu->comp.wait); 412f7a7a5c2SJack Wang } 413f7a7a5c2SJack Wang 414f7a7a5c2SJack Wang static void msg_conf(void *priv, int errno) 415f7a7a5c2SJack Wang { 416f7a7a5c2SJack Wang struct rnbd_iu *iu = priv; 417f7a7a5c2SJack Wang 418f7a7a5c2SJack Wang iu->errno = errno; 419f7a7a5c2SJack Wang schedule_work(&iu->work); 420f7a7a5c2SJack Wang } 421f7a7a5c2SJack Wang 422f7a7a5c2SJack Wang enum wait_type { 423f7a7a5c2SJack Wang NO_WAIT = 0, 424f7a7a5c2SJack Wang WAIT = 1 425f7a7a5c2SJack Wang }; 426f7a7a5c2SJack Wang 427f7a7a5c2SJack Wang static int send_usr_msg(struct rtrs_clt *rtrs, int dir, 42846a99e0cSGuoqing Jiang struct rnbd_iu *iu, struct kvec *vec, 429f7a7a5c2SJack Wang size_t len, struct scatterlist *sg, unsigned int sg_len, 430f7a7a5c2SJack Wang void (*conf)(struct work_struct *work), 431f7a7a5c2SJack Wang int *errno, enum wait_type wait) 432f7a7a5c2SJack Wang { 433f7a7a5c2SJack Wang int err; 434f7a7a5c2SJack Wang struct rtrs_clt_req_ops req_ops; 435f7a7a5c2SJack Wang 436f7a7a5c2SJack Wang INIT_WORK(&iu->work, conf); 437f7a7a5c2SJack Wang req_ops = (struct rtrs_clt_req_ops) { 438f7a7a5c2SJack Wang .priv = iu, 439f7a7a5c2SJack Wang .conf_fn = msg_conf, 440f7a7a5c2SJack Wang }; 441f7a7a5c2SJack Wang err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit, 44246a99e0cSGuoqing Jiang vec, 1, len, sg, sg_len); 443f7a7a5c2SJack Wang if (!err && wait) { 444f7a7a5c2SJack Wang wait_event(iu->comp.wait, iu->comp.errno != INT_MAX); 445f7a7a5c2SJack Wang *errno = iu->comp.errno; 446f7a7a5c2SJack Wang } else { 447f7a7a5c2SJack Wang *errno = 0; 448f7a7a5c2SJack Wang } 449f7a7a5c2SJack Wang 450f7a7a5c2SJack Wang return err; 451f7a7a5c2SJack Wang } 452f7a7a5c2SJack Wang 453f7a7a5c2SJack Wang static void msg_close_conf(struct work_struct *work) 454f7a7a5c2SJack Wang { 455f7a7a5c2SJack Wang struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work); 456f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = iu->dev; 457f7a7a5c2SJack Wang 458f7a7a5c2SJack Wang wake_up_iu_comp(iu, iu->errno); 459f7a7a5c2SJack Wang rnbd_put_iu(dev->sess, iu); 460f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 461f7a7a5c2SJack Wang } 462f7a7a5c2SJack Wang 463f7a7a5c2SJack Wang static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait) 464f7a7a5c2SJack Wang { 465f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 466f7a7a5c2SJack Wang struct rnbd_msg_close msg; 467f7a7a5c2SJack Wang struct rnbd_iu *iu; 468f7a7a5c2SJack Wang struct kvec vec = { 469f7a7a5c2SJack Wang .iov_base = &msg, 470f7a7a5c2SJack Wang .iov_len = sizeof(msg) 471f7a7a5c2SJack Wang }; 472f7a7a5c2SJack Wang int err, errno; 473f7a7a5c2SJack Wang 474f7a7a5c2SJack Wang iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT); 475f7a7a5c2SJack Wang if (!iu) 476f7a7a5c2SJack Wang return -ENOMEM; 477f7a7a5c2SJack Wang 478f7a7a5c2SJack Wang iu->buf = NULL; 479f7a7a5c2SJack Wang iu->dev = dev; 480f7a7a5c2SJack Wang 481*5a1328d0SGioh Kim sg_alloc_table(&iu->sgt, 1, GFP_KERNEL); 482f7a7a5c2SJack Wang 483f7a7a5c2SJack Wang msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE); 484f7a7a5c2SJack Wang msg.device_id = cpu_to_le32(device_id); 485f7a7a5c2SJack Wang 486f7a7a5c2SJack Wang WARN_ON(!rnbd_clt_get_dev(dev)); 48746a99e0cSGuoqing Jiang err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0, 488f7a7a5c2SJack Wang msg_close_conf, &errno, wait); 489f7a7a5c2SJack Wang if (err) { 490f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 491f7a7a5c2SJack Wang rnbd_put_iu(sess, iu); 492f7a7a5c2SJack Wang } else { 493f7a7a5c2SJack Wang err = errno; 494f7a7a5c2SJack Wang } 495f7a7a5c2SJack Wang 496*5a1328d0SGioh Kim sg_free_table(&iu->sgt); 497f7a7a5c2SJack Wang rnbd_put_iu(sess, iu); 498f7a7a5c2SJack Wang return err; 499f7a7a5c2SJack Wang } 500f7a7a5c2SJack Wang 501f7a7a5c2SJack Wang static void msg_open_conf(struct work_struct *work) 502f7a7a5c2SJack Wang { 503f7a7a5c2SJack Wang struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work); 504f7a7a5c2SJack Wang struct rnbd_msg_open_rsp *rsp = iu->buf; 505f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = iu->dev; 506f7a7a5c2SJack Wang int errno = iu->errno; 507f7a7a5c2SJack Wang 508f7a7a5c2SJack Wang if (errno) { 509f7a7a5c2SJack Wang rnbd_clt_err(dev, 510f7a7a5c2SJack Wang "Opening failed, server responded: %d\n", 511f7a7a5c2SJack Wang errno); 512f7a7a5c2SJack Wang } else { 513f7a7a5c2SJack Wang errno = process_msg_open_rsp(dev, rsp); 514f7a7a5c2SJack Wang if (errno) { 515f7a7a5c2SJack Wang u32 device_id = le32_to_cpu(rsp->device_id); 516f7a7a5c2SJack Wang /* 517f7a7a5c2SJack Wang * If server thinks its fine, but we fail to process 518f7a7a5c2SJack Wang * then be nice and send a close to server. 519f7a7a5c2SJack Wang */ 520f7a7a5c2SJack Wang (void)send_msg_close(dev, device_id, NO_WAIT); 521f7a7a5c2SJack Wang } 522f7a7a5c2SJack Wang } 523f7a7a5c2SJack Wang kfree(rsp); 524f7a7a5c2SJack Wang wake_up_iu_comp(iu, errno); 525f7a7a5c2SJack Wang rnbd_put_iu(dev->sess, iu); 526f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 527f7a7a5c2SJack Wang } 528f7a7a5c2SJack Wang 529f7a7a5c2SJack Wang static void msg_sess_info_conf(struct work_struct *work) 530f7a7a5c2SJack Wang { 531f7a7a5c2SJack Wang struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work); 532f7a7a5c2SJack Wang struct rnbd_msg_sess_info_rsp *rsp = iu->buf; 533f7a7a5c2SJack Wang struct rnbd_clt_session *sess = iu->sess; 534f7a7a5c2SJack Wang 535f7a7a5c2SJack Wang if (!iu->errno) 536f7a7a5c2SJack Wang sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR); 537f7a7a5c2SJack Wang 538f7a7a5c2SJack Wang kfree(rsp); 539f7a7a5c2SJack Wang wake_up_iu_comp(iu, iu->errno); 540f7a7a5c2SJack Wang rnbd_put_iu(sess, iu); 541f7a7a5c2SJack Wang rnbd_clt_put_sess(sess); 542f7a7a5c2SJack Wang } 543f7a7a5c2SJack Wang 544f7a7a5c2SJack Wang static int send_msg_open(struct rnbd_clt_dev *dev, bool wait) 545f7a7a5c2SJack Wang { 546f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 547f7a7a5c2SJack Wang struct rnbd_msg_open_rsp *rsp; 548f7a7a5c2SJack Wang struct rnbd_msg_open msg; 549f7a7a5c2SJack Wang struct rnbd_iu *iu; 550f7a7a5c2SJack Wang struct kvec vec = { 551f7a7a5c2SJack Wang .iov_base = &msg, 552f7a7a5c2SJack Wang .iov_len = sizeof(msg) 553f7a7a5c2SJack Wang }; 554f7a7a5c2SJack Wang int err, errno; 555f7a7a5c2SJack Wang 556f7a7a5c2SJack Wang rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); 557f7a7a5c2SJack Wang if (!rsp) 558f7a7a5c2SJack Wang return -ENOMEM; 559f7a7a5c2SJack Wang 560f7a7a5c2SJack Wang iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT); 561f7a7a5c2SJack Wang if (!iu) { 562f7a7a5c2SJack Wang kfree(rsp); 563f7a7a5c2SJack Wang return -ENOMEM; 564f7a7a5c2SJack Wang } 565f7a7a5c2SJack Wang 566f7a7a5c2SJack Wang iu->buf = rsp; 567f7a7a5c2SJack Wang iu->dev = dev; 568f7a7a5c2SJack Wang 569*5a1328d0SGioh Kim sg_alloc_table(&iu->sgt, 1, GFP_KERNEL); 570*5a1328d0SGioh Kim sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp)); 571f7a7a5c2SJack Wang 572f7a7a5c2SJack Wang msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN); 573f7a7a5c2SJack Wang msg.access_mode = dev->access_mode; 574f7a7a5c2SJack Wang strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name)); 575f7a7a5c2SJack Wang 576f7a7a5c2SJack Wang WARN_ON(!rnbd_clt_get_dev(dev)); 577f7a7a5c2SJack Wang err = send_usr_msg(sess->rtrs, READ, iu, 578*5a1328d0SGioh Kim &vec, sizeof(*rsp), iu->sgt.sgl, 1, 579f7a7a5c2SJack Wang msg_open_conf, &errno, wait); 580f7a7a5c2SJack Wang if (err) { 581f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 582f7a7a5c2SJack Wang rnbd_put_iu(sess, iu); 583f7a7a5c2SJack Wang kfree(rsp); 584f7a7a5c2SJack Wang } else { 585f7a7a5c2SJack Wang err = errno; 586f7a7a5c2SJack Wang } 587f7a7a5c2SJack Wang 588*5a1328d0SGioh Kim sg_free_table(&iu->sgt); 589f7a7a5c2SJack Wang rnbd_put_iu(sess, iu); 590f7a7a5c2SJack Wang return err; 591f7a7a5c2SJack Wang } 592f7a7a5c2SJack Wang 593f7a7a5c2SJack Wang static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait) 594f7a7a5c2SJack Wang { 595f7a7a5c2SJack Wang struct rnbd_msg_sess_info_rsp *rsp; 596f7a7a5c2SJack Wang struct rnbd_msg_sess_info msg; 597f7a7a5c2SJack Wang struct rnbd_iu *iu; 598f7a7a5c2SJack Wang struct kvec vec = { 599f7a7a5c2SJack Wang .iov_base = &msg, 600f7a7a5c2SJack Wang .iov_len = sizeof(msg) 601f7a7a5c2SJack Wang }; 602f7a7a5c2SJack Wang int err, errno; 603f7a7a5c2SJack Wang 604f7a7a5c2SJack Wang rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); 605f7a7a5c2SJack Wang if (!rsp) 606f7a7a5c2SJack Wang return -ENOMEM; 607f7a7a5c2SJack Wang 608f7a7a5c2SJack Wang iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT); 609f7a7a5c2SJack Wang if (!iu) { 610f7a7a5c2SJack Wang kfree(rsp); 611f7a7a5c2SJack Wang return -ENOMEM; 612f7a7a5c2SJack Wang } 613f7a7a5c2SJack Wang 614f7a7a5c2SJack Wang iu->buf = rsp; 615f7a7a5c2SJack Wang iu->sess = sess; 616f7a7a5c2SJack Wang 617*5a1328d0SGioh Kim sg_alloc_table(&iu->sgt, 1, GFP_KERNEL); 618*5a1328d0SGioh Kim sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp)); 619f7a7a5c2SJack Wang 620f7a7a5c2SJack Wang msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO); 621f7a7a5c2SJack Wang msg.ver = RNBD_PROTO_VER_MAJOR; 622f7a7a5c2SJack Wang 623f7a7a5c2SJack Wang if (!rnbd_clt_get_sess(sess)) { 624f7a7a5c2SJack Wang /* 625f7a7a5c2SJack Wang * That can happen only in one case, when RTRS has restablished 626f7a7a5c2SJack Wang * the connection and link_ev() is called, but session is almost 627f7a7a5c2SJack Wang * dead, last reference on session is put and caller is waiting 628f7a7a5c2SJack Wang * for RTRS to close everything. 629f7a7a5c2SJack Wang */ 630f7a7a5c2SJack Wang err = -ENODEV; 631f7a7a5c2SJack Wang goto put_iu; 632f7a7a5c2SJack Wang } 633f7a7a5c2SJack Wang err = send_usr_msg(sess->rtrs, READ, iu, 634*5a1328d0SGioh Kim &vec, sizeof(*rsp), iu->sgt.sgl, 1, 635f7a7a5c2SJack Wang msg_sess_info_conf, &errno, wait); 636f7a7a5c2SJack Wang if (err) { 637f7a7a5c2SJack Wang rnbd_clt_put_sess(sess); 638f7a7a5c2SJack Wang put_iu: 639f7a7a5c2SJack Wang rnbd_put_iu(sess, iu); 640f7a7a5c2SJack Wang kfree(rsp); 641f7a7a5c2SJack Wang } else { 642f7a7a5c2SJack Wang err = errno; 643f7a7a5c2SJack Wang } 644*5a1328d0SGioh Kim sg_free_table(&iu->sgt); 645f7a7a5c2SJack Wang rnbd_put_iu(sess, iu); 646f7a7a5c2SJack Wang return err; 647f7a7a5c2SJack Wang } 648f7a7a5c2SJack Wang 649f7a7a5c2SJack Wang static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess) 650f7a7a5c2SJack Wang { 651f7a7a5c2SJack Wang struct rnbd_clt_dev *dev; 652f7a7a5c2SJack Wang 653f7a7a5c2SJack Wang mutex_lock(&sess->lock); 654f7a7a5c2SJack Wang list_for_each_entry(dev, &sess->devs_list, list) { 655f7a7a5c2SJack Wang rnbd_clt_err(dev, "Device disconnected.\n"); 656f7a7a5c2SJack Wang 657f7a7a5c2SJack Wang mutex_lock(&dev->lock); 658f7a7a5c2SJack Wang if (dev->dev_state == DEV_STATE_MAPPED) 659f7a7a5c2SJack Wang dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED; 660f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 661f7a7a5c2SJack Wang } 662f7a7a5c2SJack Wang mutex_unlock(&sess->lock); 663f7a7a5c2SJack Wang } 664f7a7a5c2SJack Wang 665f7a7a5c2SJack Wang static void remap_devs(struct rnbd_clt_session *sess) 666f7a7a5c2SJack Wang { 667f7a7a5c2SJack Wang struct rnbd_clt_dev *dev; 668f7a7a5c2SJack Wang struct rtrs_attrs attrs; 669f7a7a5c2SJack Wang int err; 670f7a7a5c2SJack Wang 671f7a7a5c2SJack Wang /* 672f7a7a5c2SJack Wang * Careful here: we are called from RTRS link event directly, 673f7a7a5c2SJack Wang * thus we can't send any RTRS request and wait for response 674f7a7a5c2SJack Wang * or RTRS will not be able to complete request with failure 675f7a7a5c2SJack Wang * if something goes wrong (failing of outstanding requests 676f7a7a5c2SJack Wang * happens exactly from the context where we are blocking now). 677f7a7a5c2SJack Wang * 678f7a7a5c2SJack Wang * So to avoid deadlocks each usr message sent from here must 679f7a7a5c2SJack Wang * be asynchronous. 680f7a7a5c2SJack Wang */ 681f7a7a5c2SJack Wang 682f7a7a5c2SJack Wang err = send_msg_sess_info(sess, NO_WAIT); 683f7a7a5c2SJack Wang if (err) { 684f7a7a5c2SJack Wang pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err); 685f7a7a5c2SJack Wang return; 686f7a7a5c2SJack Wang } 687f7a7a5c2SJack Wang 688f7a7a5c2SJack Wang rtrs_clt_query(sess->rtrs, &attrs); 689f7a7a5c2SJack Wang mutex_lock(&sess->lock); 690f7a7a5c2SJack Wang sess->max_io_size = attrs.max_io_size; 691f7a7a5c2SJack Wang 692f7a7a5c2SJack Wang list_for_each_entry(dev, &sess->devs_list, list) { 693f7a7a5c2SJack Wang bool skip; 694f7a7a5c2SJack Wang 695f7a7a5c2SJack Wang mutex_lock(&dev->lock); 696f7a7a5c2SJack Wang skip = (dev->dev_state == DEV_STATE_INIT); 697f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 698f7a7a5c2SJack Wang if (skip) 699f7a7a5c2SJack Wang /* 700f7a7a5c2SJack Wang * When device is establishing connection for the first 701f7a7a5c2SJack Wang * time - do not remap, it will be closed soon. 702f7a7a5c2SJack Wang */ 703f7a7a5c2SJack Wang continue; 704f7a7a5c2SJack Wang 705f7a7a5c2SJack Wang rnbd_clt_info(dev, "session reconnected, remapping device\n"); 706f7a7a5c2SJack Wang err = send_msg_open(dev, NO_WAIT); 707f7a7a5c2SJack Wang if (err) { 708f7a7a5c2SJack Wang rnbd_clt_err(dev, "send_msg_open(): %d\n", err); 709f7a7a5c2SJack Wang break; 710f7a7a5c2SJack Wang } 711f7a7a5c2SJack Wang } 712f7a7a5c2SJack Wang mutex_unlock(&sess->lock); 713f7a7a5c2SJack Wang } 714f7a7a5c2SJack Wang 715f7a7a5c2SJack Wang static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev) 716f7a7a5c2SJack Wang { 717f7a7a5c2SJack Wang struct rnbd_clt_session *sess = priv; 718f7a7a5c2SJack Wang 719f7a7a5c2SJack Wang switch (ev) { 720f7a7a5c2SJack Wang case RTRS_CLT_LINK_EV_DISCONNECTED: 721f7a7a5c2SJack Wang set_dev_states_to_disconnected(sess); 722f7a7a5c2SJack Wang break; 723f7a7a5c2SJack Wang case RTRS_CLT_LINK_EV_RECONNECTED: 724f7a7a5c2SJack Wang remap_devs(sess); 725f7a7a5c2SJack Wang break; 726f7a7a5c2SJack Wang default: 727f7a7a5c2SJack Wang pr_err("Unknown session event received (%d), session: %s\n", 728f7a7a5c2SJack Wang ev, sess->sessname); 729f7a7a5c2SJack Wang } 730f7a7a5c2SJack Wang } 731f7a7a5c2SJack Wang 732f7a7a5c2SJack Wang static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues) 733f7a7a5c2SJack Wang { 734f7a7a5c2SJack Wang unsigned int cpu; 735f7a7a5c2SJack Wang struct rnbd_cpu_qlist *cpu_q; 736f7a7a5c2SJack Wang 737f7a7a5c2SJack Wang for_each_possible_cpu(cpu) { 738f7a7a5c2SJack Wang cpu_q = per_cpu_ptr(cpu_queues, cpu); 739f7a7a5c2SJack Wang 740f7a7a5c2SJack Wang cpu_q->cpu = cpu; 741f7a7a5c2SJack Wang INIT_LIST_HEAD(&cpu_q->requeue_list); 742f7a7a5c2SJack Wang spin_lock_init(&cpu_q->requeue_lock); 743f7a7a5c2SJack Wang } 744f7a7a5c2SJack Wang } 745f7a7a5c2SJack Wang 746f7a7a5c2SJack Wang static void destroy_mq_tags(struct rnbd_clt_session *sess) 747f7a7a5c2SJack Wang { 748f7a7a5c2SJack Wang if (sess->tag_set.tags) 749f7a7a5c2SJack Wang blk_mq_free_tag_set(&sess->tag_set); 750f7a7a5c2SJack Wang } 751f7a7a5c2SJack Wang 752f7a7a5c2SJack Wang static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess) 753f7a7a5c2SJack Wang { 754f7a7a5c2SJack Wang sess->rtrs_ready = true; 755f7a7a5c2SJack Wang wake_up_all(&sess->rtrs_waitq); 756f7a7a5c2SJack Wang } 757f7a7a5c2SJack Wang 758f7a7a5c2SJack Wang static void close_rtrs(struct rnbd_clt_session *sess) 759f7a7a5c2SJack Wang { 760f7a7a5c2SJack Wang might_sleep(); 761f7a7a5c2SJack Wang 762f7a7a5c2SJack Wang if (!IS_ERR_OR_NULL(sess->rtrs)) { 763f7a7a5c2SJack Wang rtrs_clt_close(sess->rtrs); 764f7a7a5c2SJack Wang sess->rtrs = NULL; 765f7a7a5c2SJack Wang wake_up_rtrs_waiters(sess); 766f7a7a5c2SJack Wang } 767f7a7a5c2SJack Wang } 768f7a7a5c2SJack Wang 769f7a7a5c2SJack Wang static void free_sess(struct rnbd_clt_session *sess) 770f7a7a5c2SJack Wang { 771f7a7a5c2SJack Wang WARN_ON(!list_empty(&sess->devs_list)); 772f7a7a5c2SJack Wang 773f7a7a5c2SJack Wang might_sleep(); 774f7a7a5c2SJack Wang 775f7a7a5c2SJack Wang close_rtrs(sess); 776f7a7a5c2SJack Wang destroy_mq_tags(sess); 777f7a7a5c2SJack Wang if (!list_empty(&sess->list)) { 778f7a7a5c2SJack Wang mutex_lock(&sess_lock); 779f7a7a5c2SJack Wang list_del(&sess->list); 780f7a7a5c2SJack Wang mutex_unlock(&sess_lock); 781f7a7a5c2SJack Wang } 782f7a7a5c2SJack Wang free_percpu(sess->cpu_queues); 783f7a7a5c2SJack Wang free_percpu(sess->cpu_rr); 784f7a7a5c2SJack Wang mutex_destroy(&sess->lock); 785f7a7a5c2SJack Wang kfree(sess); 786f7a7a5c2SJack Wang } 787f7a7a5c2SJack Wang 788f7a7a5c2SJack Wang static struct rnbd_clt_session *alloc_sess(const char *sessname) 789f7a7a5c2SJack Wang { 790f7a7a5c2SJack Wang struct rnbd_clt_session *sess; 791f7a7a5c2SJack Wang int err, cpu; 792f7a7a5c2SJack Wang 793f7a7a5c2SJack Wang sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE); 794f7a7a5c2SJack Wang if (!sess) 795f7a7a5c2SJack Wang return ERR_PTR(-ENOMEM); 796f7a7a5c2SJack Wang strlcpy(sess->sessname, sessname, sizeof(sess->sessname)); 797f7a7a5c2SJack Wang atomic_set(&sess->busy, 0); 798f7a7a5c2SJack Wang mutex_init(&sess->lock); 799f7a7a5c2SJack Wang INIT_LIST_HEAD(&sess->devs_list); 800f7a7a5c2SJack Wang INIT_LIST_HEAD(&sess->list); 801f7a7a5c2SJack Wang bitmap_zero(sess->cpu_queues_bm, NR_CPUS); 802f7a7a5c2SJack Wang init_waitqueue_head(&sess->rtrs_waitq); 803f7a7a5c2SJack Wang refcount_set(&sess->refcount, 1); 804f7a7a5c2SJack Wang 805f7a7a5c2SJack Wang sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist); 806f7a7a5c2SJack Wang if (!sess->cpu_queues) { 807f7a7a5c2SJack Wang err = -ENOMEM; 808f7a7a5c2SJack Wang goto err; 809f7a7a5c2SJack Wang } 810f7a7a5c2SJack Wang rnbd_init_cpu_qlists(sess->cpu_queues); 811f7a7a5c2SJack Wang 812f7a7a5c2SJack Wang /* 8133877ece0SJack Wang * That is simple percpu variable which stores cpu indices, which are 814f7a7a5c2SJack Wang * incremented on each access. We need that for the sake of fairness 815f7a7a5c2SJack Wang * to wake up queues in a round-robin manner. 816f7a7a5c2SJack Wang */ 817f7a7a5c2SJack Wang sess->cpu_rr = alloc_percpu(int); 818f7a7a5c2SJack Wang if (!sess->cpu_rr) { 819f7a7a5c2SJack Wang err = -ENOMEM; 820f7a7a5c2SJack Wang goto err; 821f7a7a5c2SJack Wang } 822f7a7a5c2SJack Wang for_each_possible_cpu(cpu) 823f7a7a5c2SJack Wang * per_cpu_ptr(sess->cpu_rr, cpu) = cpu; 824f7a7a5c2SJack Wang 825f7a7a5c2SJack Wang return sess; 826f7a7a5c2SJack Wang 827f7a7a5c2SJack Wang err: 828f7a7a5c2SJack Wang free_sess(sess); 829f7a7a5c2SJack Wang 830f7a7a5c2SJack Wang return ERR_PTR(err); 831f7a7a5c2SJack Wang } 832f7a7a5c2SJack Wang 833f7a7a5c2SJack Wang static int wait_for_rtrs_connection(struct rnbd_clt_session *sess) 834f7a7a5c2SJack Wang { 835f7a7a5c2SJack Wang wait_event(sess->rtrs_waitq, sess->rtrs_ready); 836f7a7a5c2SJack Wang if (IS_ERR_OR_NULL(sess->rtrs)) 837f7a7a5c2SJack Wang return -ECONNRESET; 838f7a7a5c2SJack Wang 839f7a7a5c2SJack Wang return 0; 840f7a7a5c2SJack Wang } 841f7a7a5c2SJack Wang 842f7a7a5c2SJack Wang static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess) 843f7a7a5c2SJack Wang __releases(&sess_lock) 844f7a7a5c2SJack Wang __acquires(&sess_lock) 845f7a7a5c2SJack Wang { 846f7a7a5c2SJack Wang DEFINE_WAIT(wait); 847f7a7a5c2SJack Wang 848f7a7a5c2SJack Wang prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE); 849f7a7a5c2SJack Wang if (IS_ERR_OR_NULL(sess->rtrs)) { 850f7a7a5c2SJack Wang finish_wait(&sess->rtrs_waitq, &wait); 851f7a7a5c2SJack Wang return; 852f7a7a5c2SJack Wang } 853f7a7a5c2SJack Wang mutex_unlock(&sess_lock); 854f7a7a5c2SJack Wang /* loop in caller, see __find_and_get_sess(). 855f7a7a5c2SJack Wang * You can't leave mutex locked and call schedule(), you will catch a 856f7a7a5c2SJack Wang * deadlock with a caller of free_sess(), which has just put the last 857f7a7a5c2SJack Wang * reference and is about to take the sess_lock in order to delete 858f7a7a5c2SJack Wang * the session from the list. 859f7a7a5c2SJack Wang */ 860f7a7a5c2SJack Wang schedule(); 861f7a7a5c2SJack Wang mutex_lock(&sess_lock); 862f7a7a5c2SJack Wang } 863f7a7a5c2SJack Wang 864f7a7a5c2SJack Wang static struct rnbd_clt_session *__find_and_get_sess(const char *sessname) 865f7a7a5c2SJack Wang __releases(&sess_lock) 866f7a7a5c2SJack Wang __acquires(&sess_lock) 867f7a7a5c2SJack Wang { 868f7a7a5c2SJack Wang struct rnbd_clt_session *sess, *sn; 869f7a7a5c2SJack Wang int err; 870f7a7a5c2SJack Wang 871f7a7a5c2SJack Wang again: 872f7a7a5c2SJack Wang list_for_each_entry_safe(sess, sn, &sess_list, list) { 873f7a7a5c2SJack Wang if (strcmp(sessname, sess->sessname)) 874f7a7a5c2SJack Wang continue; 875f7a7a5c2SJack Wang 876f7a7a5c2SJack Wang if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs)) 877f7a7a5c2SJack Wang /* 878f7a7a5c2SJack Wang * No RTRS connection, session is dying. 879f7a7a5c2SJack Wang */ 880f7a7a5c2SJack Wang continue; 881f7a7a5c2SJack Wang 882f7a7a5c2SJack Wang if (rnbd_clt_get_sess(sess)) { 883f7a7a5c2SJack Wang /* 884f7a7a5c2SJack Wang * Alive session is found, wait for RTRS connection. 885f7a7a5c2SJack Wang */ 886f7a7a5c2SJack Wang mutex_unlock(&sess_lock); 887f7a7a5c2SJack Wang err = wait_for_rtrs_connection(sess); 888f7a7a5c2SJack Wang if (err) 889f7a7a5c2SJack Wang rnbd_clt_put_sess(sess); 890f7a7a5c2SJack Wang mutex_lock(&sess_lock); 891f7a7a5c2SJack Wang 892f7a7a5c2SJack Wang if (err) 893f7a7a5c2SJack Wang /* Session is dying, repeat the loop */ 894f7a7a5c2SJack Wang goto again; 895f7a7a5c2SJack Wang 896f7a7a5c2SJack Wang return sess; 897f7a7a5c2SJack Wang } 898f7a7a5c2SJack Wang /* 899f7a7a5c2SJack Wang * Ref is 0, session is dying, wait for RTRS disconnect 900f7a7a5c2SJack Wang * in order to avoid session names clashes. 901f7a7a5c2SJack Wang */ 902f7a7a5c2SJack Wang wait_for_rtrs_disconnection(sess); 903f7a7a5c2SJack Wang /* 904f7a7a5c2SJack Wang * RTRS is disconnected and soon session will be freed, 905f7a7a5c2SJack Wang * so repeat a loop. 906f7a7a5c2SJack Wang */ 907f7a7a5c2SJack Wang goto again; 908f7a7a5c2SJack Wang } 909f7a7a5c2SJack Wang 910f7a7a5c2SJack Wang return NULL; 911f7a7a5c2SJack Wang } 912f7a7a5c2SJack Wang 913f7a7a5c2SJack Wang static struct 914f7a7a5c2SJack Wang rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first) 915f7a7a5c2SJack Wang { 916f7a7a5c2SJack Wang struct rnbd_clt_session *sess = NULL; 917f7a7a5c2SJack Wang 918f7a7a5c2SJack Wang mutex_lock(&sess_lock); 919f7a7a5c2SJack Wang sess = __find_and_get_sess(sessname); 920f7a7a5c2SJack Wang if (!sess) { 921f7a7a5c2SJack Wang sess = alloc_sess(sessname); 92247393fb5SDan Carpenter if (IS_ERR(sess)) { 92347393fb5SDan Carpenter mutex_unlock(&sess_lock); 92447393fb5SDan Carpenter return sess; 92547393fb5SDan Carpenter } 926f7a7a5c2SJack Wang list_add(&sess->list, &sess_list); 927f7a7a5c2SJack Wang *first = true; 928f7a7a5c2SJack Wang } else 929f7a7a5c2SJack Wang *first = false; 930f7a7a5c2SJack Wang mutex_unlock(&sess_lock); 931f7a7a5c2SJack Wang 932f7a7a5c2SJack Wang return sess; 933f7a7a5c2SJack Wang } 934f7a7a5c2SJack Wang 935f7a7a5c2SJack Wang static int rnbd_client_open(struct block_device *block_device, fmode_t mode) 936f7a7a5c2SJack Wang { 937f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = block_device->bd_disk->private_data; 938f7a7a5c2SJack Wang 939f7a7a5c2SJack Wang if (dev->read_only && (mode & FMODE_WRITE)) 940f7a7a5c2SJack Wang return -EPERM; 941f7a7a5c2SJack Wang 942f7a7a5c2SJack Wang if (dev->dev_state == DEV_STATE_UNMAPPED || 943f7a7a5c2SJack Wang !rnbd_clt_get_dev(dev)) 944f7a7a5c2SJack Wang return -EIO; 945f7a7a5c2SJack Wang 946f7a7a5c2SJack Wang return 0; 947f7a7a5c2SJack Wang } 948f7a7a5c2SJack Wang 949f7a7a5c2SJack Wang static void rnbd_client_release(struct gendisk *gen, fmode_t mode) 950f7a7a5c2SJack Wang { 951f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = gen->private_data; 952f7a7a5c2SJack Wang 953f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 954f7a7a5c2SJack Wang } 955f7a7a5c2SJack Wang 956f7a7a5c2SJack Wang static int rnbd_client_getgeo(struct block_device *block_device, 957f7a7a5c2SJack Wang struct hd_geometry *geo) 958f7a7a5c2SJack Wang { 959f7a7a5c2SJack Wang u64 size; 960f7a7a5c2SJack Wang struct rnbd_clt_dev *dev; 961f7a7a5c2SJack Wang 962f7a7a5c2SJack Wang dev = block_device->bd_disk->private_data; 963f7a7a5c2SJack Wang size = dev->size * (dev->logical_block_size / SECTOR_SIZE); 964f7a7a5c2SJack Wang geo->cylinders = size >> 6; /* size/64 */ 965f7a7a5c2SJack Wang geo->heads = 4; 966f7a7a5c2SJack Wang geo->sectors = 16; 967f7a7a5c2SJack Wang geo->start = 0; 968f7a7a5c2SJack Wang 969f7a7a5c2SJack Wang return 0; 970f7a7a5c2SJack Wang } 971f7a7a5c2SJack Wang 972f7a7a5c2SJack Wang static const struct block_device_operations rnbd_client_ops = { 973f7a7a5c2SJack Wang .owner = THIS_MODULE, 974f7a7a5c2SJack Wang .open = rnbd_client_open, 975f7a7a5c2SJack Wang .release = rnbd_client_release, 976f7a7a5c2SJack Wang .getgeo = rnbd_client_getgeo 977f7a7a5c2SJack Wang }; 978f7a7a5c2SJack Wang 979f7a7a5c2SJack Wang /* The amount of data that belongs to an I/O and the amount of data that 980f7a7a5c2SJack Wang * should be read or written to the disk (bi_size) can differ. 981f7a7a5c2SJack Wang * 982f7a7a5c2SJack Wang * E.g. When WRITE_SAME is used, only a small amount of data is 983f7a7a5c2SJack Wang * transferred that is then written repeatedly over a lot of sectors. 984f7a7a5c2SJack Wang * 985f7a7a5c2SJack Wang * Get the size of data to be transferred via RTRS by summing up the size 986f7a7a5c2SJack Wang * of the scather-gather list entries. 987f7a7a5c2SJack Wang */ 988f7a7a5c2SJack Wang static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len) 989f7a7a5c2SJack Wang { 990f7a7a5c2SJack Wang struct scatterlist *sg; 991f7a7a5c2SJack Wang size_t tsize = 0; 992f7a7a5c2SJack Wang int i; 993f7a7a5c2SJack Wang 994f7a7a5c2SJack Wang for_each_sg(sglist, sg, len, i) 995f7a7a5c2SJack Wang tsize += sg->length; 996f7a7a5c2SJack Wang return tsize; 997f7a7a5c2SJack Wang } 998f7a7a5c2SJack Wang 999f7a7a5c2SJack Wang static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev, 1000f7a7a5c2SJack Wang struct request *rq, 1001f7a7a5c2SJack Wang struct rnbd_iu *iu) 1002f7a7a5c2SJack Wang { 1003f7a7a5c2SJack Wang struct rtrs_clt *rtrs = dev->sess->rtrs; 1004f7a7a5c2SJack Wang struct rtrs_permit *permit = iu->permit; 1005f7a7a5c2SJack Wang struct rnbd_msg_io msg; 1006f7a7a5c2SJack Wang struct rtrs_clt_req_ops req_ops; 1007f7a7a5c2SJack Wang unsigned int sg_cnt = 0; 1008f7a7a5c2SJack Wang struct kvec vec; 1009f7a7a5c2SJack Wang size_t size; 1010f7a7a5c2SJack Wang int err; 1011f7a7a5c2SJack Wang 1012f7a7a5c2SJack Wang iu->rq = rq; 1013f7a7a5c2SJack Wang iu->dev = dev; 1014f7a7a5c2SJack Wang msg.sector = cpu_to_le64(blk_rq_pos(rq)); 1015f7a7a5c2SJack Wang msg.bi_size = cpu_to_le32(blk_rq_bytes(rq)); 1016f7a7a5c2SJack Wang msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq)); 1017f7a7a5c2SJack Wang msg.prio = cpu_to_le16(req_get_ioprio(rq)); 1018f7a7a5c2SJack Wang 1019f7a7a5c2SJack Wang /* 1020f7a7a5c2SJack Wang * We only support discards with single segment for now. 1021f7a7a5c2SJack Wang * See queue limits. 1022f7a7a5c2SJack Wang */ 1023f7a7a5c2SJack Wang if (req_op(rq) != REQ_OP_DISCARD) 1024*5a1328d0SGioh Kim sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl); 1025f7a7a5c2SJack Wang 1026f7a7a5c2SJack Wang if (sg_cnt == 0) 1027*5a1328d0SGioh Kim sg_mark_end(&iu->sgt.sgl[0]); 1028f7a7a5c2SJack Wang 1029f7a7a5c2SJack Wang msg.hdr.type = cpu_to_le16(RNBD_MSG_IO); 1030f7a7a5c2SJack Wang msg.device_id = cpu_to_le32(dev->device_id); 1031f7a7a5c2SJack Wang 1032f7a7a5c2SJack Wang vec = (struct kvec) { 1033f7a7a5c2SJack Wang .iov_base = &msg, 1034f7a7a5c2SJack Wang .iov_len = sizeof(msg) 1035f7a7a5c2SJack Wang }; 1036*5a1328d0SGioh Kim size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt); 1037f7a7a5c2SJack Wang req_ops = (struct rtrs_clt_req_ops) { 1038f7a7a5c2SJack Wang .priv = iu, 1039f7a7a5c2SJack Wang .conf_fn = msg_io_conf, 1040f7a7a5c2SJack Wang }; 1041f7a7a5c2SJack Wang err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit, 1042*5a1328d0SGioh Kim &vec, 1, size, iu->sgt.sgl, sg_cnt); 1043f7a7a5c2SJack Wang if (unlikely(err)) { 1044f7a7a5c2SJack Wang rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n", 1045f7a7a5c2SJack Wang err); 1046f7a7a5c2SJack Wang return err; 1047f7a7a5c2SJack Wang } 1048f7a7a5c2SJack Wang 1049f7a7a5c2SJack Wang return 0; 1050f7a7a5c2SJack Wang } 1051f7a7a5c2SJack Wang 1052f7a7a5c2SJack Wang /** 1053f7a7a5c2SJack Wang * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy 1054f7a7a5c2SJack Wang * @dev: Device to be checked 1055f7a7a5c2SJack Wang * @q: Queue to be added to the requeue list if required 1056f7a7a5c2SJack Wang * 1057f7a7a5c2SJack Wang * Description: 1058f7a7a5c2SJack Wang * If session is busy, that means someone will requeue us when resources 1059f7a7a5c2SJack Wang * are freed. If session is not doing anything - device is not added to 1060f7a7a5c2SJack Wang * the list and @false is returned. 1061f7a7a5c2SJack Wang */ 1062f7a7a5c2SJack Wang static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev, 1063f7a7a5c2SJack Wang struct rnbd_queue *q) 1064f7a7a5c2SJack Wang { 1065f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 1066f7a7a5c2SJack Wang struct rnbd_cpu_qlist *cpu_q; 1067f7a7a5c2SJack Wang unsigned long flags; 1068f7a7a5c2SJack Wang bool added = true; 1069f7a7a5c2SJack Wang bool need_set; 1070f7a7a5c2SJack Wang 1071f7a7a5c2SJack Wang cpu_q = get_cpu_ptr(sess->cpu_queues); 1072f7a7a5c2SJack Wang spin_lock_irqsave(&cpu_q->requeue_lock, flags); 1073f7a7a5c2SJack Wang 1074f7a7a5c2SJack Wang if (likely(!test_and_set_bit_lock(0, &q->in_list))) { 1075f7a7a5c2SJack Wang if (WARN_ON(!list_empty(&q->requeue_list))) 1076f7a7a5c2SJack Wang goto unlock; 1077f7a7a5c2SJack Wang 1078f7a7a5c2SJack Wang need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm); 1079f7a7a5c2SJack Wang if (need_set) { 1080f7a7a5c2SJack Wang set_bit(cpu_q->cpu, sess->cpu_queues_bm); 1081f7a7a5c2SJack Wang /* Paired with rnbd_put_permit(). Set a bit first 1082f7a7a5c2SJack Wang * and then observe the busy counter. 1083f7a7a5c2SJack Wang */ 1084f7a7a5c2SJack Wang smp_mb__before_atomic(); 1085f7a7a5c2SJack Wang } 1086f7a7a5c2SJack Wang if (likely(atomic_read(&sess->busy))) { 1087f7a7a5c2SJack Wang list_add_tail(&q->requeue_list, &cpu_q->requeue_list); 1088f7a7a5c2SJack Wang } else { 1089f7a7a5c2SJack Wang /* Very unlikely, but possible: busy counter was 1090f7a7a5c2SJack Wang * observed as zero. Drop all bits and return 1091f7a7a5c2SJack Wang * false to restart the queue by ourselves. 1092f7a7a5c2SJack Wang */ 1093f7a7a5c2SJack Wang if (need_set) 1094f7a7a5c2SJack Wang clear_bit(cpu_q->cpu, sess->cpu_queues_bm); 1095f7a7a5c2SJack Wang clear_bit_unlock(0, &q->in_list); 1096f7a7a5c2SJack Wang added = false; 1097f7a7a5c2SJack Wang } 1098f7a7a5c2SJack Wang } 1099f7a7a5c2SJack Wang unlock: 1100f7a7a5c2SJack Wang spin_unlock_irqrestore(&cpu_q->requeue_lock, flags); 1101f7a7a5c2SJack Wang put_cpu_ptr(sess->cpu_queues); 1102f7a7a5c2SJack Wang 1103f7a7a5c2SJack Wang return added; 1104f7a7a5c2SJack Wang } 1105f7a7a5c2SJack Wang 1106f7a7a5c2SJack Wang static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev, 1107f7a7a5c2SJack Wang struct blk_mq_hw_ctx *hctx, 1108f7a7a5c2SJack Wang int delay) 1109f7a7a5c2SJack Wang { 1110f7a7a5c2SJack Wang struct rnbd_queue *q = hctx->driver_data; 1111f7a7a5c2SJack Wang 1112f7a7a5c2SJack Wang if (delay != RNBD_DELAY_IFBUSY) 1113f7a7a5c2SJack Wang blk_mq_delay_run_hw_queue(hctx, delay); 1114f7a7a5c2SJack Wang else if (unlikely(!rnbd_clt_dev_add_to_requeue(dev, q))) 1115f7a7a5c2SJack Wang /* 1116f7a7a5c2SJack Wang * If session is not busy we have to restart 1117f7a7a5c2SJack Wang * the queue ourselves. 1118f7a7a5c2SJack Wang */ 1119f7a7a5c2SJack Wang blk_mq_delay_run_hw_queue(hctx, 10/*ms*/); 1120f7a7a5c2SJack Wang } 1121f7a7a5c2SJack Wang 1122f7a7a5c2SJack Wang static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx, 1123f7a7a5c2SJack Wang const struct blk_mq_queue_data *bd) 1124f7a7a5c2SJack Wang { 1125f7a7a5c2SJack Wang struct request *rq = bd->rq; 1126f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = rq->rq_disk->private_data; 1127f7a7a5c2SJack Wang struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq); 1128f7a7a5c2SJack Wang int err; 1129*5a1328d0SGioh Kim blk_status_t ret = BLK_STS_IOERR; 1130f7a7a5c2SJack Wang 1131f7a7a5c2SJack Wang if (unlikely(dev->dev_state != DEV_STATE_MAPPED)) 1132f7a7a5c2SJack Wang return BLK_STS_IOERR; 1133f7a7a5c2SJack Wang 1134f7a7a5c2SJack Wang iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON, 1135f7a7a5c2SJack Wang RTRS_PERMIT_NOWAIT); 1136f7a7a5c2SJack Wang if (unlikely(!iu->permit)) { 1137f7a7a5c2SJack Wang rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY); 1138f7a7a5c2SJack Wang return BLK_STS_RESOURCE; 1139f7a7a5c2SJack Wang } 1140f7a7a5c2SJack Wang 1141*5a1328d0SGioh Kim iu->sgt.sgl = iu->first_sgl; 1142*5a1328d0SGioh Kim err = sg_alloc_table_chained(&iu->sgt, 1143*5a1328d0SGioh Kim /* Even-if the request has no segment, 1144*5a1328d0SGioh Kim * sglist must have one entry at least */ 1145*5a1328d0SGioh Kim blk_rq_nr_phys_segments(rq) ? : 1, 1146*5a1328d0SGioh Kim iu->sgt.sgl, 1147*5a1328d0SGioh Kim RNBD_INLINE_SG_CNT); 1148*5a1328d0SGioh Kim if (err) { 1149*5a1328d0SGioh Kim rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err); 1150*5a1328d0SGioh Kim rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); 1151*5a1328d0SGioh Kim rnbd_put_permit(dev->sess, iu->permit); 1152*5a1328d0SGioh Kim return BLK_STS_RESOURCE; 1153*5a1328d0SGioh Kim } 1154*5a1328d0SGioh Kim 1155f7a7a5c2SJack Wang blk_mq_start_request(rq); 1156f7a7a5c2SJack Wang err = rnbd_client_xfer_request(dev, rq, iu); 1157f7a7a5c2SJack Wang if (likely(err == 0)) 1158f7a7a5c2SJack Wang return BLK_STS_OK; 1159f7a7a5c2SJack Wang if (unlikely(err == -EAGAIN || err == -ENOMEM)) { 1160f7a7a5c2SJack Wang rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/); 1161*5a1328d0SGioh Kim ret = BLK_STS_RESOURCE; 1162f7a7a5c2SJack Wang } 1163*5a1328d0SGioh Kim sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT); 1164f7a7a5c2SJack Wang rnbd_put_permit(dev->sess, iu->permit); 1165*5a1328d0SGioh Kim return ret; 1166f7a7a5c2SJack Wang } 1167f7a7a5c2SJack Wang 1168f7a7a5c2SJack Wang static struct blk_mq_ops rnbd_mq_ops = { 1169f7a7a5c2SJack Wang .queue_rq = rnbd_queue_rq, 1170f7a7a5c2SJack Wang .complete = rnbd_softirq_done_fn, 1171f7a7a5c2SJack Wang }; 1172f7a7a5c2SJack Wang 1173f7a7a5c2SJack Wang static int setup_mq_tags(struct rnbd_clt_session *sess) 1174f7a7a5c2SJack Wang { 1175f7a7a5c2SJack Wang struct blk_mq_tag_set *tag_set = &sess->tag_set; 1176f7a7a5c2SJack Wang 1177f7a7a5c2SJack Wang memset(tag_set, 0, sizeof(*tag_set)); 1178f7a7a5c2SJack Wang tag_set->ops = &rnbd_mq_ops; 1179f7a7a5c2SJack Wang tag_set->queue_depth = sess->queue_depth; 1180f7a7a5c2SJack Wang tag_set->numa_node = NUMA_NO_NODE; 1181f7a7a5c2SJack Wang tag_set->flags = BLK_MQ_F_SHOULD_MERGE | 118251db1c37SMing Lei BLK_MQ_F_TAG_QUEUE_SHARED; 1183*5a1328d0SGioh Kim tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE; 1184f7a7a5c2SJack Wang tag_set->nr_hw_queues = num_online_cpus(); 1185f7a7a5c2SJack Wang 1186f7a7a5c2SJack Wang return blk_mq_alloc_tag_set(tag_set); 1187f7a7a5c2SJack Wang } 1188f7a7a5c2SJack Wang 1189f7a7a5c2SJack Wang static struct rnbd_clt_session * 1190f7a7a5c2SJack Wang find_and_get_or_create_sess(const char *sessname, 1191f7a7a5c2SJack Wang const struct rtrs_addr *paths, 1192f7a7a5c2SJack Wang size_t path_cnt, u16 port_nr) 1193f7a7a5c2SJack Wang { 1194f7a7a5c2SJack Wang struct rnbd_clt_session *sess; 1195f7a7a5c2SJack Wang struct rtrs_attrs attrs; 1196f7a7a5c2SJack Wang int err; 1197f7a7a5c2SJack Wang bool first; 1198f7a7a5c2SJack Wang struct rtrs_clt_ops rtrs_ops; 1199f7a7a5c2SJack Wang 1200f7a7a5c2SJack Wang sess = find_or_create_sess(sessname, &first); 1201f7a7a5c2SJack Wang if (sess == ERR_PTR(-ENOMEM)) 1202f7a7a5c2SJack Wang return ERR_PTR(-ENOMEM); 1203f7a7a5c2SJack Wang else if (!first) 1204f7a7a5c2SJack Wang return sess; 1205f7a7a5c2SJack Wang 1206ce9fe18aSMd Haris Iqbal if (!path_cnt) { 1207ce9fe18aSMd Haris Iqbal pr_err("Session %s not found, and path parameter not given", sessname); 1208ce9fe18aSMd Haris Iqbal err = -ENXIO; 1209ce9fe18aSMd Haris Iqbal goto put_sess; 1210ce9fe18aSMd Haris Iqbal } 1211ce9fe18aSMd Haris Iqbal 1212f7a7a5c2SJack Wang rtrs_ops = (struct rtrs_clt_ops) { 1213f7a7a5c2SJack Wang .priv = sess, 1214f7a7a5c2SJack Wang .link_ev = rnbd_clt_link_ev, 1215f7a7a5c2SJack Wang }; 1216f7a7a5c2SJack Wang /* 1217f7a7a5c2SJack Wang * Nothing was found, establish rtrs connection and proceed further. 1218f7a7a5c2SJack Wang */ 1219f7a7a5c2SJack Wang sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname, 1220f7a7a5c2SJack Wang paths, path_cnt, port_nr, 1221f7a7a5c2SJack Wang sizeof(struct rnbd_iu), 1222f7a7a5c2SJack Wang RECONNECT_DELAY, BMAX_SEGMENTS, 1223d6ea3950SDanil Kipnis BLK_MAX_SEGMENT_SIZE, 1224f7a7a5c2SJack Wang MAX_RECONNECTS); 1225f7a7a5c2SJack Wang if (IS_ERR(sess->rtrs)) { 1226f7a7a5c2SJack Wang err = PTR_ERR(sess->rtrs); 1227f7a7a5c2SJack Wang goto wake_up_and_put; 1228f7a7a5c2SJack Wang } 1229f7a7a5c2SJack Wang rtrs_clt_query(sess->rtrs, &attrs); 1230f7a7a5c2SJack Wang sess->max_io_size = attrs.max_io_size; 1231f7a7a5c2SJack Wang sess->queue_depth = attrs.queue_depth; 1232f7a7a5c2SJack Wang 1233f7a7a5c2SJack Wang err = setup_mq_tags(sess); 1234f7a7a5c2SJack Wang if (err) 1235f7a7a5c2SJack Wang goto close_rtrs; 1236f7a7a5c2SJack Wang 1237f7a7a5c2SJack Wang err = send_msg_sess_info(sess, WAIT); 1238f7a7a5c2SJack Wang if (err) 1239f7a7a5c2SJack Wang goto close_rtrs; 1240f7a7a5c2SJack Wang 1241f7a7a5c2SJack Wang wake_up_rtrs_waiters(sess); 1242f7a7a5c2SJack Wang 1243f7a7a5c2SJack Wang return sess; 1244f7a7a5c2SJack Wang 1245f7a7a5c2SJack Wang close_rtrs: 1246f7a7a5c2SJack Wang close_rtrs(sess); 1247f7a7a5c2SJack Wang put_sess: 1248f7a7a5c2SJack Wang rnbd_clt_put_sess(sess); 1249f7a7a5c2SJack Wang 1250f7a7a5c2SJack Wang return ERR_PTR(err); 1251f7a7a5c2SJack Wang 1252f7a7a5c2SJack Wang wake_up_and_put: 1253f7a7a5c2SJack Wang wake_up_rtrs_waiters(sess); 1254f7a7a5c2SJack Wang goto put_sess; 1255f7a7a5c2SJack Wang } 1256f7a7a5c2SJack Wang 1257f7a7a5c2SJack Wang static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev, 1258f7a7a5c2SJack Wang struct rnbd_queue *q, 1259f7a7a5c2SJack Wang struct blk_mq_hw_ctx *hctx) 1260f7a7a5c2SJack Wang { 1261f7a7a5c2SJack Wang INIT_LIST_HEAD(&q->requeue_list); 1262f7a7a5c2SJack Wang q->dev = dev; 1263f7a7a5c2SJack Wang q->hctx = hctx; 1264f7a7a5c2SJack Wang } 1265f7a7a5c2SJack Wang 1266f7a7a5c2SJack Wang static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev) 1267f7a7a5c2SJack Wang { 1268f7a7a5c2SJack Wang int i; 1269f7a7a5c2SJack Wang struct blk_mq_hw_ctx *hctx; 1270f7a7a5c2SJack Wang struct rnbd_queue *q; 1271f7a7a5c2SJack Wang 1272f7a7a5c2SJack Wang queue_for_each_hw_ctx(dev->queue, hctx, i) { 1273f7a7a5c2SJack Wang q = &dev->hw_queues[i]; 1274f7a7a5c2SJack Wang rnbd_init_hw_queue(dev, q, hctx); 1275f7a7a5c2SJack Wang hctx->driver_data = q; 1276f7a7a5c2SJack Wang } 1277f7a7a5c2SJack Wang } 1278f7a7a5c2SJack Wang 1279f7a7a5c2SJack Wang static int setup_mq_dev(struct rnbd_clt_dev *dev) 1280f7a7a5c2SJack Wang { 1281f7a7a5c2SJack Wang dev->queue = blk_mq_init_queue(&dev->sess->tag_set); 1282f7a7a5c2SJack Wang if (IS_ERR(dev->queue)) { 1283f7a7a5c2SJack Wang rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n", 1284f7a7a5c2SJack Wang PTR_ERR(dev->queue)); 1285f7a7a5c2SJack Wang return PTR_ERR(dev->queue); 1286f7a7a5c2SJack Wang } 1287f7a7a5c2SJack Wang rnbd_init_mq_hw_queues(dev); 1288f7a7a5c2SJack Wang return 0; 1289f7a7a5c2SJack Wang } 1290f7a7a5c2SJack Wang 1291f7a7a5c2SJack Wang static void setup_request_queue(struct rnbd_clt_dev *dev) 1292f7a7a5c2SJack Wang { 1293f7a7a5c2SJack Wang blk_queue_logical_block_size(dev->queue, dev->logical_block_size); 1294f7a7a5c2SJack Wang blk_queue_physical_block_size(dev->queue, dev->physical_block_size); 1295f7a7a5c2SJack Wang blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors); 1296f7a7a5c2SJack Wang blk_queue_max_write_same_sectors(dev->queue, 1297f7a7a5c2SJack Wang dev->max_write_same_sectors); 1298f7a7a5c2SJack Wang 1299f7a7a5c2SJack Wang /* 1300f7a7a5c2SJack Wang * we don't support discards to "discontiguous" segments 1301f7a7a5c2SJack Wang * in on request 1302f7a7a5c2SJack Wang */ 1303f7a7a5c2SJack Wang blk_queue_max_discard_segments(dev->queue, 1); 1304f7a7a5c2SJack Wang 1305f7a7a5c2SJack Wang blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors); 1306f7a7a5c2SJack Wang dev->queue->limits.discard_granularity = dev->discard_granularity; 1307f7a7a5c2SJack Wang dev->queue->limits.discard_alignment = dev->discard_alignment; 1308f7a7a5c2SJack Wang if (dev->max_discard_sectors) 1309f7a7a5c2SJack Wang blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue); 1310f7a7a5c2SJack Wang if (dev->secure_discard) 1311f7a7a5c2SJack Wang blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue); 1312f7a7a5c2SJack Wang 1313f7a7a5c2SJack Wang blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue); 1314f7a7a5c2SJack Wang blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue); 1315f7a7a5c2SJack Wang blk_queue_max_segments(dev->queue, dev->max_segments); 1316f7a7a5c2SJack Wang blk_queue_io_opt(dev->queue, dev->sess->max_io_size); 1317f7a7a5c2SJack Wang blk_queue_virt_boundary(dev->queue, SZ_4K - 1); 1318512c781fSGioh Kim blk_queue_write_cache(dev->queue, dev->wc, dev->fua); 1319f7a7a5c2SJack Wang dev->queue->queuedata = dev; 1320f7a7a5c2SJack Wang } 1321f7a7a5c2SJack Wang 1322f7a7a5c2SJack Wang static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx) 1323f7a7a5c2SJack Wang { 1324f7a7a5c2SJack Wang dev->gd->major = rnbd_client_major; 1325f7a7a5c2SJack Wang dev->gd->first_minor = idx << RNBD_PART_BITS; 1326f7a7a5c2SJack Wang dev->gd->fops = &rnbd_client_ops; 1327f7a7a5c2SJack Wang dev->gd->queue = dev->queue; 1328f7a7a5c2SJack Wang dev->gd->private_data = dev; 1329f7a7a5c2SJack Wang snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d", 1330f7a7a5c2SJack Wang idx); 1331f7a7a5c2SJack Wang pr_debug("disk_name=%s, capacity=%zu\n", 1332f7a7a5c2SJack Wang dev->gd->disk_name, 1333f7a7a5c2SJack Wang dev->nsectors * (dev->logical_block_size / SECTOR_SIZE) 1334f7a7a5c2SJack Wang ); 1335f7a7a5c2SJack Wang 1336f7a7a5c2SJack Wang set_capacity(dev->gd, dev->nsectors); 1337f7a7a5c2SJack Wang 1338f7a7a5c2SJack Wang if (dev->access_mode == RNBD_ACCESS_RO) { 1339f7a7a5c2SJack Wang dev->read_only = true; 1340f7a7a5c2SJack Wang set_disk_ro(dev->gd, true); 1341f7a7a5c2SJack Wang } else { 1342f7a7a5c2SJack Wang dev->read_only = false; 1343f7a7a5c2SJack Wang } 1344f7a7a5c2SJack Wang 1345f7a7a5c2SJack Wang if (!dev->rotational) 1346f7a7a5c2SJack Wang blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue); 1347f7a7a5c2SJack Wang } 1348f7a7a5c2SJack Wang 1349f7a7a5c2SJack Wang static int rnbd_client_setup_device(struct rnbd_clt_session *sess, 1350f7a7a5c2SJack Wang struct rnbd_clt_dev *dev, int idx) 1351f7a7a5c2SJack Wang { 1352f7a7a5c2SJack Wang int err; 1353f7a7a5c2SJack Wang 1354f7a7a5c2SJack Wang dev->size = dev->nsectors * dev->logical_block_size; 1355f7a7a5c2SJack Wang 1356f7a7a5c2SJack Wang err = setup_mq_dev(dev); 1357f7a7a5c2SJack Wang if (err) 1358f7a7a5c2SJack Wang return err; 1359f7a7a5c2SJack Wang 1360f7a7a5c2SJack Wang setup_request_queue(dev); 1361f7a7a5c2SJack Wang 1362f7a7a5c2SJack Wang dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE); 1363f7a7a5c2SJack Wang if (!dev->gd) { 1364f7a7a5c2SJack Wang blk_cleanup_queue(dev->queue); 1365f7a7a5c2SJack Wang return -ENOMEM; 1366f7a7a5c2SJack Wang } 1367f7a7a5c2SJack Wang 1368f7a7a5c2SJack Wang rnbd_clt_setup_gen_disk(dev, idx); 1369f7a7a5c2SJack Wang 1370f7a7a5c2SJack Wang return 0; 1371f7a7a5c2SJack Wang } 1372f7a7a5c2SJack Wang 1373f7a7a5c2SJack Wang static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess, 1374f7a7a5c2SJack Wang enum rnbd_access_mode access_mode, 1375f7a7a5c2SJack Wang const char *pathname) 1376f7a7a5c2SJack Wang { 1377f7a7a5c2SJack Wang struct rnbd_clt_dev *dev; 1378f7a7a5c2SJack Wang int ret; 1379f7a7a5c2SJack Wang 1380f7a7a5c2SJack Wang dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE); 1381f7a7a5c2SJack Wang if (!dev) 1382f7a7a5c2SJack Wang return ERR_PTR(-ENOMEM); 1383f7a7a5c2SJack Wang 1384f7a7a5c2SJack Wang dev->hw_queues = kcalloc(nr_cpu_ids, sizeof(*dev->hw_queues), 1385f7a7a5c2SJack Wang GFP_KERNEL); 1386f7a7a5c2SJack Wang if (!dev->hw_queues) { 1387f7a7a5c2SJack Wang ret = -ENOMEM; 1388f7a7a5c2SJack Wang goto out_alloc; 1389f7a7a5c2SJack Wang } 1390f7a7a5c2SJack Wang 1391f7a7a5c2SJack Wang mutex_lock(&ida_lock); 1392f7a7a5c2SJack Wang ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS), 1393f7a7a5c2SJack Wang GFP_KERNEL); 1394f7a7a5c2SJack Wang mutex_unlock(&ida_lock); 1395f7a7a5c2SJack Wang if (ret < 0) { 1396f7a7a5c2SJack Wang pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n", 1397f7a7a5c2SJack Wang pathname, sess->sessname, ret); 1398f7a7a5c2SJack Wang goto out_queues; 1399f7a7a5c2SJack Wang } 140064e8a6ecSMd Haris Iqbal 1401e7508d48SMd Haris Iqbal dev->pathname = kstrdup(pathname, GFP_KERNEL); 140264e8a6ecSMd Haris Iqbal if (!dev->pathname) { 140364e8a6ecSMd Haris Iqbal ret = -ENOMEM; 140464e8a6ecSMd Haris Iqbal goto out_queues; 140564e8a6ecSMd Haris Iqbal } 140664e8a6ecSMd Haris Iqbal 1407f7a7a5c2SJack Wang dev->clt_device_id = ret; 1408f7a7a5c2SJack Wang dev->sess = sess; 1409f7a7a5c2SJack Wang dev->access_mode = access_mode; 1410f7a7a5c2SJack Wang mutex_init(&dev->lock); 1411f7a7a5c2SJack Wang refcount_set(&dev->refcount, 1); 1412f7a7a5c2SJack Wang dev->dev_state = DEV_STATE_INIT; 1413f7a7a5c2SJack Wang 1414f7a7a5c2SJack Wang /* 1415f7a7a5c2SJack Wang * Here we called from sysfs entry, thus clt-sysfs is 1416f7a7a5c2SJack Wang * responsible that session will not disappear. 1417f7a7a5c2SJack Wang */ 1418f7a7a5c2SJack Wang WARN_ON(!rnbd_clt_get_sess(sess)); 1419f7a7a5c2SJack Wang 1420f7a7a5c2SJack Wang return dev; 1421f7a7a5c2SJack Wang 1422f7a7a5c2SJack Wang out_queues: 1423f7a7a5c2SJack Wang kfree(dev->hw_queues); 1424f7a7a5c2SJack Wang out_alloc: 1425f7a7a5c2SJack Wang kfree(dev); 1426f7a7a5c2SJack Wang return ERR_PTR(ret); 1427f7a7a5c2SJack Wang } 1428f7a7a5c2SJack Wang 142991f4acb2SGuoqing Jiang static bool __exists_dev(const char *pathname, const char *sessname) 1430f7a7a5c2SJack Wang { 1431f7a7a5c2SJack Wang struct rnbd_clt_session *sess; 1432f7a7a5c2SJack Wang struct rnbd_clt_dev *dev; 1433f7a7a5c2SJack Wang bool found = false; 1434f7a7a5c2SJack Wang 1435f7a7a5c2SJack Wang list_for_each_entry(sess, &sess_list, list) { 143691f4acb2SGuoqing Jiang if (sessname && strncmp(sess->sessname, sessname, 143791f4acb2SGuoqing Jiang sizeof(sess->sessname))) 143891f4acb2SGuoqing Jiang continue; 1439f7a7a5c2SJack Wang mutex_lock(&sess->lock); 1440f7a7a5c2SJack Wang list_for_each_entry(dev, &sess->devs_list, list) { 144164e8a6ecSMd Haris Iqbal if (strlen(dev->pathname) == strlen(pathname) && 144264e8a6ecSMd Haris Iqbal !strcmp(dev->pathname, pathname)) { 1443f7a7a5c2SJack Wang found = true; 1444f7a7a5c2SJack Wang break; 1445f7a7a5c2SJack Wang } 1446f7a7a5c2SJack Wang } 1447f7a7a5c2SJack Wang mutex_unlock(&sess->lock); 1448f7a7a5c2SJack Wang if (found) 1449f7a7a5c2SJack Wang break; 1450f7a7a5c2SJack Wang } 1451f7a7a5c2SJack Wang 1452f7a7a5c2SJack Wang return found; 1453f7a7a5c2SJack Wang } 1454f7a7a5c2SJack Wang 145591f4acb2SGuoqing Jiang static bool exists_devpath(const char *pathname, const char *sessname) 1456f7a7a5c2SJack Wang { 1457f7a7a5c2SJack Wang bool found; 1458f7a7a5c2SJack Wang 1459f7a7a5c2SJack Wang mutex_lock(&sess_lock); 146091f4acb2SGuoqing Jiang found = __exists_dev(pathname, sessname); 1461f7a7a5c2SJack Wang mutex_unlock(&sess_lock); 1462f7a7a5c2SJack Wang 1463f7a7a5c2SJack Wang return found; 1464f7a7a5c2SJack Wang } 1465f7a7a5c2SJack Wang 1466f7a7a5c2SJack Wang static bool insert_dev_if_not_exists_devpath(const char *pathname, 1467f7a7a5c2SJack Wang struct rnbd_clt_session *sess, 1468f7a7a5c2SJack Wang struct rnbd_clt_dev *dev) 1469f7a7a5c2SJack Wang { 1470f7a7a5c2SJack Wang bool found; 1471f7a7a5c2SJack Wang 1472f7a7a5c2SJack Wang mutex_lock(&sess_lock); 147391f4acb2SGuoqing Jiang found = __exists_dev(pathname, sess->sessname); 1474f7a7a5c2SJack Wang if (!found) { 1475f7a7a5c2SJack Wang mutex_lock(&sess->lock); 1476f7a7a5c2SJack Wang list_add_tail(&dev->list, &sess->devs_list); 1477f7a7a5c2SJack Wang mutex_unlock(&sess->lock); 1478f7a7a5c2SJack Wang } 1479f7a7a5c2SJack Wang mutex_unlock(&sess_lock); 1480f7a7a5c2SJack Wang 1481f7a7a5c2SJack Wang return found; 1482f7a7a5c2SJack Wang } 1483f7a7a5c2SJack Wang 1484f7a7a5c2SJack Wang static void delete_dev(struct rnbd_clt_dev *dev) 1485f7a7a5c2SJack Wang { 1486f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 1487f7a7a5c2SJack Wang 1488f7a7a5c2SJack Wang mutex_lock(&sess->lock); 1489f7a7a5c2SJack Wang list_del(&dev->list); 1490f7a7a5c2SJack Wang mutex_unlock(&sess->lock); 1491f7a7a5c2SJack Wang } 1492f7a7a5c2SJack Wang 1493f7a7a5c2SJack Wang struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname, 1494f7a7a5c2SJack Wang struct rtrs_addr *paths, 1495f7a7a5c2SJack Wang size_t path_cnt, u16 port_nr, 1496f7a7a5c2SJack Wang const char *pathname, 1497f7a7a5c2SJack Wang enum rnbd_access_mode access_mode) 1498f7a7a5c2SJack Wang { 1499f7a7a5c2SJack Wang struct rnbd_clt_session *sess; 1500f7a7a5c2SJack Wang struct rnbd_clt_dev *dev; 1501f7a7a5c2SJack Wang int ret; 1502f7a7a5c2SJack Wang 150391f4acb2SGuoqing Jiang if (unlikely(exists_devpath(pathname, sessname))) 1504f7a7a5c2SJack Wang return ERR_PTR(-EEXIST); 1505f7a7a5c2SJack Wang 1506f7a7a5c2SJack Wang sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr); 1507f7a7a5c2SJack Wang if (IS_ERR(sess)) 1508f7a7a5c2SJack Wang return ERR_CAST(sess); 1509f7a7a5c2SJack Wang 1510f7a7a5c2SJack Wang dev = init_dev(sess, access_mode, pathname); 1511f7a7a5c2SJack Wang if (IS_ERR(dev)) { 1512f7a7a5c2SJack Wang pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n", 1513f7a7a5c2SJack Wang pathname, sess->sessname, PTR_ERR(dev)); 1514f7a7a5c2SJack Wang ret = PTR_ERR(dev); 1515f7a7a5c2SJack Wang goto put_sess; 1516f7a7a5c2SJack Wang } 1517f7a7a5c2SJack Wang if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) { 1518f7a7a5c2SJack Wang ret = -EEXIST; 1519f7a7a5c2SJack Wang goto put_dev; 1520f7a7a5c2SJack Wang } 1521f7a7a5c2SJack Wang ret = send_msg_open(dev, WAIT); 1522f7a7a5c2SJack Wang if (ret) { 1523f7a7a5c2SJack Wang rnbd_clt_err(dev, 1524f7a7a5c2SJack Wang "map_device: failed, can't open remote device, err: %d\n", 1525f7a7a5c2SJack Wang ret); 1526f7a7a5c2SJack Wang goto del_dev; 1527f7a7a5c2SJack Wang } 1528f7a7a5c2SJack Wang mutex_lock(&dev->lock); 1529f7a7a5c2SJack Wang pr_debug("Opened remote device: session=%s, path='%s'\n", 1530f7a7a5c2SJack Wang sess->sessname, pathname); 1531f7a7a5c2SJack Wang ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id); 1532f7a7a5c2SJack Wang if (ret) { 1533f7a7a5c2SJack Wang rnbd_clt_err(dev, 1534f7a7a5c2SJack Wang "map_device: Failed to configure device, err: %d\n", 1535f7a7a5c2SJack Wang ret); 1536f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 153747be77c2SGioh Kim goto send_close; 1538f7a7a5c2SJack Wang } 1539f7a7a5c2SJack Wang 1540f7a7a5c2SJack Wang rnbd_clt_info(dev, 1541512c781fSGioh Kim "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n", 1542f7a7a5c2SJack Wang dev->gd->disk_name, dev->nsectors, 1543f7a7a5c2SJack Wang dev->logical_block_size, dev->physical_block_size, 1544f7a7a5c2SJack Wang dev->max_write_same_sectors, dev->max_discard_sectors, 1545f7a7a5c2SJack Wang dev->discard_granularity, dev->discard_alignment, 1546f7a7a5c2SJack Wang dev->secure_discard, dev->max_segments, 1547512c781fSGioh Kim dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua); 1548f7a7a5c2SJack Wang 1549f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 1550f7a7a5c2SJack Wang 1551f7a7a5c2SJack Wang add_disk(dev->gd); 1552f7a7a5c2SJack Wang rnbd_clt_put_sess(sess); 1553f7a7a5c2SJack Wang 1554f7a7a5c2SJack Wang return dev; 1555f7a7a5c2SJack Wang 155647be77c2SGioh Kim send_close: 155747be77c2SGioh Kim send_msg_close(dev, dev->device_id, WAIT); 1558f7a7a5c2SJack Wang del_dev: 1559f7a7a5c2SJack Wang delete_dev(dev); 1560f7a7a5c2SJack Wang put_dev: 1561f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 1562f7a7a5c2SJack Wang put_sess: 1563f7a7a5c2SJack Wang rnbd_clt_put_sess(sess); 1564f7a7a5c2SJack Wang 1565f7a7a5c2SJack Wang return ERR_PTR(ret); 1566f7a7a5c2SJack Wang } 1567f7a7a5c2SJack Wang 1568f7a7a5c2SJack Wang static void destroy_gen_disk(struct rnbd_clt_dev *dev) 1569f7a7a5c2SJack Wang { 1570f7a7a5c2SJack Wang del_gendisk(dev->gd); 1571f7a7a5c2SJack Wang blk_cleanup_queue(dev->queue); 1572f7a7a5c2SJack Wang put_disk(dev->gd); 1573f7a7a5c2SJack Wang } 1574f7a7a5c2SJack Wang 1575f7a7a5c2SJack Wang static void destroy_sysfs(struct rnbd_clt_dev *dev, 1576f7a7a5c2SJack Wang const struct attribute *sysfs_self) 1577f7a7a5c2SJack Wang { 1578f7a7a5c2SJack Wang rnbd_clt_remove_dev_symlink(dev); 1579f7a7a5c2SJack Wang if (dev->kobj.state_initialized) { 1580f7a7a5c2SJack Wang if (sysfs_self) 1581f7a7a5c2SJack Wang /* To avoid deadlock firstly remove itself */ 1582f7a7a5c2SJack Wang sysfs_remove_file_self(&dev->kobj, sysfs_self); 1583f7a7a5c2SJack Wang kobject_del(&dev->kobj); 1584f7a7a5c2SJack Wang kobject_put(&dev->kobj); 1585f7a7a5c2SJack Wang } 1586f7a7a5c2SJack Wang } 1587f7a7a5c2SJack Wang 1588f7a7a5c2SJack Wang int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force, 1589f7a7a5c2SJack Wang const struct attribute *sysfs_self) 1590f7a7a5c2SJack Wang { 1591f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 1592f7a7a5c2SJack Wang int refcount, ret = 0; 1593f7a7a5c2SJack Wang bool was_mapped; 1594f7a7a5c2SJack Wang 1595f7a7a5c2SJack Wang mutex_lock(&dev->lock); 1596f7a7a5c2SJack Wang if (dev->dev_state == DEV_STATE_UNMAPPED) { 1597f7a7a5c2SJack Wang rnbd_clt_info(dev, "Device is already being unmapped\n"); 1598f7a7a5c2SJack Wang ret = -EALREADY; 1599f7a7a5c2SJack Wang goto err; 1600f7a7a5c2SJack Wang } 1601f7a7a5c2SJack Wang refcount = refcount_read(&dev->refcount); 1602f7a7a5c2SJack Wang if (!force && refcount > 1) { 1603f7a7a5c2SJack Wang rnbd_clt_err(dev, 1604f7a7a5c2SJack Wang "Closing device failed, device is in use, (%d device users)\n", 1605f7a7a5c2SJack Wang refcount - 1); 1606f7a7a5c2SJack Wang ret = -EBUSY; 1607f7a7a5c2SJack Wang goto err; 1608f7a7a5c2SJack Wang } 1609f7a7a5c2SJack Wang was_mapped = (dev->dev_state == DEV_STATE_MAPPED); 1610f7a7a5c2SJack Wang dev->dev_state = DEV_STATE_UNMAPPED; 1611f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 1612f7a7a5c2SJack Wang 1613f7a7a5c2SJack Wang delete_dev(dev); 1614f7a7a5c2SJack Wang destroy_sysfs(dev, sysfs_self); 1615f7a7a5c2SJack Wang destroy_gen_disk(dev); 1616f7a7a5c2SJack Wang if (was_mapped && sess->rtrs) 1617f7a7a5c2SJack Wang send_msg_close(dev, dev->device_id, WAIT); 1618f7a7a5c2SJack Wang 1619f7a7a5c2SJack Wang rnbd_clt_info(dev, "Device is unmapped\n"); 1620f7a7a5c2SJack Wang 1621f7a7a5c2SJack Wang /* Likely last reference put */ 1622f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 1623f7a7a5c2SJack Wang 1624f7a7a5c2SJack Wang /* 1625f7a7a5c2SJack Wang * Here device and session can be vanished! 1626f7a7a5c2SJack Wang */ 1627f7a7a5c2SJack Wang 1628f7a7a5c2SJack Wang return 0; 1629f7a7a5c2SJack Wang err: 1630f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 1631f7a7a5c2SJack Wang 1632f7a7a5c2SJack Wang return ret; 1633f7a7a5c2SJack Wang } 1634f7a7a5c2SJack Wang 1635f7a7a5c2SJack Wang int rnbd_clt_remap_device(struct rnbd_clt_dev *dev) 1636f7a7a5c2SJack Wang { 1637f7a7a5c2SJack Wang int err; 1638f7a7a5c2SJack Wang 1639f7a7a5c2SJack Wang mutex_lock(&dev->lock); 1640f7a7a5c2SJack Wang if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) 1641f7a7a5c2SJack Wang err = 0; 1642f7a7a5c2SJack Wang else if (dev->dev_state == DEV_STATE_UNMAPPED) 1643f7a7a5c2SJack Wang err = -ENODEV; 1644f7a7a5c2SJack Wang else if (dev->dev_state == DEV_STATE_MAPPED) 1645f7a7a5c2SJack Wang err = -EALREADY; 1646f7a7a5c2SJack Wang else 1647f7a7a5c2SJack Wang err = -EBUSY; 1648f7a7a5c2SJack Wang mutex_unlock(&dev->lock); 1649f7a7a5c2SJack Wang if (!err) { 1650f7a7a5c2SJack Wang rnbd_clt_info(dev, "Remapping device.\n"); 1651f7a7a5c2SJack Wang err = send_msg_open(dev, WAIT); 1652f7a7a5c2SJack Wang if (err) 1653f7a7a5c2SJack Wang rnbd_clt_err(dev, "remap_device: %d\n", err); 1654f7a7a5c2SJack Wang } 1655f7a7a5c2SJack Wang 1656f7a7a5c2SJack Wang return err; 1657f7a7a5c2SJack Wang } 1658f7a7a5c2SJack Wang 1659f7a7a5c2SJack Wang static void unmap_device_work(struct work_struct *work) 1660f7a7a5c2SJack Wang { 1661f7a7a5c2SJack Wang struct rnbd_clt_dev *dev; 1662f7a7a5c2SJack Wang 1663f7a7a5c2SJack Wang dev = container_of(work, typeof(*dev), unmap_on_rmmod_work); 1664f7a7a5c2SJack Wang rnbd_clt_unmap_device(dev, true, NULL); 1665f7a7a5c2SJack Wang } 1666f7a7a5c2SJack Wang 1667f7a7a5c2SJack Wang static void rnbd_destroy_sessions(void) 1668f7a7a5c2SJack Wang { 1669f7a7a5c2SJack Wang struct rnbd_clt_session *sess, *sn; 1670f7a7a5c2SJack Wang struct rnbd_clt_dev *dev, *tn; 1671f7a7a5c2SJack Wang 1672f7a7a5c2SJack Wang /* Firstly forbid access through sysfs interface */ 1673f7a7a5c2SJack Wang rnbd_clt_destroy_default_group(); 1674f7a7a5c2SJack Wang rnbd_clt_destroy_sysfs_files(); 1675f7a7a5c2SJack Wang 1676f7a7a5c2SJack Wang /* 1677f7a7a5c2SJack Wang * Here at this point there is no any concurrent access to sessions 1678f7a7a5c2SJack Wang * list and devices list: 16793877ece0SJack Wang * 1. New session or device can't be created - session sysfs files 1680f7a7a5c2SJack Wang * are removed. 1681f7a7a5c2SJack Wang * 2. Device or session can't be removed - module reference is taken 1682f7a7a5c2SJack Wang * into account in unmap device sysfs callback. 1683f7a7a5c2SJack Wang * 3. No IO requests inflight - each file open of block_dev increases 1684f7a7a5c2SJack Wang * module reference in get_disk(). 1685f7a7a5c2SJack Wang * 1686f7a7a5c2SJack Wang * But still there can be user requests inflights, which are sent by 1687f7a7a5c2SJack Wang * asynchronous send_msg_*() functions, thus before unmapping devices 1688f7a7a5c2SJack Wang * RTRS session must be explicitly closed. 1689f7a7a5c2SJack Wang */ 1690f7a7a5c2SJack Wang 1691f7a7a5c2SJack Wang list_for_each_entry_safe(sess, sn, &sess_list, list) { 1692f7a7a5c2SJack Wang WARN_ON(!rnbd_clt_get_sess(sess)); 1693f7a7a5c2SJack Wang close_rtrs(sess); 1694f7a7a5c2SJack Wang list_for_each_entry_safe(dev, tn, &sess->devs_list, list) { 1695f7a7a5c2SJack Wang /* 1696f7a7a5c2SJack Wang * Here unmap happens in parallel for only one reason: 1697f7a7a5c2SJack Wang * blk_cleanup_queue() takes around half a second, so 1698f7a7a5c2SJack Wang * on huge amount of devices the whole module unload 1699f7a7a5c2SJack Wang * procedure takes minutes. 1700f7a7a5c2SJack Wang */ 1701f7a7a5c2SJack Wang INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work); 1702f7a7a5c2SJack Wang queue_work(system_long_wq, &dev->unmap_on_rmmod_work); 1703f7a7a5c2SJack Wang } 1704f7a7a5c2SJack Wang rnbd_clt_put_sess(sess); 1705f7a7a5c2SJack Wang } 1706f7a7a5c2SJack Wang /* Wait for all scheduled unmap works */ 1707f7a7a5c2SJack Wang flush_workqueue(system_long_wq); 1708f7a7a5c2SJack Wang WARN_ON(!list_empty(&sess_list)); 1709f7a7a5c2SJack Wang } 1710f7a7a5c2SJack Wang 1711f7a7a5c2SJack Wang static int __init rnbd_client_init(void) 1712f7a7a5c2SJack Wang { 1713f7a7a5c2SJack Wang int err = 0; 1714f7a7a5c2SJack Wang 1715f7a7a5c2SJack Wang BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4); 1716f7a7a5c2SJack Wang BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36); 1717f7a7a5c2SJack Wang BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36); 1718f7a7a5c2SJack Wang BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264); 1719f7a7a5c2SJack Wang BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8); 1720f7a7a5c2SJack Wang BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56); 1721f7a7a5c2SJack Wang rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd"); 1722f7a7a5c2SJack Wang if (rnbd_client_major <= 0) { 1723f7a7a5c2SJack Wang pr_err("Failed to load module, block device registration failed\n"); 1724f7a7a5c2SJack Wang return -EBUSY; 1725f7a7a5c2SJack Wang } 1726f7a7a5c2SJack Wang 1727f7a7a5c2SJack Wang err = rnbd_clt_create_sysfs_files(); 1728f7a7a5c2SJack Wang if (err) { 1729f7a7a5c2SJack Wang pr_err("Failed to load module, creating sysfs device files failed, err: %d\n", 1730f7a7a5c2SJack Wang err); 1731f7a7a5c2SJack Wang unregister_blkdev(rnbd_client_major, "rnbd"); 1732f7a7a5c2SJack Wang } 1733f7a7a5c2SJack Wang 1734f7a7a5c2SJack Wang return err; 1735f7a7a5c2SJack Wang } 1736f7a7a5c2SJack Wang 1737f7a7a5c2SJack Wang static void __exit rnbd_client_exit(void) 1738f7a7a5c2SJack Wang { 1739f7a7a5c2SJack Wang rnbd_destroy_sessions(); 1740f7a7a5c2SJack Wang unregister_blkdev(rnbd_client_major, "rnbd"); 1741f7a7a5c2SJack Wang ida_destroy(&index_ida); 1742f7a7a5c2SJack Wang } 1743f7a7a5c2SJack Wang 1744f7a7a5c2SJack Wang module_init(rnbd_client_init); 1745f7a7a5c2SJack Wang module_exit(rnbd_client_exit); 1746