// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>

#include "rnbd-clt.h"

MODULE_DESCRIPTION("RDMA Network Block Device Client");
MODULE_LICENSE("GPL");

static int rnbd_client_major;
static DEFINE_IDA(index_ida);
static DEFINE_MUTEX(ida_lock);
static DEFINE_MUTEX(sess_lock);
static LIST_HEAD(sess_list);

/*
 * Maximum number of partitions an instance can have.
 * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
 */
#define RNBD_PART_BITS 6

static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
{
	return refcount_inc_not_zero(&sess->refcount);
}

static void free_sess(struct rnbd_clt_session *sess);

static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
{
	might_sleep();

	if (refcount_dec_and_test(&sess->refcount))
		free_sess(sess);
}

static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
{
	might_sleep();

	if (!refcount_dec_and_test(&dev->refcount))
		return;

	mutex_lock(&ida_lock);
	ida_simple_remove(&index_ida, dev->clt_device_id);
	mutex_unlock(&ida_lock);
	kfree(dev->hw_queues);
	kfree(dev->pathname);
	rnbd_clt_put_sess(dev->sess);
	mutex_destroy(&dev->lock);
	kfree(dev);
}

static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
{
	return refcount_inc_not_zero(&dev->refcount);
}
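
/*
 * Illustrative sketch (not part of the driver): the get/put helpers
 * above follow the usual conditional-refcount pattern. A caller that
 * may race with the last put must check the return value of the "get":
 *
 *	if (rnbd_clt_get_dev(dev)) {
 *		// use dev safely here
 *		rnbd_clt_put_dev(dev);	// may free dev and drop the
 *					// session reference it holds
 *	}
 *
 * refcount_inc_not_zero() fails once the counter has hit zero, so a
 * dying device can never be resurrected.
 */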

static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
				 const struct rnbd_msg_open_rsp *rsp)
{
	struct rnbd_clt_session *sess = dev->sess;

	if (!rsp->logical_block_size)
		return -EINVAL;

	dev->device_id = le32_to_cpu(rsp->device_id);
	dev->nsectors = le64_to_cpu(rsp->nsectors);
	dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
	dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
	dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
	dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
	dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
	dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
	dev->secure_discard = le16_to_cpu(rsp->secure_discard);
	dev->rotational = rsp->rotational;
	dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
	dev->fua = !!(rsp->cache_policy & RNBD_FUA);

	dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
	dev->max_segments = BMAX_SEGMENTS;

	return 0;
}

static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
				    size_t new_nsectors)
{
	rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
		      dev->nsectors, new_nsectors);
	dev->nsectors = new_nsectors;
	set_capacity_and_notify(dev->gd, dev->nsectors);
	return 0;
}

static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
				struct rnbd_msg_open_rsp *rsp)
{
	int err = 0;

	mutex_lock(&dev->lock);
	if (dev->dev_state == DEV_STATE_UNMAPPED) {
		rnbd_clt_info(dev,
			      "Ignoring Open-Response message from server for unmapped device\n");
		err = -ENOENT;
		goto out;
	}
	if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
		u64 nsectors = le64_to_cpu(rsp->nsectors);

		/*
		 * If the device was remapped and the size changed in the
		 * meantime we need to revalidate it
		 */
		if (dev->nsectors != nsectors)
			rnbd_clt_change_capacity(dev, nsectors);
		rnbd_clt_info(dev, "Device online, device remapped successfully\n");
	}
	err = rnbd_clt_set_dev_attr(dev, rsp);
	if (err)
		goto out;
	dev->dev_state = DEV_STATE_MAPPED;

out:
	mutex_unlock(&dev->lock);

	return err;
}
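
/*
 * Device state transitions handled above, as a rough sketch inferred
 * from this file only (the unmap path lives outside this excerpt):
 *
 *	DEV_STATE_INIT -> DEV_STATE_MAPPED            (first open response)
 *	DEV_STATE_MAPPED -> MAPPED_DISCONNECTED       (link event, see below)
 *	MAPPED_DISCONNECTED -> DEV_STATE_MAPPED       (successful remap)
 *	any state -> DEV_STATE_UNMAPPED               (user unmaps the device)
 */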

int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
{
	int ret = 0;

	mutex_lock(&dev->lock);
	if (dev->dev_state != DEV_STATE_MAPPED) {
		pr_err("Failed to set new size of the device, device is not opened\n");
		ret = -ENOENT;
		goto out;
	}
	ret = rnbd_clt_change_capacity(dev, newsize);

out:
	mutex_unlock(&dev->lock);

	return ret;
}

static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
{
	if (WARN_ON(!q->hctx))
		return;

	/* We can come here from interrupt, thus async=true */
	blk_mq_run_hw_queue(q->hctx, true);
}

enum {
	RNBD_DELAY_IFBUSY = -1,
};

/**
 * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
 * @sess: Session to find a queue for
 * @cpu: Cpu to start the search from
 *
 * Description:
 *     Each CPU has a list of HW queues which need to be rerun. If such a
 *     list is not empty, it is marked with a bit. This function finds the
 *     first set bit in the bitmap and returns the corresponding CPU list.
 */
static struct rnbd_cpu_qlist *
rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
{
	int bit;

	/* Search from cpu to nr_cpu_ids */
	bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
	if (bit < nr_cpu_ids) {
		return per_cpu_ptr(sess->cpu_queues, bit);
	} else if (cpu != 0) {
		/* Search from 0 to cpu */
		bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
		if (bit < cpu)
			return per_cpu_ptr(sess->cpu_queues, bit);
	}

	return NULL;
}

static inline int nxt_cpu(int cpu)
{
	return (cpu + 1) % nr_cpu_ids;
}
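
/*
 * Example (illustrative only): with nr_cpu_ids == 4 and bits 0 and 2
 * set in cpu_queues_bm, rnbd_get_cpu_qlist(sess, 3) finds nothing in
 * [3, 4), wraps around and returns the per-cpu list of CPU 0, while
 * rnbd_get_cpu_qlist(sess, 1) returns the list of CPU 2.
 */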

/**
 * rnbd_rerun_if_needed() - rerun next queue marked as stopped
 * @sess: Session to rerun a queue on
 *
 * Description:
 *     Each CPU has its own list of HW queues, which should be rerun.
 *     The function finds such a list, takes the list lock, picks up the
 *     first HW queue out of the list and requeues it.
 *
 * Return:
 *     True if the queue was requeued, false otherwise.
 *
 * Context:
 *     Does not matter.
 */
static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
{
	struct rnbd_queue *q = NULL;
	struct rnbd_cpu_qlist *cpu_q;
	unsigned long flags;
	int *cpup;

	/*
	 * To keep fairness and not to let other queues starve we always
	 * try to wake up someone else in round-robin manner. That of course
	 * increases latency but queues always have a chance to be executed.
	 */
	cpup = get_cpu_ptr(sess->cpu_rr);
	for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
	     cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
		if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
			continue;
		if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm)))
			goto unlock;
		q = list_first_entry_or_null(&cpu_q->requeue_list,
					     typeof(*q), requeue_list);
		if (WARN_ON(!q))
			goto clear_bit;
		list_del_init(&q->requeue_list);
		clear_bit_unlock(0, &q->in_list);

		if (list_empty(&cpu_q->requeue_list)) {
			/* Clear bit if nothing is left */
clear_bit:
			clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
		}
unlock:
		spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);

		if (q)
			break;
	}

	/*
	 * Saves the CPU that is going to be requeued on the per-cpu var. Just
	 * incrementing it doesn't work because rnbd_get_cpu_qlist() will
	 * always return the first CPU with something on the queue list when
	 * the value stored on the var is greater than the last CPU with
	 * something on the list.
	 */
	if (cpu_q)
		*cpup = cpu_q->cpu;
	put_cpu_var(sess->cpu_rr);

	if (q)
		rnbd_clt_dev_requeue(q);

	return q;
}

/**
 * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
 *     session is idling (there are no requests in-flight).
 * @sess: Session to rerun the queues on
 *
 * Description:
 *     This function tries to rerun all stopped queues if there are no
 *     requests in-flight anymore. This function tries to solve an obvious
 *     problem, when the number of tags is smaller than the number of queues
 *     (hctxs) which are stopped and put to sleep. If the last permit, which
 *     has just been put, does not wake up all left queues (hctxs), IO
 *     requests hang forever.
 *
 *     That can happen when all of the permits, say N, have been exhausted
 *     from one CPU, and we have many block devices per session, say M.
 *     Each block device has its own queue (hctx) for each CPU, so eventually
 *     we can put that number of queues (hctxs) to sleep: M x nr_cpu_ids.
 *     If the number of permits N < M x nr_cpu_ids we finally get an IO hang.
 *
 *     To avoid this hang the last caller of rnbd_put_permit() (the last
 *     caller is the one who observes sess->busy == 0) must wake up all
 *     remaining queues.
 *
 * Context:
 *     Does not matter.
 */
static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
{
	bool requeued;

	do {
		requeued = rnbd_rerun_if_needed(sess);
	} while (atomic_read(&sess->busy) == 0 && requeued);
}

static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
					   enum rtrs_clt_con_type con_type,
					   int wait)
{
	struct rtrs_permit *permit;

	permit = rtrs_clt_get_permit(sess->rtrs, con_type,
				     wait ? RTRS_PERMIT_WAIT :
				     RTRS_PERMIT_NOWAIT);
	if (likely(permit))
		/* We have a subtle rare case here, when all permits can be
		 * consumed before the busy counter is increased. This is safe,
		 * because the loser will get NULL as a permit, observe a zero
		 * busy counter and immediately restart the queue itself.
		 */
		atomic_inc(&sess->busy);

	return permit;
}

static void rnbd_put_permit(struct rnbd_clt_session *sess,
			    struct rtrs_permit *permit)
{
	rtrs_clt_put_permit(sess->rtrs, permit);
	atomic_dec(&sess->busy);
	/* Paired with rnbd_clt_dev_add_to_requeue(). Decrement first
	 * and then check queue bits.
	 */
	smp_mb__after_atomic();
	rnbd_rerun_all_if_idle(sess);
}
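
/*
 * Illustrative numbers (assumed, not from the source): with N == 64
 * permits per session and M == 8 devices on a 16-CPU machine, up to
 * M * nr_cpu_ids == 128 hctxs can be asleep while only 64 permit puts
 * ever happen, hence the "last put reruns all" rule above. The barrier
 * pairing, as a sketch:
 *
 *	rnbd_put_permit()               rnbd_clt_dev_add_to_requeue()
 *	  atomic_dec(&sess->busy);        set_bit(cpu, cpu_queues_bm);
 *	  smp_mb__after_atomic();         smp_mb__before_atomic();
 *	  test_bit(cpu, cpu_queues_bm);   atomic_read(&sess->busy);
 *
 * Either the putter sees the freshly set bit, or the enqueuer sees
 * busy == 0 and restarts its queue itself.
 */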

static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
				   enum rtrs_clt_con_type con_type,
				   int wait)
{
	struct rnbd_iu *iu;
	struct rtrs_permit *permit;

	iu = kzalloc(sizeof(*iu), GFP_KERNEL);
	if (!iu)
		return NULL;

	permit = rnbd_get_permit(sess, con_type,
				 wait ? RTRS_PERMIT_WAIT :
				 RTRS_PERMIT_NOWAIT);
	if (unlikely(!permit)) {
		kfree(iu);
		return NULL;
	}

	iu->permit = permit;
	/*
	 * 1st reference is dropped after finishing sending a "user" message,
	 * 2nd reference is dropped after confirmation with the response is
	 * returned.
	 * 1st and 2nd can happen in any order, so the rnbd_iu should be
	 * released (rtrs_permit returned to rtrs) only after both
	 * are finished.
	 */
	atomic_set(&iu->refcount, 2);
	init_waitqueue_head(&iu->comp.wait);
	iu->comp.errno = INT_MAX;

	return iu;
}

static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
	if (atomic_dec_and_test(&iu->refcount)) {
		rnbd_put_permit(sess, iu->permit);
		kfree(iu);
	}
}
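
/*
 * Lifetime sketch for an admin iu on the success path (illustrative):
 * the refcount starts at 2; one reference is dropped by the sender
 * after send_usr_msg() returns, the other by the completion callback
 * (one of the msg_*_conf() workers below). Whichever drop comes last
 * returns the permit and frees the iu:
 *
 *	iu = rnbd_get_iu(...);		// refcount == 2
 *	// send, then wait for confirmation
 *	rnbd_put_iu(sess, iu);		// conf path
 *	rnbd_put_iu(sess, iu);		// sender path, frees iu
 */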
"read" : "write", errno); 415f7a7a5c2SJack Wang } 416f7a7a5c2SJack Wang 417f7a7a5c2SJack Wang static void wake_up_iu_comp(struct rnbd_iu *iu, int errno) 418f7a7a5c2SJack Wang { 419f7a7a5c2SJack Wang iu->comp.errno = errno; 420f7a7a5c2SJack Wang wake_up(&iu->comp.wait); 421f7a7a5c2SJack Wang } 422f7a7a5c2SJack Wang 423f7a7a5c2SJack Wang static void msg_conf(void *priv, int errno) 424f7a7a5c2SJack Wang { 425f7a7a5c2SJack Wang struct rnbd_iu *iu = priv; 426f7a7a5c2SJack Wang 427f7a7a5c2SJack Wang iu->errno = errno; 428f7a7a5c2SJack Wang schedule_work(&iu->work); 429f7a7a5c2SJack Wang } 430f7a7a5c2SJack Wang 431f7a7a5c2SJack Wang enum wait_type { 432f7a7a5c2SJack Wang NO_WAIT = 0, 433f7a7a5c2SJack Wang WAIT = 1 434f7a7a5c2SJack Wang }; 435f7a7a5c2SJack Wang 436f7a7a5c2SJack Wang static int send_usr_msg(struct rtrs_clt *rtrs, int dir, 43746a99e0cSGuoqing Jiang struct rnbd_iu *iu, struct kvec *vec, 438f7a7a5c2SJack Wang size_t len, struct scatterlist *sg, unsigned int sg_len, 439f7a7a5c2SJack Wang void (*conf)(struct work_struct *work), 440f7a7a5c2SJack Wang int *errno, enum wait_type wait) 441f7a7a5c2SJack Wang { 442f7a7a5c2SJack Wang int err; 443f7a7a5c2SJack Wang struct rtrs_clt_req_ops req_ops; 444f7a7a5c2SJack Wang 445f7a7a5c2SJack Wang INIT_WORK(&iu->work, conf); 446f7a7a5c2SJack Wang req_ops = (struct rtrs_clt_req_ops) { 447f7a7a5c2SJack Wang .priv = iu, 448f7a7a5c2SJack Wang .conf_fn = msg_conf, 449f7a7a5c2SJack Wang }; 450f7a7a5c2SJack Wang err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit, 45146a99e0cSGuoqing Jiang vec, 1, len, sg, sg_len); 452f7a7a5c2SJack Wang if (!err && wait) { 453f7a7a5c2SJack Wang wait_event(iu->comp.wait, iu->comp.errno != INT_MAX); 454f7a7a5c2SJack Wang *errno = iu->comp.errno; 455f7a7a5c2SJack Wang } else { 456f7a7a5c2SJack Wang *errno = 0; 457f7a7a5c2SJack Wang } 458f7a7a5c2SJack Wang 459f7a7a5c2SJack Wang return err; 460f7a7a5c2SJack Wang } 461f7a7a5c2SJack Wang 462f7a7a5c2SJack Wang static void msg_close_conf(struct work_struct *work) 463f7a7a5c2SJack Wang { 464f7a7a5c2SJack Wang struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work); 465f7a7a5c2SJack Wang struct rnbd_clt_dev *dev = iu->dev; 466f7a7a5c2SJack Wang 467f7a7a5c2SJack Wang wake_up_iu_comp(iu, iu->errno); 468f7a7a5c2SJack Wang rnbd_put_iu(dev->sess, iu); 469f7a7a5c2SJack Wang rnbd_clt_put_dev(dev); 470f7a7a5c2SJack Wang } 471f7a7a5c2SJack Wang 472f7a7a5c2SJack Wang static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait) 473f7a7a5c2SJack Wang { 474f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 475f7a7a5c2SJack Wang struct rnbd_msg_close msg; 476f7a7a5c2SJack Wang struct rnbd_iu *iu; 477f7a7a5c2SJack Wang struct kvec vec = { 478f7a7a5c2SJack Wang .iov_base = &msg, 479f7a7a5c2SJack Wang .iov_len = sizeof(msg) 480f7a7a5c2SJack Wang }; 481f7a7a5c2SJack Wang int err, errno; 482f7a7a5c2SJack Wang 483f7a7a5c2SJack Wang iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT); 484f7a7a5c2SJack Wang if (!iu) 485f7a7a5c2SJack Wang return -ENOMEM; 486f7a7a5c2SJack Wang 487f7a7a5c2SJack Wang iu->buf = NULL; 488f7a7a5c2SJack Wang iu->dev = dev; 489f7a7a5c2SJack Wang 4905a1328d0SGioh Kim sg_alloc_table(&iu->sgt, 1, GFP_KERNEL); 491f7a7a5c2SJack Wang 492f7a7a5c2SJack Wang msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE); 493f7a7a5c2SJack Wang msg.device_id = cpu_to_le32(device_id); 494f7a7a5c2SJack Wang 495f7a7a5c2SJack Wang WARN_ON(!rnbd_clt_get_dev(dev)); 49646a99e0cSGuoqing Jiang err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0, 497f7a7a5c2SJack Wang 

static void msg_close_conf(struct work_struct *work)
{
	struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
	struct rnbd_clt_dev *dev = iu->dev;

	wake_up_iu_comp(iu, iu->errno);
	rnbd_put_iu(dev->sess, iu);
	rnbd_clt_put_dev(dev);
}

static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
{
	struct rnbd_clt_session *sess = dev->sess;
	struct rnbd_msg_close msg;
	struct rnbd_iu *iu;
	struct kvec vec = {
		.iov_base = &msg,
		.iov_len  = sizeof(msg)
	};
	int err, errno;

	iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
	if (!iu)
		return -ENOMEM;

	iu->buf = NULL;
	iu->dev = dev;

	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);

	msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
	msg.device_id = cpu_to_le32(device_id);

	WARN_ON(!rnbd_clt_get_dev(dev));
	err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
			   msg_close_conf, &errno, wait);
	if (err) {
		rnbd_clt_put_dev(dev);
		rnbd_put_iu(sess, iu);
	} else {
		err = errno;
	}

	sg_free_table(&iu->sgt);
	rnbd_put_iu(sess, iu);
	return err;
}

static void msg_open_conf(struct work_struct *work)
{
	struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
	struct rnbd_msg_open_rsp *rsp = iu->buf;
	struct rnbd_clt_dev *dev = iu->dev;
	int errno = iu->errno;

	if (errno) {
		rnbd_clt_err(dev,
			     "Opening failed, server responded: %d\n",
			     errno);
	} else {
		errno = process_msg_open_rsp(dev, rsp);
		if (errno) {
			u32 device_id = le32_to_cpu(rsp->device_id);
			/*
			 * If the server thinks it's fine, but we fail to
			 * process then be nice and send a close to server.
			 */
			(void)send_msg_close(dev, device_id, NO_WAIT);
		}
	}
	kfree(rsp);
	wake_up_iu_comp(iu, errno);
	rnbd_put_iu(dev->sess, iu);
	rnbd_clt_put_dev(dev);
}

static void msg_sess_info_conf(struct work_struct *work)
{
	struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
	struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
	struct rnbd_clt_session *sess = iu->sess;

	if (!iu->errno)
		sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);

	kfree(rsp);
	wake_up_iu_comp(iu, iu->errno);
	rnbd_put_iu(sess, iu);
	rnbd_clt_put_sess(sess);
}

static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
{
	struct rnbd_clt_session *sess = dev->sess;
	struct rnbd_msg_open_rsp *rsp;
	struct rnbd_msg_open msg;
	struct rnbd_iu *iu;
	struct kvec vec = {
		.iov_base = &msg,
		.iov_len  = sizeof(msg)
	};
	int err, errno;

	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
	if (!iu) {
		kfree(rsp);
		return -ENOMEM;
	}

	iu->buf = rsp;
	iu->dev = dev;

	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
	sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));

	msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
	msg.access_mode = dev->access_mode;
	strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));

	WARN_ON(!rnbd_clt_get_dev(dev));
	err = send_usr_msg(sess->rtrs, READ, iu,
			   &vec, sizeof(*rsp), iu->sgt.sgl, 1,
			   msg_open_conf, &errno, wait);
	if (err) {
		rnbd_clt_put_dev(dev);
		rnbd_put_iu(sess, iu);
		kfree(rsp);
	} else {
		err = errno;
	}

	sg_free_table(&iu->sgt);
	rnbd_put_iu(sess, iu);
	return err;
}

static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
{
	struct rnbd_msg_sess_info_rsp *rsp;
	struct rnbd_msg_sess_info msg;
	struct rnbd_iu *iu;
	struct kvec vec = {
		.iov_base = &msg,
		.iov_len  = sizeof(msg)
	};
	int err, errno;

	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
	if (!iu) {
		kfree(rsp);
		return -ENOMEM;
	}

	iu->buf = rsp;
	iu->sess = sess;

	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
	sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));

	msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
	msg.ver = RNBD_PROTO_VER_MAJOR;

	if (!rnbd_clt_get_sess(sess)) {
		/*
		 * That can happen only in one case, when RTRS has
		 * re-established the connection and link_ev() is called, but
		 * the session is almost dead, the last reference on the
		 * session was put and the caller is waiting for RTRS to close
		 * everything.
		 */
		err = -ENODEV;
		goto put_iu;
	}
	err = send_usr_msg(sess->rtrs, READ, iu,
			   &vec, sizeof(*rsp), iu->sgt.sgl, 1,
			   msg_sess_info_conf, &errno, wait);
	if (err) {
		rnbd_clt_put_sess(sess);
put_iu:
		rnbd_put_iu(sess, iu);
		kfree(rsp);
	} else {
		err = errno;
	}
	sg_free_table(&iu->sgt);
	rnbd_put_iu(sess, iu);
	return err;
}

static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
{
	struct rnbd_clt_dev *dev;

	mutex_lock(&sess->lock);
	list_for_each_entry(dev, &sess->devs_list, list) {
		rnbd_clt_err(dev, "Device disconnected.\n");

		mutex_lock(&dev->lock);
		if (dev->dev_state == DEV_STATE_MAPPED)
			dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
		mutex_unlock(&dev->lock);
	}
	mutex_unlock(&sess->lock);
}

static void remap_devs(struct rnbd_clt_session *sess)
{
	struct rnbd_clt_dev *dev;
	struct rtrs_attrs attrs;
	int err;

	/*
	 * Careful here: we are called from RTRS link event directly,
	 * thus we can't send any RTRS request and wait for response
	 * or RTRS will not be able to complete request with failure
	 * if something goes wrong (failing of outstanding requests
	 * happens exactly from the context where we are blocking now).
	 *
	 * So to avoid deadlocks each usr message sent from here must
	 * be asynchronous.
	 */

	err = send_msg_sess_info(sess, NO_WAIT);
	if (err) {
		pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
		return;
	}

	rtrs_clt_query(sess->rtrs, &attrs);
	mutex_lock(&sess->lock);
	sess->max_io_size = attrs.max_io_size;

	list_for_each_entry(dev, &sess->devs_list, list) {
		bool skip;

		mutex_lock(&dev->lock);
		skip = (dev->dev_state == DEV_STATE_INIT);
		mutex_unlock(&dev->lock);
		if (skip)
			/*
			 * When device is establishing connection for the first
			 * time - do not remap, it will be closed soon.
			 */
			continue;

		rnbd_clt_info(dev, "session reconnected, remapping device\n");
		err = send_msg_open(dev, NO_WAIT);
		if (err) {
			rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
			break;
		}
	}
	mutex_unlock(&sess->lock);
}
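
/*
 * Reconnect flow sketch (illustrative): on RTRS_CLT_LINK_EV_RECONNECTED
 * the handler below calls remap_devs(), and every message on that path
 * must stay asynchronous:
 *
 *	link_ev(RECONNECTED)
 *	  -> send_msg_sess_info(sess, NO_WAIT)
 *	  -> for each mapped dev: send_msg_open(dev, NO_WAIT)
 *
 * Waiting here would deadlock, because failed requests are completed
 * from the very context that delivers the link event.
 */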

static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
{
	struct rnbd_clt_session *sess = priv;

	switch (ev) {
	case RTRS_CLT_LINK_EV_DISCONNECTED:
		set_dev_states_to_disconnected(sess);
		break;
	case RTRS_CLT_LINK_EV_RECONNECTED:
		remap_devs(sess);
		break;
	default:
		pr_err("Unknown session event received (%d), session: %s\n",
		       ev, sess->sessname);
	}
}

static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
{
	unsigned int cpu;
	struct rnbd_cpu_qlist *cpu_q;

	for_each_possible_cpu(cpu) {
		cpu_q = per_cpu_ptr(cpu_queues, cpu);

		cpu_q->cpu = cpu;
		INIT_LIST_HEAD(&cpu_q->requeue_list);
		spin_lock_init(&cpu_q->requeue_lock);
	}
}

static void destroy_mq_tags(struct rnbd_clt_session *sess)
{
	if (sess->tag_set.tags)
		blk_mq_free_tag_set(&sess->tag_set);
}

static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
{
	sess->rtrs_ready = true;
	wake_up_all(&sess->rtrs_waitq);
}

static void close_rtrs(struct rnbd_clt_session *sess)
{
	might_sleep();

	if (!IS_ERR_OR_NULL(sess->rtrs)) {
		rtrs_clt_close(sess->rtrs);
		sess->rtrs = NULL;
		wake_up_rtrs_waiters(sess);
	}
}

static void free_sess(struct rnbd_clt_session *sess)
{
	WARN_ON(!list_empty(&sess->devs_list));

	might_sleep();

	close_rtrs(sess);
	destroy_mq_tags(sess);
	if (!list_empty(&sess->list)) {
		mutex_lock(&sess_lock);
		list_del(&sess->list);
		mutex_unlock(&sess_lock);
	}
	free_percpu(sess->cpu_queues);
	free_percpu(sess->cpu_rr);
	mutex_destroy(&sess->lock);
	kfree(sess);
}

static struct rnbd_clt_session *alloc_sess(const char *sessname)
{
	struct rnbd_clt_session *sess;
	int err, cpu;

	sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
	if (!sess)
		return ERR_PTR(-ENOMEM);
	strlcpy(sess->sessname, sessname, sizeof(sess->sessname));
	atomic_set(&sess->busy, 0);
	mutex_init(&sess->lock);
	INIT_LIST_HEAD(&sess->devs_list);
	INIT_LIST_HEAD(&sess->list);
	bitmap_zero(sess->cpu_queues_bm, NR_CPUS);
	init_waitqueue_head(&sess->rtrs_waitq);
	refcount_set(&sess->refcount, 1);

	sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
	if (!sess->cpu_queues) {
		err = -ENOMEM;
		goto err;
	}
	rnbd_init_cpu_qlists(sess->cpu_queues);

	/*
	 * This is a simple percpu variable that stores CPU indices, which are
	 * incremented on each access. We need that for the sake of fairness
	 * to wake up queues in a round-robin manner.
	 */
	sess->cpu_rr = alloc_percpu(int);
	if (!sess->cpu_rr) {
		err = -ENOMEM;
		goto err;
	}
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(sess->cpu_rr, cpu) = cpu;

	return sess;

err:
	free_sess(sess);

	return ERR_PTR(err);
}

static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
{
	wait_event(sess->rtrs_waitq, sess->rtrs_ready);
	if (IS_ERR_OR_NULL(sess->rtrs))
		return -ECONNRESET;

	return 0;
}

static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
	__releases(&sess_lock)
	__acquires(&sess_lock)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
	if (IS_ERR_OR_NULL(sess->rtrs)) {
		finish_wait(&sess->rtrs_waitq, &wait);
		return;
	}
	mutex_unlock(&sess_lock);
	/* loop in caller, see __find_and_get_sess().
	 * You can't leave mutex locked and call schedule(), you will catch a
	 * deadlock with a caller of free_sess(), which has just put the last
	 * reference and is about to take the sess_lock in order to delete
	 * the session from the list.
	 */
	schedule();
	mutex_lock(&sess_lock);
}

static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
	__releases(&sess_lock)
	__acquires(&sess_lock)
{
	struct rnbd_clt_session *sess, *sn;
	int err;

again:
	list_for_each_entry_safe(sess, sn, &sess_list, list) {
		if (strcmp(sessname, sess->sessname))
			continue;

		if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
			/*
			 * No RTRS connection, session is dying.
			 */
			continue;

		if (rnbd_clt_get_sess(sess)) {
			/*
			 * Alive session is found, wait for RTRS connection.
			 */
			mutex_unlock(&sess_lock);
			err = wait_for_rtrs_connection(sess);
			if (err)
				rnbd_clt_put_sess(sess);
			mutex_lock(&sess_lock);

			if (err)
				/* Session is dying, repeat the loop */
				goto again;

			return sess;
		}
		/*
		 * Ref is 0, session is dying, wait for RTRS disconnect
		 * in order to avoid session names clashes.
		 */
		wait_for_rtrs_disconnection(sess);
		/*
		 * RTRS is disconnected and soon session will be freed,
		 * so repeat a loop.
		 */
		goto again;
	}

	return NULL;
}
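
/*
 * Lookup outcomes of __find_and_get_sess(), as a sketch:
 *
 *	- alive session found: a reference is taken, the RTRS connection
 *	  is awaited, and the session is returned;
 *	- dying session (refcount already zero): wait for the RTRS
 *	  disconnect, then retry, so that a new session with the same
 *	  name can be created without a clash;
 *	- nothing found: NULL, and the caller allocates a fresh session.
 */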

static struct
rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
{
	struct rnbd_clt_session *sess = NULL;

	mutex_lock(&sess_lock);
	sess = __find_and_get_sess(sessname);
	if (!sess) {
		sess = alloc_sess(sessname);
		if (IS_ERR(sess)) {
			mutex_unlock(&sess_lock);
			return sess;
		}
		list_add(&sess->list, &sess_list);
		*first = true;
	} else
		*first = false;
	mutex_unlock(&sess_lock);

	return sess;
}

static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
{
	struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;

	if (dev->read_only && (mode & FMODE_WRITE))
		return -EPERM;

	if (dev->dev_state == DEV_STATE_UNMAPPED ||
	    !rnbd_clt_get_dev(dev))
		return -EIO;

	return 0;
}

static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
{
	struct rnbd_clt_dev *dev = gen->private_data;

	rnbd_clt_put_dev(dev);
}

static int rnbd_client_getgeo(struct block_device *block_device,
			      struct hd_geometry *geo)
{
	u64 size;
	struct rnbd_clt_dev *dev;

	dev = block_device->bd_disk->private_data;
	size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
	geo->cylinders = size >> 6;	/* size/64 */
	geo->heads = 4;
	geo->sectors = 16;
	geo->start = 0;

	return 0;
}

static const struct block_device_operations rnbd_client_ops = {
	.owner	 = THIS_MODULE,
	.open	 = rnbd_client_open,
	.release = rnbd_client_release,
	.getgeo	 = rnbd_client_getgeo
};

/* The amount of data that belongs to an I/O and the amount of data that
 * should be read or written to the disk (bi_size) can differ.
 *
 * E.g. when WRITE_SAME is used, only a small amount of data is
 * transferred that is then written repeatedly over a lot of sectors.
 *
 * Get the size of data to be transferred via RTRS by summing up the size
 * of the scatter-gather list entries.
 */
static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
{
	struct scatterlist *sg;
	size_t tsize = 0;
	int i;

	for_each_sg(sglist, sg, len, i)
		tsize += sg->length;
	return tsize;
}

static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
				    struct request *rq,
				    struct rnbd_iu *iu)
{
	struct rtrs_clt *rtrs = dev->sess->rtrs;
	struct rtrs_permit *permit = iu->permit;
	struct rnbd_msg_io msg;
	struct rtrs_clt_req_ops req_ops;
	unsigned int sg_cnt = 0;
	struct kvec vec;
	size_t size;
	int err;

	iu->rq = rq;
	iu->dev = dev;
	msg.sector = cpu_to_le64(blk_rq_pos(rq));
	msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
	msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
	msg.prio = cpu_to_le16(req_get_ioprio(rq));

	/*
	 * We only support discards with a single segment for now.
	 * See queue limits.
	 */
	if (req_op(rq) != REQ_OP_DISCARD)
		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);

	if (sg_cnt == 0)
		sg_mark_end(&iu->sgt.sgl[0]);

	msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
	msg.device_id = cpu_to_le32(dev->device_id);

	vec = (struct kvec) {
		.iov_base = &msg,
		.iov_len  = sizeof(msg)
	};
	size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt);
	req_ops = (struct rtrs_clt_req_ops) {
		.priv = iu,
		.conf_fn = msg_io_conf,
	};
	err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
			       &vec, 1, size, iu->sgt.sgl, sg_cnt);
	if (unlikely(err)) {
		rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
				err);
		return err;
	}

	return 0;
}
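
/*
 * Example (illustrative only): for a WRITE_SAME of 1 MiB built from a
 * single 4 KiB pattern page, blk_rq_bytes(rq) is 1 MiB (sent as
 * msg.bi_size), while rnbd_clt_get_sg_size() sums the sg entries and
 * yields only 4 KiB - the amount of data that actually travels over
 * RTRS.
 */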

/**
 * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
 * @dev: Device to be checked
 * @q: Queue to be added to the requeue list if required
 *
 * Description:
 *     If the session is busy, that means someone will requeue us when
 *     resources are freed. If the session is not doing anything, the device
 *     is not added to the list and false is returned.
 */
static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
					struct rnbd_queue *q)
{
	struct rnbd_clt_session *sess = dev->sess;
	struct rnbd_cpu_qlist *cpu_q;
	unsigned long flags;
	bool added = true;
	bool need_set;

	cpu_q = get_cpu_ptr(sess->cpu_queues);
	spin_lock_irqsave(&cpu_q->requeue_lock, flags);

	if (likely(!test_and_set_bit_lock(0, &q->in_list))) {
		if (WARN_ON(!list_empty(&q->requeue_list)))
			goto unlock;

		need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
		if (need_set) {
			set_bit(cpu_q->cpu, sess->cpu_queues_bm);
			/* Paired with rnbd_put_permit(). Set a bit first
			 * and then observe the busy counter.
			 */
			smp_mb__before_atomic();
		}
		if (likely(atomic_read(&sess->busy))) {
			list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
		} else {
			/* Very unlikely, but possible: busy counter was
			 * observed as zero. Drop all bits and return
			 * false to restart the queue by ourselves.
			 */
			if (need_set)
				clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
			clear_bit_unlock(0, &q->in_list);
			added = false;
		}
	}
unlock:
	spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
	put_cpu_ptr(sess->cpu_queues);

	return added;
}
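
/*
 * Race sketch for the "busy == 0" fallback above (illustrative):
 *
 *	CPU A (queue_rq, no permit)     CPU B (last rnbd_put_permit)
 *	  set_bit(cpu, bm)                atomic_dec(&busy) -> 0
 *	  smp_mb__before_atomic()         smp_mb__after_atomic()
 *	  read busy                       scan bm, rerun queues
 *
 * The barriers guarantee at least one side notices the other: either
 * CPU B sees the freshly set bit and reruns the queue, or CPU A reads
 * busy == 0, returns false and the caller reruns the hctx itself.
 */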

static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
				       struct blk_mq_hw_ctx *hctx,
				       int delay)
{
	struct rnbd_queue *q = hctx->driver_data;

	if (delay != RNBD_DELAY_IFBUSY)
		blk_mq_delay_run_hw_queue(hctx, delay);
	else if (unlikely(!rnbd_clt_dev_add_to_requeue(dev, q)))
		/*
		 * If session is not busy we have to restart
		 * the queue ourselves.
		 */
		blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
}

static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
	struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
	int err;
	blk_status_t ret = BLK_STS_IOERR;

	if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
		return BLK_STS_IOERR;

	iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
				     RTRS_PERMIT_NOWAIT);
	if (unlikely(!iu->permit)) {
		rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
		return BLK_STS_RESOURCE;
	}

	iu->sgt.sgl = iu->first_sgl;
	err = sg_alloc_table_chained(&iu->sgt,
				     /* Even if the request has no segment,
				      * the sglist must have one entry at
				      * least */
				     blk_rq_nr_phys_segments(rq) ? : 1,
				     iu->sgt.sgl,
				     RNBD_INLINE_SG_CNT);
	if (err) {
		rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err);
		rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
		rnbd_put_permit(dev->sess, iu->permit);
		return BLK_STS_RESOURCE;
	}

	blk_mq_start_request(rq);
	err = rnbd_client_xfer_request(dev, rq, iu);
	if (likely(err == 0))
		return BLK_STS_OK;
	if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
		rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
		ret = BLK_STS_RESOURCE;
	}
	sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
	rnbd_put_permit(dev->sess, iu->permit);
	return ret;
}

static struct blk_mq_ops rnbd_mq_ops = {
	.queue_rq	= rnbd_queue_rq,
	.complete	= rnbd_softirq_done_fn,
};

static int setup_mq_tags(struct rnbd_clt_session *sess)
{
	struct blk_mq_tag_set *tag_set = &sess->tag_set;

	memset(tag_set, 0, sizeof(*tag_set));
	tag_set->ops = &rnbd_mq_ops;
	tag_set->queue_depth = sess->queue_depth;
	tag_set->numa_node = NUMA_NO_NODE;
	tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
			 BLK_MQ_F_TAG_QUEUE_SHARED;
	tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
	tag_set->nr_hw_queues = num_online_cpus();

	return blk_mq_alloc_tag_set(tag_set);
}
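
/*
 * Sketch of the intent, inferred from this file: there is one tag set
 * per session, shared by every device mapped through that session
 * (BLK_MQ_F_TAG_QUEUE_SHARED). Each request_queue created from this
 * set in setup_mq_dev() competes for the same sess->queue_depth tags,
 * mirroring the shared RTRS permit pool queried from the transport.
 */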

static struct rnbd_clt_session *
find_and_get_or_create_sess(const char *sessname,
			    const struct rtrs_addr *paths,
			    size_t path_cnt, u16 port_nr)
{
	struct rnbd_clt_session *sess;
	struct rtrs_attrs attrs;
	int err;
	bool first;
	struct rtrs_clt_ops rtrs_ops;

	sess = find_or_create_sess(sessname, &first);
	if (sess == ERR_PTR(-ENOMEM))
		return ERR_PTR(-ENOMEM);
	else if (!first)
		return sess;

	if (!path_cnt) {
		pr_err("Session %s not found, and path parameter not given", sessname);
		err = -ENXIO;
		goto put_sess;
	}

	rtrs_ops = (struct rtrs_clt_ops) {
		.priv = sess,
		.link_ev = rnbd_clt_link_ev,
	};
	/*
	 * Nothing was found, establish rtrs connection and proceed further.
	 */
	sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
				   paths, path_cnt, port_nr,
				   0, /* Do not use pdu of rtrs */
				   RECONNECT_DELAY, BMAX_SEGMENTS,
				   BLK_MAX_SEGMENT_SIZE,
				   MAX_RECONNECTS);
	if (IS_ERR(sess->rtrs)) {
		err = PTR_ERR(sess->rtrs);
		goto wake_up_and_put;
	}
	rtrs_clt_query(sess->rtrs, &attrs);
	sess->max_io_size = attrs.max_io_size;
	sess->queue_depth = attrs.queue_depth;

	err = setup_mq_tags(sess);
	if (err)
		goto close_rtrs;

	err = send_msg_sess_info(sess, WAIT);
	if (err)
		goto close_rtrs;

	wake_up_rtrs_waiters(sess);

	return sess;

close_rtrs:
	close_rtrs(sess);
put_sess:
	rnbd_clt_put_sess(sess);

	return ERR_PTR(err);

wake_up_and_put:
	wake_up_rtrs_waiters(sess);
	goto put_sess;
}

static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
				      struct rnbd_queue *q,
				      struct blk_mq_hw_ctx *hctx)
{
	INIT_LIST_HEAD(&q->requeue_list);
	q->dev = dev;
	q->hctx = hctx;
}
static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
				      struct rnbd_queue *q,
				      struct blk_mq_hw_ctx *hctx)
{
	INIT_LIST_HEAD(&q->requeue_list);
	q->dev = dev;
	q->hctx = hctx;
}

static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
{
	int i;
	struct blk_mq_hw_ctx *hctx;
	struct rnbd_queue *q;

	queue_for_each_hw_ctx(dev->queue, hctx, i) {
		q = &dev->hw_queues[i];
		rnbd_init_hw_queue(dev, q, hctx);
		hctx->driver_data = q;
	}
}

static int setup_mq_dev(struct rnbd_clt_dev *dev)
{
	dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
	if (IS_ERR(dev->queue)) {
		rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n",
			     PTR_ERR(dev->queue));
		return PTR_ERR(dev->queue);
	}
	rnbd_init_mq_hw_queues(dev);
	return 0;
}

static void setup_request_queue(struct rnbd_clt_dev *dev)
{
	blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
	blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
	blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
	blk_queue_max_write_same_sectors(dev->queue,
					 dev->max_write_same_sectors);

	/*
	 * We don't support discards to "discontiguous" segments
	 * in one request.
	 */
	blk_queue_max_discard_segments(dev->queue, 1);

	blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
	dev->queue->limits.discard_granularity = dev->discard_granularity;
	dev->queue->limits.discard_alignment = dev->discard_alignment;
	if (dev->max_discard_sectors)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
	if (dev->secure_discard)
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);

	blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
	blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
	blk_queue_max_segments(dev->queue, dev->max_segments);
	blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
	blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
	blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
	dev->queue->queuedata = dev;
}
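
/*
 * A note on the limits above, read from this setup rather than from
 * documented guarantees: the SZ_4K virt boundary keeps each request's
 * scatterlist free of gaps, which suits the RDMA transfers done by the
 * RTRS transport, and max_discard_segments(1) matches the comment
 * above it: one contiguous range per discard request.
 */
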
static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
{
	dev->gd->major		= rnbd_client_major;
	dev->gd->first_minor	= idx << RNBD_PART_BITS;
	dev->gd->fops		= &rnbd_client_ops;
	dev->gd->queue		= dev->queue;
	dev->gd->private_data	= dev;
	snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
		 idx);
	pr_debug("disk_name=%s, capacity=%zu\n",
		 dev->gd->disk_name,
		 dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
		 );

	set_capacity(dev->gd, dev->nsectors);

	if (dev->access_mode == RNBD_ACCESS_RO) {
		dev->read_only = true;
		set_disk_ro(dev->gd, true);
	} else {
		dev->read_only = false;
	}

	if (!dev->rotational)
		blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
}

static int rnbd_client_setup_device(struct rnbd_clt_session *sess,
				    struct rnbd_clt_dev *dev, int idx)
{
	int err;

	dev->size = dev->nsectors * dev->logical_block_size;

	err = setup_mq_dev(dev);
	if (err)
		return err;

	setup_request_queue(dev);

	dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE);
	if (!dev->gd) {
		blk_cleanup_queue(dev->queue);
		return -ENOMEM;
	}

	rnbd_clt_setup_gen_disk(dev, idx);

	return 0;
}
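
/**
 * init_dev() - allocate and initialize an rnbd client device
 * @sess:	session the device belongs to (an extra reference is taken)
 * @access_mode:	access mode to request from the server, e.g. RNBD_ACCESS_RO
 * @pathname:	path of the block device on the server side
 *
 * Allocates the device together with one rnbd_queue per possible CPU,
 * reserves an index in index_ida (used to build the minor number) and
 * duplicates @pathname. The device starts in DEV_STATE_INIT with a
 * refcount of one; the matching put is rnbd_clt_put_dev().
 */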
disappear. 1426f7a7a5c2SJack Wang */ 1427f7a7a5c2SJack Wang WARN_ON(!rnbd_clt_get_sess(sess)); 1428f7a7a5c2SJack Wang 1429f7a7a5c2SJack Wang return dev; 1430f7a7a5c2SJack Wang 1431f7a7a5c2SJack Wang out_queues: 1432f7a7a5c2SJack Wang kfree(dev->hw_queues); 1433f7a7a5c2SJack Wang out_alloc: 1434f7a7a5c2SJack Wang kfree(dev); 1435f7a7a5c2SJack Wang return ERR_PTR(ret); 1436f7a7a5c2SJack Wang } 1437f7a7a5c2SJack Wang 143891f4acb2SGuoqing Jiang static bool __exists_dev(const char *pathname, const char *sessname) 1439f7a7a5c2SJack Wang { 1440f7a7a5c2SJack Wang struct rnbd_clt_session *sess; 1441f7a7a5c2SJack Wang struct rnbd_clt_dev *dev; 1442f7a7a5c2SJack Wang bool found = false; 1443f7a7a5c2SJack Wang 1444f7a7a5c2SJack Wang list_for_each_entry(sess, &sess_list, list) { 144591f4acb2SGuoqing Jiang if (sessname && strncmp(sess->sessname, sessname, 144691f4acb2SGuoqing Jiang sizeof(sess->sessname))) 144791f4acb2SGuoqing Jiang continue; 1448f7a7a5c2SJack Wang mutex_lock(&sess->lock); 1449f7a7a5c2SJack Wang list_for_each_entry(dev, &sess->devs_list, list) { 145064e8a6ecSMd Haris Iqbal if (strlen(dev->pathname) == strlen(pathname) && 145164e8a6ecSMd Haris Iqbal !strcmp(dev->pathname, pathname)) { 1452f7a7a5c2SJack Wang found = true; 1453f7a7a5c2SJack Wang break; 1454f7a7a5c2SJack Wang } 1455f7a7a5c2SJack Wang } 1456f7a7a5c2SJack Wang mutex_unlock(&sess->lock); 1457f7a7a5c2SJack Wang if (found) 1458f7a7a5c2SJack Wang break; 1459f7a7a5c2SJack Wang } 1460f7a7a5c2SJack Wang 1461f7a7a5c2SJack Wang return found; 1462f7a7a5c2SJack Wang } 1463f7a7a5c2SJack Wang 146491f4acb2SGuoqing Jiang static bool exists_devpath(const char *pathname, const char *sessname) 1465f7a7a5c2SJack Wang { 1466f7a7a5c2SJack Wang bool found; 1467f7a7a5c2SJack Wang 1468f7a7a5c2SJack Wang mutex_lock(&sess_lock); 146991f4acb2SGuoqing Jiang found = __exists_dev(pathname, sessname); 1470f7a7a5c2SJack Wang mutex_unlock(&sess_lock); 1471f7a7a5c2SJack Wang 1472f7a7a5c2SJack Wang return found; 1473f7a7a5c2SJack Wang } 1474f7a7a5c2SJack Wang 1475f7a7a5c2SJack Wang static bool insert_dev_if_not_exists_devpath(const char *pathname, 1476f7a7a5c2SJack Wang struct rnbd_clt_session *sess, 1477f7a7a5c2SJack Wang struct rnbd_clt_dev *dev) 1478f7a7a5c2SJack Wang { 1479f7a7a5c2SJack Wang bool found; 1480f7a7a5c2SJack Wang 1481f7a7a5c2SJack Wang mutex_lock(&sess_lock); 148291f4acb2SGuoqing Jiang found = __exists_dev(pathname, sess->sessname); 1483f7a7a5c2SJack Wang if (!found) { 1484f7a7a5c2SJack Wang mutex_lock(&sess->lock); 1485f7a7a5c2SJack Wang list_add_tail(&dev->list, &sess->devs_list); 1486f7a7a5c2SJack Wang mutex_unlock(&sess->lock); 1487f7a7a5c2SJack Wang } 1488f7a7a5c2SJack Wang mutex_unlock(&sess_lock); 1489f7a7a5c2SJack Wang 1490f7a7a5c2SJack Wang return found; 1491f7a7a5c2SJack Wang } 1492f7a7a5c2SJack Wang 1493f7a7a5c2SJack Wang static void delete_dev(struct rnbd_clt_dev *dev) 1494f7a7a5c2SJack Wang { 1495f7a7a5c2SJack Wang struct rnbd_clt_session *sess = dev->sess; 1496f7a7a5c2SJack Wang 1497f7a7a5c2SJack Wang mutex_lock(&sess->lock); 1498f7a7a5c2SJack Wang list_del(&dev->list); 1499f7a7a5c2SJack Wang mutex_unlock(&sess->lock); 1500f7a7a5c2SJack Wang } 1501f7a7a5c2SJack Wang 1502f7a7a5c2SJack Wang struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname, 1503f7a7a5c2SJack Wang struct rtrs_addr *paths, 1504f7a7a5c2SJack Wang size_t path_cnt, u16 port_nr, 1505f7a7a5c2SJack Wang const char *pathname, 1506f7a7a5c2SJack Wang enum rnbd_access_mode access_mode) 1507f7a7a5c2SJack Wang { 1508f7a7a5c2SJack Wang struct rnbd_clt_session *sess; 
struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
					 struct rtrs_addr *paths,
					 size_t path_cnt, u16 port_nr,
					 const char *pathname,
					 enum rnbd_access_mode access_mode)
{
	struct rnbd_clt_session *sess;
	struct rnbd_clt_dev *dev;
	int ret;

	if (unlikely(exists_devpath(pathname, sessname)))
		return ERR_PTR(-EEXIST);

	sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr);
	if (IS_ERR(sess))
		return ERR_CAST(sess);

	dev = init_dev(sess, access_mode, pathname);
	if (IS_ERR(dev)) {
		pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
		       pathname, sess->sessname, PTR_ERR(dev));
		ret = PTR_ERR(dev);
		goto put_sess;
	}
	if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) {
		ret = -EEXIST;
		goto put_dev;
	}
	ret = send_msg_open(dev, WAIT);
	if (ret) {
		rnbd_clt_err(dev,
			     "map_device: failed, can't open remote device, err: %d\n",
			     ret);
		goto del_dev;
	}
	mutex_lock(&dev->lock);
	pr_debug("Opened remote device: session=%s, path='%s'\n",
		 sess->sessname, pathname);
	ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id);
	if (ret) {
		rnbd_clt_err(dev,
			     "map_device: Failed to configure device, err: %d\n",
			     ret);
		mutex_unlock(&dev->lock);
		goto send_close;
	}

	rnbd_clt_info(dev,
		      "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
		      dev->gd->disk_name, dev->nsectors,
		      dev->logical_block_size, dev->physical_block_size,
		      dev->max_write_same_sectors, dev->max_discard_sectors,
		      dev->discard_granularity, dev->discard_alignment,
		      dev->secure_discard, dev->max_segments,
		      dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);

	mutex_unlock(&dev->lock);

	add_disk(dev->gd);
	rnbd_clt_put_sess(sess);

	return dev;

send_close:
	send_msg_close(dev, dev->device_id, WAIT);
del_dev:
	delete_dev(dev);
put_dev:
	rnbd_clt_put_dev(dev);
put_sess:
	rnbd_clt_put_sess(sess);

	return ERR_PTR(ret);
}
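
/*
 * Note that the error labels in rnbd_clt_map_device() unwind in reverse
 * order of construction: a device the server already opened is closed
 * remotely (send_close) before it is unlinked from the session
 * (del_dev), released (put_dev) and the session reference is dropped
 * (put_sess).
 */
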
static void destroy_gen_disk(struct rnbd_clt_dev *dev)
{
	del_gendisk(dev->gd);
	blk_cleanup_queue(dev->queue);
	put_disk(dev->gd);
}

static void destroy_sysfs(struct rnbd_clt_dev *dev,
			  const struct attribute *sysfs_self)
{
	rnbd_clt_remove_dev_symlink(dev);
	if (dev->kobj.state_initialized) {
		if (sysfs_self)
			/* To avoid a deadlock, remove the self entry first */
			sysfs_remove_file_self(&dev->kobj, sysfs_self);
		kobject_del(&dev->kobj);
		kobject_put(&dev->kobj);
	}
}

int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
			  const struct attribute *sysfs_self)
{
	struct rnbd_clt_session *sess = dev->sess;
	int refcount, ret = 0;
	bool was_mapped;

	mutex_lock(&dev->lock);
	if (dev->dev_state == DEV_STATE_UNMAPPED) {
		rnbd_clt_info(dev, "Device is already being unmapped\n");
		ret = -EALREADY;
		goto err;
	}
	refcount = refcount_read(&dev->refcount);
	if (!force && refcount > 1) {
		rnbd_clt_err(dev,
			     "Closing device failed, device is in use, (%d device users)\n",
			     refcount - 1);
		ret = -EBUSY;
		goto err;
	}
	was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
	dev->dev_state = DEV_STATE_UNMAPPED;
	mutex_unlock(&dev->lock);

	delete_dev(dev);
	destroy_sysfs(dev, sysfs_self);
	destroy_gen_disk(dev);
	if (was_mapped && sess->rtrs)
		send_msg_close(dev, dev->device_id, WAIT);

	rnbd_clt_info(dev, "Device is unmapped\n");

	/* Likely the last reference put */
	rnbd_clt_put_dev(dev);

	/*
	 * The device and session may be gone at this point!
	 */

	return 0;
err:
	mutex_unlock(&dev->lock);

	return ret;
}
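
/**
 * rnbd_clt_remap_device() - re-open a device after a session reconnect
 * @dev:	device expected to be in DEV_STATE_MAPPED_DISCONNECTED
 *
 * Resends the open message for a device that lost its server. Returns
 * -ENODEV if the device was unmapped in the meantime, -EALREADY if it
 * is already mapped, and -EBUSY for any other state.
 */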
int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
{
	int err;

	mutex_lock(&dev->lock);
	if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
		err = 0;
	else if (dev->dev_state == DEV_STATE_UNMAPPED)
		err = -ENODEV;
	else if (dev->dev_state == DEV_STATE_MAPPED)
		err = -EALREADY;
	else
		err = -EBUSY;
	mutex_unlock(&dev->lock);
	if (!err) {
		rnbd_clt_info(dev, "Remapping device.\n");
		err = send_msg_open(dev, WAIT);
		if (err)
			rnbd_clt_err(dev, "remap_device: %d\n", err);
	}

	return err;
}

static void unmap_device_work(struct work_struct *work)
{
	struct rnbd_clt_dev *dev;

	dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
	rnbd_clt_unmap_device(dev, true, NULL);
}

static void rnbd_destroy_sessions(void)
{
	struct rnbd_clt_session *sess, *sn;
	struct rnbd_clt_dev *dev, *tn;

	/* First forbid access through the sysfs interface */
	rnbd_clt_destroy_default_group();
	rnbd_clt_destroy_sysfs_files();

	/*
	 * At this point there is no concurrent access to the sessions list
	 * and devices list:
	 *   1. New sessions or devices can't be created - the session sysfs
	 *      files were removed.
	 *   2. Devices or sessions can't be removed - the module reference
	 *      is taken into account in the unmap device sysfs callback.
	 *   3. No IO requests are in flight - each file open of a block_dev
	 *      increases the module reference in get_disk().
	 *
	 * But there can still be user requests in flight, sent by the
	 * asynchronous send_msg_*() functions, so the RTRS session must be
	 * explicitly closed before the devices are unmapped.
	 */

	list_for_each_entry_safe(sess, sn, &sess_list, list) {
		WARN_ON(!rnbd_clt_get_sess(sess));
		close_rtrs(sess);
		list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
			/*
			 * Here unmap happens in parallel for only one reason:
			 * blk_cleanup_queue() takes around half a second, so
			 * with a huge number of devices the whole module
			 * unload procedure takes minutes.
			 */
			INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
			queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
		}
		rnbd_clt_put_sess(sess);
	}
	/* Wait for all scheduled unmap work */
	flush_workqueue(system_long_wq);
	WARN_ON(!list_empty(&sess_list));
}

static int __init rnbd_client_init(void)
{
	int err = 0;

	BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
	BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
	rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
	if (rnbd_client_major <= 0) {
		pr_err("Failed to load module, block device registration failed\n");
		return -EBUSY;
	}

	err = rnbd_clt_create_sysfs_files();
	if (err) {
		pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
		       err);
		unregister_blkdev(rnbd_client_major, "rnbd");
	}

	return err;
}

static void __exit rnbd_client_exit(void)
{
	rnbd_destroy_sessions();
	unregister_blkdev(rnbd_client_major, "rnbd");
	ida_destroy(&index_ida);
}

module_init(rnbd_client_init);
module_exit(rnbd_client_exit);
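
/*
 * Usage sketch, illustrative rather than authoritative: devices are
 * mapped and unmapped from user space through the sysfs files created
 * by rnbd_clt_create_sysfs_files(), along the lines of:
 *
 *	echo "sessname=s1 path=ip:192.168.0.1 device_path=/dev/sda" > \
 *		/sys/devices/virtual/rnbd-client/ctl/map_device
 *
 * The exact control file locations and argument syntax are defined by
 * rnbd-clt-sysfs.c and the ABI documentation, not by the lines above.
 */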