/*******************************************************************************
 * SCSI RDMA Protocol lib functions
 *
 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 ***********************************************************************/

#define pr_fmt(fmt) "libsrp: " fmt

#include <linux/printk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <scsi/srp.h>
#include <target/target_core_base.h>
#include "libsrp.h"
#include "ibmvscsi_tgt.h"

static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
			     struct srp_buf **ring)
{
	struct iu_entry *iue;
	int i;

	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;
	q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
	if (!q->items)
		goto free_pool;

	spin_lock_init(&q->lock);
	kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

	for (i = 0, iue = q->items; i < max; i++) {
		kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
		iue->sbuf = ring[i];
		iue++;
	}
	return 0;

free_pool:
	kfree(q->pool);
	return -ENOMEM;
}

static void srp_iu_pool_free(struct srp_queue *q)
{
	kfree(q->items);
	kfree(q->pool);
}

static struct srp_buf **srp_ring_alloc(struct device *dev,
				       size_t max, size_t size)
{
	struct srp_buf **ring;
	int i;

	ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
	if (!ring)
		return NULL;

	for (i = 0; i < max; i++) {
		ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
		if (!ring[i])
			goto out;
		ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
						  GFP_KERNEL);
		if (!ring[i]->buf)
			goto out;
	}
	return ring;

out:
	for (i = 0; i < max && ring[i]; i++) {
		if (ring[i]->buf) {
			dma_free_coherent(dev, size, ring[i]->buf,
					  ring[i]->dma);
		}
		kfree(ring[i]);
	}
	kfree(ring);

	return NULL;
}

static void srp_ring_free(struct device *dev, struct srp_buf **ring,
			  size_t max, size_t size)
{
	int i;

	for (i = 0; i < max; i++) {
		dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
		kfree(ring[i]);
	}
	kfree(ring);
}

int srp_target_alloc(struct srp_target *target, struct device *dev,
		     size_t nr, size_t iu_size)
{
	int err;

	spin_lock_init(&target->lock);

	target->dev = dev;

	target->srp_iu_size = iu_size;
	target->rx_ring_size = nr;
	target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
	if (!target->rx_ring)
		return -ENOMEM;
	err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
	if (err)
		goto free_ring;

	dev_set_drvdata(target->dev, target);
	return 0;

free_ring:
	srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
	return -ENOMEM;
}
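
/*
 * Typical lifecycle (a sketch only; the names "vdev", "MAX_IU" and
 * "SRP_MAX_IU_LEN" are illustrative, not taken from an in-tree caller):
 * a transport driver allocates the receive ring and IU pool once per
 * target, then frees them in reverse order on teardown.
 *
 *	struct srp_target *target = kzalloc(sizeof(*target), GFP_KERNEL);
 *
 *	if (!target)
 *		return -ENOMEM;
 *	err = srp_target_alloc(target, &vdev->dev, MAX_IU, SRP_MAX_IU_LEN);
 *	if (err) {
 *		kfree(target);
 *		return err;
 *	}
 *	...
 *	srp_target_free(target);
 *	kfree(target);
 */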

void srp_target_free(struct srp_target *target)
{
	dev_set_drvdata(target->dev, NULL);
	srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
		      target->srp_iu_size);
	srp_iu_pool_free(&target->iu_queue);
}

struct iu_entry *srp_iu_get(struct srp_target *target)
{
	struct iu_entry *iue = NULL;

	if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
			     sizeof(void *),
			     &target->iu_queue.lock) != sizeof(void *)) {
		WARN_ONCE(1, "unexpected fifo state");
		return NULL;
	}
	if (!iue)
		return iue;
	iue->target = target;
	iue->flags = 0;
	return iue;
}

void srp_iu_put(struct iu_entry *iue)
{
	kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
			sizeof(void *), &iue->target->iu_queue.lock);
}

static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
			   enum dma_data_direction dir, srp_rdma_t rdma_io,
			   int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct scatterlist *sg = NULL;
	int err, nsg = 0, len;

	if (dma_map) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("failed to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			return 0;
		}
		len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
	} else {
		len = be32_to_cpu(md->len);
	}

	err = rdma_io(cmd, sg, nsg, md, 1, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

	return err;
}

static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;
	int err = 0;
	int nmd, nsg = 0, len;

	if (dma_map || ext_desc) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
	}

	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			pr_err("Can't get dma memory %u\n",
			       be32_to_cpu(id->table_desc.len));
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
		err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			pr_err("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		pr_err("This command uses an external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("failed to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			err = -EIO;
			goto free_mem;
		}
		len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
	} else {
		len = be32_to_cpu(id->len);
	}

	err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
	if (token && dma_map) {
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);
	}
	return err;
}
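
/*
 * The SRP_CMD buffer-format byte packs two descriptor formats: the upper
 * nibble describes the data-out buffer and the lower nibble the data-in
 * buffer.  Each nibble is one of SRP_NO_DATA_DESC, SRP_DATA_DESC_DIRECT
 * or SRP_DATA_DESC_INDIRECT (see <scsi/srp.h>):
 *
 *	data_out_format = cmd->buf_fmt >> 4;
 *	data_in_format  = cmd->buf_fmt & 0xf;
 */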
static int data_out_desc_size(struct srp_cmd *cmd)
{
	int size = 0;
	u8 fmt = cmd->buf_fmt >> 4;

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		size = sizeof(struct srp_direct_buf);
		break;
	case SRP_DATA_DESC_INDIRECT:
		size = sizeof(struct srp_indirect_buf) +
			sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
		break;
	default:
		pr_err("client error. Invalid data_out_format %x\n", fmt);
		break;
	}
	return size;
}

/*
 * TODO: this can be called multiple times for a single command if it
 * has very long data.
 */
int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
		      srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	enum dma_data_direction dir;
	int offset, err = 0;
	u8 format;

	if (!cmd->se_cmd.t_data_nents)
		return 0;

	offset = srp_cmd->add_cdb_len & ~3;

	dir = srp_cmd_direction(srp_cmd);
	if (dir == DMA_FROM_DEVICE)
		offset += data_out_desc_size(srp_cmd);

	if (dir == DMA_TO_DEVICE)
		format = srp_cmd->buf_fmt >> 4;
	else
		format = srp_cmd->buf_fmt & ((1U << 4) - 1);

	switch (format) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
		err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
		err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
					ext_desc);
		break;
	default:
		pr_err("Unknown format %d %x\n", dir, format);
		err = -EINVAL;
	}

	return err;
}

u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	u64 len = 0;
	uint offset = cmd->add_cdb_len & ~3;
	u8 fmt;

	if (dir == DMA_TO_DEVICE) {
		fmt = cmd->buf_fmt >> 4;
	} else {
		fmt = cmd->buf_fmt & ((1U << 4) - 1);
		offset += data_out_desc_size(cmd);
	}

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)(cmd->add_data + offset);
		len = be32_to_cpu(md->len);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)(cmd->add_data + offset);
		len = be32_to_cpu(id->len);
		break;
	default:
		pr_err("invalid data format %x\n", fmt);
		break;
	}
	return len;
}
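
/*
 * Per the SRP specification, the two low bits of add_cdb_len are reserved
 * and the remaining bits count the additional CDB in 4-byte words, so
 * "add_cdb_len & ~3" (as used above and below) is the additional CDB
 * length in bytes, i.e. the offset of the first data descriptor inside
 * add_data.
 */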
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		       u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	uint add_cdb_offset;
	int rc;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so
	 * check whether srp_cmd::add_data has been declared as a byte
	 * pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	rc = 0;
	*data_len = 0;

	*dir = DMA_NONE;

	if (srp_cmd->buf_fmt & 0xf)
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		*dir = DMA_TO_DEVICE;

	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		*data_len = be32_to_cpu(idb->len);
	}
	return rc;
}

MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");
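
/*
 * A minimal sketch of how the entry points above combine when a new
 * SRP_CMD IU arrives (hypothetical caller; "my_rdma_io" stands in for a
 * transport callback with the srp_rdma_t signature from libsrp.h):
 *
 *	enum dma_data_direction dir;
 *	u64 data_len;
 *
 *	srp_get_desc_table(srp_cmd, &dir, &data_len);
 *	...hand the command to the target core, then move the payload:
 *	err = srp_transfer_data(cmd, srp_cmd, my_rdma_io, 1, 1);
 */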