1 /*
2  * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
3  * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *	- Redistributions of source code must retain the above
16  *	  copyright notice, this list of conditions and the following
17  *	  disclaimer.
18  *
19  *	- Redistributions in binary form must reproduce the above
20  *	  copyright notice, this list of conditions and the following
21  *	  disclaimer in the documentation and/or other materials
22  *	  provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
35 #include <linux/mm.h>
36 #include <linux/scatterlist.h>
37 #include <linux/kfifo.h>
38 #include <scsi/scsi_cmnd.h>
39 #include <scsi/scsi_host.h>
40 
41 #include "iscsi_iser.h"
42 
43 /* Register user buffer memory and initialize passive rdma
44  *  dto descriptor. Data size is stored in
45  *  task->data[ISER_DIR_IN].data_len, Protection size
46  *  os stored in task->prot[ISER_DIR_IN].data_len
47  */
48 static int iser_prepare_read_cmd(struct iscsi_task *task)
49 
50 {
51 	struct iscsi_iser_task *iser_task = task->dd_data;
52 	struct iser_mem_reg *mem_reg;
53 	int err;
54 	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
55 	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
56 
57 	err = iser_dma_map_task_data(iser_task,
58 				     buf_in,
59 				     ISER_DIR_IN,
60 				     DMA_FROM_DEVICE);
61 	if (err)
62 		return err;
63 
64 	if (scsi_prot_sg_count(iser_task->sc)) {
65 		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
66 
67 		err = iser_dma_map_task_data(iser_task,
68 					     pbuf_in,
69 					     ISER_DIR_IN,
70 					     DMA_FROM_DEVICE);
71 		if (err)
72 			return err;
73 	}
74 
75 	err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
76 	if (err) {
77 		iser_err("Failed to set up Data-IN RDMA\n");
78 		return err;
79 	}
80 	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
81 
82 	hdr->flags    |= ISER_RSV;
83 	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
84 	hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);
85 
86 	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
87 		 task->itt, mem_reg->rkey,
88 		 (unsigned long long)mem_reg->sge.addr);
89 
90 	return 0;
91 }
92 
93 /* Register user buffer memory and initialize passive rdma
94  *  dto descriptor. Data size is stored in
95  *  task->data[ISER_DIR_OUT].data_len, Protection size
96  *  is stored at task->prot[ISER_DIR_OUT].data_len
97  */
98 static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
99 				  unsigned int unsol_sz, unsigned int edtl)
100 {
101 	struct iscsi_iser_task *iser_task = task->dd_data;
102 	struct iser_mem_reg *mem_reg;
103 	int err;
104 	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
105 	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
106 	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
107 
108 	err = iser_dma_map_task_data(iser_task,
109 				     buf_out,
110 				     ISER_DIR_OUT,
111 				     DMA_TO_DEVICE);
112 	if (err)
113 		return err;
114 
115 	if (scsi_prot_sg_count(iser_task->sc)) {
116 		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
117 
118 		err = iser_dma_map_task_data(iser_task,
119 					     pbuf_out,
120 					     ISER_DIR_OUT,
121 					     DMA_TO_DEVICE);
122 		if (err)
123 			return err;
124 	}
125 
126 	err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
127 				   buf_out->data_len == imm_sz);
128 	if (err != 0) {
129 		iser_err("Failed to register write cmd RDMA mem\n");
130 		return err;
131 	}
132 
133 	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
134 
135 	if (unsol_sz < edtl) {
136 		hdr->flags     |= ISER_WSV;
137 		if (buf_out->data_len > imm_sz) {
138 			hdr->write_stag = cpu_to_be32(mem_reg->rkey);
139 			hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
140 		}
141 
142 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
143 			 task->itt, mem_reg->rkey,
144 			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
145 	}
146 
147 	if (imm_sz > 0) {
148 		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
149 			 task->itt, imm_sz);
150 		tx_dsg->addr = mem_reg->sge.addr;
151 		tx_dsg->length = imm_sz;
152 		tx_dsg->lkey = mem_reg->sge.lkey;
153 		iser_task->desc.num_sge = 2;
154 	}
155 
156 	return 0;
157 }
158 
159 /* creates a new tx descriptor and adds header regd buffer */
160 static void iser_create_send_desc(struct iser_conn *iser_conn,
161 				  struct iser_tx_desc *tx_desc)
162 {
163 	struct iser_device *device = iser_conn->ib_conn.device;
164 
165 	ib_dma_sync_single_for_cpu(device->ib_device,
166 		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
167 
168 	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
169 	tx_desc->iser_header.flags = ISER_VER;
170 	tx_desc->num_sge = 1;
171 }
172 
173 static void iser_free_login_buf(struct iser_conn *iser_conn)
174 {
175 	struct iser_device *device = iser_conn->ib_conn.device;
176 	struct iser_login_desc *desc = &iser_conn->login_desc;
177 
178 	if (!desc->req)
179 		return;
180 
181 	ib_dma_unmap_single(device->ib_device, desc->req_dma,
182 			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
183 
184 	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
185 			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
186 
187 	kfree(desc->req);
188 	kfree(desc->rsp);
189 
190 	/* make sure we never redo any unmapping */
191 	desc->req = NULL;
192 	desc->rsp = NULL;
193 }
194 
195 static int iser_alloc_login_buf(struct iser_conn *iser_conn)
196 {
197 	struct iser_device *device = iser_conn->ib_conn.device;
198 	struct iser_login_desc *desc = &iser_conn->login_desc;
199 
200 	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
201 	if (!desc->req)
202 		return -ENOMEM;
203 
204 	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
205 					  ISCSI_DEF_MAX_RECV_SEG_LEN,
206 					  DMA_TO_DEVICE);
207 	if (ib_dma_mapping_error(device->ib_device,
208 				desc->req_dma))
209 		goto free_req;
210 
211 	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
212 	if (!desc->rsp)
213 		goto unmap_req;
214 
215 	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
216 					   ISER_RX_LOGIN_SIZE,
217 					   DMA_FROM_DEVICE);
218 	if (ib_dma_mapping_error(device->ib_device,
219 				desc->rsp_dma))
220 		goto free_rsp;
221 
222 	return 0;
223 
224 free_rsp:
225 	kfree(desc->rsp);
226 unmap_req:
227 	ib_dma_unmap_single(device->ib_device, desc->req_dma,
228 			    ISCSI_DEF_MAX_RECV_SEG_LEN,
229 			    DMA_TO_DEVICE);
230 free_req:
231 	kfree(desc->req);
232 
233 	return -ENOMEM;
234 }
235 
/**
 * iser_alloc_rx_descriptors - allocate per-connection RX resources
 * @iser_conn: iser connection context
 * @session:   iscsi session this connection serves
 *
 * Allocates the fast registration pool, the login buffers and an array
 * of session->cmds_max receive descriptors, DMA-mapping each descriptor
 * for device-to-CPU transfers and pointing its sge at the mapping.
 * On failure all resources acquired so far are released in reverse
 * order (fall-through error labels) and -ENOMEM is returned.
 */
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge       *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	/* one recv per outstanding command */
	iser_conn->qp_max_recv_dtos = session->cmds_max;

	if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
				    iser_conn->pages_per_mr))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
					    sizeof(struct iser_rx_desc),
					    GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	/* map each descriptor and prime its sge for posting */
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)  {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;
		rx_desc->cqe.done = iser_task_rsp;
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	return 0;

rx_desc_dma_map_failed:
	/* unmap only the i descriptors that were successfully mapped */
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
295 
296 void iser_free_rx_descriptors(struct iser_conn *iser_conn)
297 {
298 	int i;
299 	struct iser_rx_desc *rx_desc;
300 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
301 	struct iser_device *device = ib_conn->device;
302 
303 	iser_free_fastreg_pool(ib_conn);
304 
305 	rx_desc = iser_conn->rx_descs;
306 	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
307 		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
308 				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
309 	kfree(iser_conn->rx_descs);
310 	/* make sure we never redo any unmapping */
311 	iser_conn->rx_descs = NULL;
312 
313 	iser_free_login_buf(iser_conn);
314 }
315 
316 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
317 {
318 	struct iser_conn *iser_conn = conn->dd_data;
319 	struct iscsi_session *session = conn->session;
320 	int err = 0;
321 	int i;
322 
323 	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
324 	/* check if this is the last login - going to full feature phase */
325 	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
326 		goto out;
327 
328 	if (session->discovery_sess) {
329 		iser_info("Discovery session, re-using login RX buffer\n");
330 		goto out;
331 	}
332 
333 	iser_info("Normal session, posting batch of RX %d buffers\n",
334 		  iser_conn->qp_max_recv_dtos - 1);
335 
336 	/*
337 	 * Initial post receive buffers.
338 	 * There is one already posted recv buffer (for the last login
339 	 * response). Therefore, the first recv buffer is skipped here.
340 	 */
341 	for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
342 		err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
343 		if (err)
344 			goto out;
345 	}
346 out:
347 	return err;
348 }
349 
350 /**
351  * iser_send_command - send command PDU
352  * @conn: link to matching iscsi connection
353  * @task: SCSI command task
354  */
355 int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
356 {
357 	struct iser_conn *iser_conn = conn->dd_data;
358 	struct iscsi_iser_task *iser_task = task->dd_data;
359 	unsigned long edtl;
360 	int err;
361 	struct iser_data_buf *data_buf, *prot_buf;
362 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
363 	struct scsi_cmnd *sc  =  task->sc;
364 	struct iser_tx_desc *tx_desc = &iser_task->desc;
365 
366 	edtl = ntohl(hdr->data_length);
367 
368 	/* build the tx desc regd header and add it to the tx desc dto */
369 	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
370 	tx_desc->cqe.done = iser_cmd_comp;
371 	iser_create_send_desc(iser_conn, tx_desc);
372 
373 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
374 		data_buf = &iser_task->data[ISER_DIR_IN];
375 		prot_buf = &iser_task->prot[ISER_DIR_IN];
376 	} else {
377 		data_buf = &iser_task->data[ISER_DIR_OUT];
378 		prot_buf = &iser_task->prot[ISER_DIR_OUT];
379 	}
380 
381 	if (scsi_sg_count(sc)) { /* using a scatter list */
382 		data_buf->sg = scsi_sglist(sc);
383 		data_buf->size = scsi_sg_count(sc);
384 	}
385 	data_buf->data_len = scsi_bufflen(sc);
386 
387 	if (scsi_prot_sg_count(sc)) {
388 		prot_buf->sg  = scsi_prot_sglist(sc);
389 		prot_buf->size = scsi_prot_sg_count(sc);
390 		prot_buf->data_len = (data_buf->data_len >>
391 				     ilog2(sc->device->sector_size)) * 8;
392 	}
393 
394 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
395 		err = iser_prepare_read_cmd(task);
396 		if (err)
397 			goto send_command_error;
398 	}
399 	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
400 		err = iser_prepare_write_cmd(task,
401 					     task->imm_count,
402 				             task->imm_count +
403 					     task->unsol_r2t.data_length,
404 					     edtl);
405 		if (err)
406 			goto send_command_error;
407 	}
408 
409 	iser_task->status = ISER_TASK_STATUS_STARTED;
410 
411 	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
412 	if (!err)
413 		return 0;
414 
415 send_command_error:
416 	iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
417 	return err;
418 }
419 
420 /**
421  * iser_send_data_out - send data out PDU
422  * @conn: link to matching iscsi connection
423  * @task: SCSI command task
424  * @hdr: pointer to the LLD's iSCSI message header
425  */
426 int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
427 		       struct iscsi_data *hdr)
428 {
429 	struct iser_conn *iser_conn = conn->dd_data;
430 	struct iscsi_iser_task *iser_task = task->dd_data;
431 	struct iser_tx_desc *tx_desc;
432 	struct iser_mem_reg *mem_reg;
433 	unsigned long buf_offset;
434 	unsigned long data_seg_len;
435 	uint32_t itt;
436 	int err;
437 	struct ib_sge *tx_dsg;
438 
439 	itt = (__force uint32_t)hdr->itt;
440 	data_seg_len = ntoh24(hdr->dlength);
441 	buf_offset   = ntohl(hdr->offset);
442 
443 	iser_dbg("%s itt %d dseg_len %d offset %d\n",
444 		 __func__,(int)itt,(int)data_seg_len,(int)buf_offset);
445 
446 	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
447 	if (!tx_desc)
448 		return -ENOMEM;
449 
450 	tx_desc->type = ISCSI_TX_DATAOUT;
451 	tx_desc->cqe.done = iser_dataout_comp;
452 	tx_desc->iser_header.flags = ISER_VER;
453 	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
454 
455 	/* build the tx desc */
456 	err = iser_initialize_task_headers(task, tx_desc);
457 	if (err)
458 		goto send_data_out_error;
459 
460 	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
461 	tx_dsg = &tx_desc->tx_sg[1];
462 	tx_dsg->addr = mem_reg->sge.addr + buf_offset;
463 	tx_dsg->length = data_seg_len;
464 	tx_dsg->lkey = mem_reg->sge.lkey;
465 	tx_desc->num_sge = 2;
466 
467 	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
468 		iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
469 			 buf_offset, data_seg_len,
470 			 iser_task->data[ISER_DIR_OUT].data_len, itt);
471 		err = -EINVAL;
472 		goto send_data_out_error;
473 	}
474 	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
475 		 itt, buf_offset, data_seg_len);
476 
477 
478 	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
479 	if (!err)
480 		return 0;
481 
482 send_data_out_error:
483 	kmem_cache_free(ig.desc_cache, tx_desc);
484 	iser_err("conn %p failed err %d\n", conn, err);
485 	return err;
486 }
487 
488 int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
489 {
490 	struct iser_conn *iser_conn = conn->dd_data;
491 	struct iscsi_iser_task *iser_task = task->dd_data;
492 	struct iser_tx_desc *mdesc = &iser_task->desc;
493 	unsigned long data_seg_len;
494 	int err = 0;
495 	struct iser_device *device;
496 
497 	/* build the tx desc regd header and add it to the tx desc dto */
498 	mdesc->type = ISCSI_TX_CONTROL;
499 	mdesc->cqe.done = iser_ctrl_comp;
500 	iser_create_send_desc(iser_conn, mdesc);
501 
502 	device = iser_conn->ib_conn.device;
503 
504 	data_seg_len = ntoh24(task->hdr->dlength);
505 
506 	if (data_seg_len > 0) {
507 		struct iser_login_desc *desc = &iser_conn->login_desc;
508 		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
509 
510 		if (task != conn->login_task) {
511 			iser_err("data present on non login task!!!\n");
512 			goto send_control_error;
513 		}
514 
515 		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
516 					   task->data_count, DMA_TO_DEVICE);
517 
518 		memcpy(desc->req, task->data, task->data_count);
519 
520 		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
521 					      task->data_count, DMA_TO_DEVICE);
522 
523 		tx_dsg->addr = desc->req_dma;
524 		tx_dsg->length = task->data_count;
525 		tx_dsg->lkey = device->pd->local_dma_lkey;
526 		mdesc->num_sge = 2;
527 	}
528 
529 	if (task == conn->login_task) {
530 		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
531 			 task->hdr->opcode, data_seg_len);
532 		err = iser_post_recvl(iser_conn);
533 		if (err)
534 			goto send_control_error;
535 		err = iser_post_rx_bufs(conn, task->hdr);
536 		if (err)
537 			goto send_control_error;
538 	}
539 
540 	err = iser_post_send(&iser_conn->ib_conn, mdesc);
541 	if (!err)
542 		return 0;
543 
544 send_control_error:
545 	iser_err("conn %p failed err %d\n",conn, err);
546 	return err;
547 }
548 
/* Completion handler for the login-response recv. Hands the PDU to
 * libiscsi and, for normal (non-discovery) sessions, posts the rx
 * descriptor that iser_post_rx_bufs() deliberately skipped.
 */
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_login_desc *desc = iser_login(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	char *data;
	int length;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "login_rsp");
		return;
	}

	/* make the received buffer visible to the CPU before parsing */
	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				   DMA_FROM_DEVICE);

	/* iSCSI header follows the iSER header; payload follows both */
	hdr = desc->rsp + sizeof(struct iser_ctrl);
	data = desc->rsp + ISER_HEADERS_LEN;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

	/* return ownership of the buffer to the device */
	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				      DMA_FROM_DEVICE);

	/* discovery sessions keep using the login buffer only */
	if (iser_conn->iscsi_conn->session->discovery_sess)
		return;

	/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
	iser_post_recvm(iser_conn, iser_conn->rx_descs);
}
586 
587 static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
588 {
589 	if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
590 		     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
591 		iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
592 		return -EINVAL;
593 	}
594 
595 	desc->rsc.mr_valid = 0;
596 
597 	return 0;
598 }
599 
/* Handle a send-with-invalidate completion: verify the remotely
 * invalidated rkey matches one of the task's registered descriptors
 * and mark those MRs invalid. Returns 0 if there was no remote
 * invalidation or it was legitimate; -EPROTO if the target invalidated
 * although we did not negotiate it; -EINVAL on a bogus rkey or an
 * unresolvable itt.
 */
static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
				 struct iscsi_hdr *hdr)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		struct iscsi_task *task;
		u32 rkey = wc->ex.invalidate_rkey;

		iser_dbg("conn %p: remote invalidation for rkey %#x\n",
			 iser_conn, rkey);

		/* we only tolerate invalidation if we asked for it */
		if (unlikely(!iser_conn->snd_w_inv)) {
			iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
				 iser_conn);
			return -EPROTO;
		}

		task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
		if (likely(task)) {
			struct iscsi_iser_task *iser_task = task->dd_data;
			struct iser_fr_desc *desc;

			/* check whichever direction(s) this task registered */
			if (iser_task->dir[ISER_DIR_IN]) {
				desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
				if (unlikely(iser_inv_desc(desc, rkey)))
					return -EINVAL;
			}

			if (iser_task->dir[ISER_DIR_OUT]) {
				desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
				if (unlikely(iser_inv_desc(desc, rkey)))
					return -EINVAL;
			}
		} else {
			iser_err("failed to get task for itt=%d\n", hdr->itt);
			return -EINVAL;
		}
	}

	return 0;
}
640 
641 
/* Completion handler for task-response recvs: validate any remote
 * invalidation, pass the PDU up to libiscsi, then repost the same rx
 * descriptor so the QP's recv depth is maintained.
 */
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	/* make the received buffer visible to the CPU before parsing */
	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	/* a bad remote invalidation is unrecoverable - drop the connection */
	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	/* hand the buffer back to the device before reposting it */
	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	err = iser_post_recvm(iser_conn, desc);
	if (err)
		iser_err("posting rx buffer err %d\n", err);
}
681 
682 void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
683 {
684 	if (unlikely(wc->status != IB_WC_SUCCESS))
685 		iser_err_comp(wc, "command");
686 }
687 
/* Send completion of a control PDU. For a nop-out with RESERVED_ITT
 * (libiscsi keeps no reference for those), drop the task reference
 * taken at send time.
 */
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "control");
		return;
	}

	/* this arithmetic is legal by libiscsi dd_data allocation */
	task = (void *)desc - sizeof(struct iscsi_task);
	if (task->hdr->itt == RESERVED_ITT)
		iscsi_put_task(task);
}
703 
704 void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
705 {
706 	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
707 	struct ib_conn *ib_conn = wc->qp->qp_context;
708 	struct iser_device *device = ib_conn->device;
709 
710 	if (unlikely(wc->status != IB_WC_SUCCESS))
711 		iser_err_comp(wc, "dataout");
712 
713 	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
714 			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
715 	kmem_cache_free(ig.desc_cache, desc);
716 }
717 
718 void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
719 
720 {
721 	iser_task->status = ISER_TASK_STATUS_INIT;
722 
723 	iser_task->dir[ISER_DIR_IN] = 0;
724 	iser_task->dir[ISER_DIR_OUT] = 0;
725 
726 	iser_task->data[ISER_DIR_IN].data_len  = 0;
727 	iser_task->data[ISER_DIR_OUT].data_len = 0;
728 
729 	iser_task->prot[ISER_DIR_IN].data_len  = 0;
730 	iser_task->prot[ISER_DIR_OUT].data_len = 0;
731 
732 	iser_task->prot[ISER_DIR_IN].dma_nents = 0;
733 	iser_task->prot[ISER_DIR_OUT].dma_nents = 0;
734 
735 	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
736 	       sizeof(struct iser_mem_reg));
737 	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
738 	       sizeof(struct iser_mem_reg));
739 }
740 
741 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
742 {
743 	int prot_count = scsi_prot_sg_count(iser_task->sc);
744 
745 	if (iser_task->dir[ISER_DIR_IN]) {
746 		iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
747 		iser_dma_unmap_task_data(iser_task,
748 					 &iser_task->data[ISER_DIR_IN],
749 					 DMA_FROM_DEVICE);
750 		if (prot_count)
751 			iser_dma_unmap_task_data(iser_task,
752 						 &iser_task->prot[ISER_DIR_IN],
753 						 DMA_FROM_DEVICE);
754 	}
755 
756 	if (iser_task->dir[ISER_DIR_OUT]) {
757 		iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
758 		iser_dma_unmap_task_data(iser_task,
759 					 &iser_task->data[ISER_DIR_OUT],
760 					 DMA_TO_DEVICE);
761 		if (prot_count)
762 			iser_dma_unmap_task_data(iser_task,
763 						 &iser_task->prot[ISER_DIR_OUT],
764 						 DMA_TO_DEVICE);
765 	}
766 }
767