/**
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"

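/* Ring the MCC doorbell: post one entry to the MCC work queue. */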
static void be_mcc_notify(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}

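/*
 * A completion is new if its flags word is non-zero. The valid bit is set
 * by hardware; the driver clears flags once the entry has been consumed.
 */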
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

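/*
 * Byte-swap a completion to CPU order and check its status field; log and
 * return -1 for anything other than MCC_STATUS_SUCCESS.
 */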
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
						CQE_STATUS_EXTD_MASK;
		dev_err(&ctrl->pdev->dev,
			"error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -1;
	}
	return 0;
}

static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
		  ASYNC_EVENT_CODE_LINK_STATE);
}

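/* Return the completion at the MCC CQ tail, or NULL if none is pending. */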
static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

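/*
 * Track link state in phba->state. On link up every session is failed,
 * presumably so the iSCSI layer rebuilds them over the restored link.
 */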
static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
		struct be_async_event_link_state *evt)
{
	switch (evt->port_link_status) {
	case ASYNC_EVENT_LINK_DOWN:
		SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
						evt->physical_port);
		phba->state |= BE_ADAPTER_LINK_DOWN;
		break;
	case ASYNC_EVENT_LINK_UP:
		phba->state = BE_ADAPTER_UP;
		SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
						evt->physical_port);
		iscsi_host_for_each_session(phba->shost,
					    be2iscsi_fail_session);
		break;
	default:
		SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on "
				    "Physical Port %d\n",
				     evt->port_link_status,
				     evt->physical_port);
	}
}

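/* Ring the CQ doorbell: report consumed entries and optionally re-arm. */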
static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
		       u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

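/*
 * Drain the MCC completion queue: dispatch async link-state events, process
 * command completions, then notify the CQ with the number of entries popped.
 */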
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
	while ((compl = be_mcc_compl_get(phba))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				/* Interpret compl as an async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) compl);
			else
				SE_DEBUG(DBG_LVL_1,
					 "Unsupported Async Event, flags"
					 " = 0x%08x\n", compl->flags);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(ctrl, compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);

	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
	return status;
}

/* Wait until no pending MCC requests remain */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
#define mcc_timeout		120000 /* 120000 x 100us = 12s timeout */
	int i, status;
	for (i = 0; i < mcc_timeout; i++) {
		status = beiscsi_process_mcc(phba);
		if (status)
			return status;

		if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
		return -1;
	}
	return 0;
}

/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}

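/*
 * Poll the mailbox doorbell until the adapter sets the ready bit: 5us busy
 * waits at first, backing off to 2ms delays, with a timeout of roughly 6s.
 */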
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	int cnt = 0, wait = 5;	/* in usecs */
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 6000000) {
			dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
			return -1;
		}

		if (cnt > 50) {
			wait = long_delay;
			mdelay(long_delay / 1000);
		} else {
			udelay(wait);
		}
		cnt += wait;
	} while (true);
	return 0;
}

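/*
 * Post a command via the bootstrap mailbox: write the high then the low
 * half of the mailbox DMA address to the doorbell, waiting for the ready
 * bit after each write, then process the completion left in the mailbox.
 */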
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, "be_mbox_db_ready_wait failed 1\n");
		return status;
	}
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, "be_mbox_db_ready_wait failed 2\n");
		return status;
	}
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			SE_DEBUG(DBG_LVL_1, "be_mcc_compl_process failed\n");
			return status;
		}
	} else {
		dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps and poll the
 * mailbox doorbell until a command completion (or a timeout) occurs.
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

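/* Fill the WRB header: embedded flag or SGE count, plus payload length. */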
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
						MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

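/*
 * Fill a phys_addr array with the 4K pages backing a DMA region, splitting
 * each page address into low and high 32-bit halves.
 */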
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
							struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

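/*
 * Convert an EQ delay in microseconds to the adapter's delay multiplier,
 * rounded against MAX_INTR_RATE and clamped to the field maximum of 1023.
 */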
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

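/*
 * Take the next free WRB at the MCC queue head; the BUG_ON guards against
 * overrunning the ring when every entry is already in use.
 */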
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

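/*
 * Create an event queue over the mailbox: encode function, size and delay
 * multiplier into the EQ context and hand its backing pages to the adapter.
 */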
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
						PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
					__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
					eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

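/*
 * Post the firmware initialization pattern through the mailbox; the fixed
 * byte sequence also serves as an endianness check for the adapter.
 */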
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	int status;
	u8 *endian_check;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize failed\n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

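/*
 * Create a completion queue bound to the given EQ. The CQ is created armed
 * and eventable, so completions raise events as soon as they are posted.
 */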
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
	if (!q_mem->va)
		SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
		      __ilog2_u32(cq->len / 256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else {
		SE_DEBUG(DBG_LVL_1,
			 "In beiscsi_cmd_cq_create, status=0x%08x\n", status);
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

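/*
 * Create the MCC queue on top of its completion queue. This goes through
 * be_mbox_notify_wait() since the MCC ring cannot be used to create itself.
 */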
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	spin_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(phba);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&phba->ctrl.mbox_lock);

	return status;
}

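/*
 * Destroy a queue via the mailbox; the subsystem/opcode pair is chosen by
 * queue type, and SGL teardown carries no queue id.
 */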
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -1;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

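/*
 * Create the default PDU queue that delivers unsolicited iSCSI PDUs to the
 * given completion queue.
 */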
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
		      1);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
		      be_encoded_q_len(length / sizeof(struct phys_addr)));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
		      ctxt, entry_size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
		      cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

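/* Create a WRB queue backed by q_mem; the returned CID becomes the queue id. */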
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

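/*
 * Post the SGL pages backing a DMA region, as many per mailbox command as
 * the request can carry. A num_pages of 0xff is special-cased: one page is
 * posted but the request still advertises 0xff to the firmware. On failure,
 * already-posted pages are removed via a QTYPE_SGL destroy.
 */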
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			SE_DEBUG(DBG_LVL_1,
				 "FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
733