/* bnx2fc_tgt.c: QLogic Linux FCoE offload driver.
 * Handles operations such as session offload/upload etc, and manages
 * session resources such as connection id and qp resources.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"
static void bnx2fc_upld_timer(struct timer_list *t);
static void bnx2fc_ofld_timer(struct timer_list *t);
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata);
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
				struct bnx2fc_rport *tgt);
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
			      struct bnx2fc_rport *tgt);
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
			      struct bnx2fc_rport *tgt);
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);

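/*
 * Upload timer handler: if the firmware does not signal completion of a
 * disable/destroy request within BNX2FC_FW_TIMEOUT, fake the completion
 * so that the thread sleeping in bnx2fc_upld_wait() can make progress.
 */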
static void bnx2fc_upld_timer(struct timer_list *t)
{
	struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);

	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
	/* fake upload completion */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	wake_up_interruptible(&tgt->upld_wait);
}

static void bnx2fc_ofld_timer(struct timer_list *t)
{
	struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);

	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
	/* NOTE: This function should never be called, as
	 * offload should never time out.
	 */
	/*
	 * If the timer has expired, this session is dead.
	 * Clear the offloaded flag and log out of this device.
	 * Since the OFFLOADED flag is cleared, this case is
	 * treated as an offload error: the port is logged off
	 * and the conn_id and session resources are freed up
	 * in bnx2fc_offload_session().
	 */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

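/*
 * Arm a one-shot timer and sleep until BNX2FC_FLAG_OFLD_REQ_CMPL is set,
 * either by the offload/enable completion handler or by
 * bnx2fc_ofld_timer() faking the completion on timeout.
 */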
static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
{
	timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	wait_event_interruptible(tgt->ofld_wait,
				 (test_bit(
				  BNX2FC_FLAG_OFLD_REQ_CMPL,
				  &tgt->flags)));
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&tgt->ofld_timer);
}

static void bnx2fc_offload_session(struct fcoe_port *port,
					struct bnx2fc_rport *tgt,
					struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int rval;
	int i = 0;

	/* Initialize bnx2fc_rport */
	/* NOTE: tgt is already bzero'd */
	rval = bnx2fc_init_tgt(tgt, port, rdata);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate conn id for "
			"port_id (%6x)\n", rport->port_id);
		goto tgt_init_err;
	}

	/* Allocate session resources */
	rval = bnx2fc_alloc_session_resc(hba, tgt);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate resources\n");
		goto ofld_err;
	}

	/*
	 * Initialize FCoE session offload process.
	 * Upon completion of offload process add
	 * rport to list of rports
	 */
retry_ofld:
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_ofld_req(port, tgt);
	if (rval) {
		printk(KERN_ERR PFX "ofld_req failed\n");
		goto ofld_err;
	}
	/*
	 * Wait for the session to be offloaded and enabled. 3 secs
	 * should be ample time for this process to complete.
	 */
	bnx2fc_ofld_wait(tgt);

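	/*
	 * If the firmware failed to allocate a session context
	 * (CTX_ALLOC_FAILURE), pause for a second and retry the
	 * offload request, giving up after a few attempts.
	 */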
	if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
		if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
				       &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
				"retry ofld..%d\n", i++);
			msleep_interruptible(1000);
			if (i > 3) {
				i = 0;
				goto ofld_err;
			}
			goto retry_ofld;
		}
		goto ofld_err;
	}
	if (bnx2fc_map_doorbell(tgt)) {
		printk(KERN_ERR PFX "map doorbell failed - no mem\n");
		goto ofld_err;
	}
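	/*
	 * Second step of the handshake: with the OFFLOAD KWQE completed
	 * and the doorbell mapped, send the ENABLE KWQE and wait for the
	 * enable completion.
	 */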
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_enable_req(port, tgt);
	if (rval) {
		pr_err(PFX "enable session failed\n");
		goto ofld_err;
	}
	bnx2fc_ofld_wait(tgt);
	if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
		goto ofld_err;
	return;

ofld_err:
	/* couldn't offload the session. log off from this rport */
	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
tgt_init_err:
	if (tgt->fcoe_conn_id != -1)
		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
	fc_rport_logoff(rdata);
}

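/*
 * Walk the active command, task-management, ELS and retire queues,
 * completing or cleaning up each outstanding request, then wait for
 * num_active_ios to drain to zero.
 */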
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
{
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_cmd *tmp;
	int rc;
	int i = 0;

	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
		       tgt->num_active_ios.counter);

	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 1;

	list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
						&io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "cleaned up\n");
				complete(&io_req->abts_done);
			}
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		}

		set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);

		/* Do not issue cleanup when disable request failed */
		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
		else {
			rc = bnx2fc_initiate_cleanup(io_req);
			BUG_ON(rc);
		}
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
		BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
		if (io_req->wait_for_abts_comp)
			complete(&io_req->abts_done);
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;

		BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

		if ((io_req->cb_func) && (io_req->cb_arg)) {
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}

		/* Do not issue cleanup when disable request failed */
		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
		else {
			rc = bnx2fc_initiate_cleanup(io_req);
			BUG_ON(rc);
		}
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
		i++;
		list_del_init(&io_req->link);

		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
						&io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "in retire_q\n");
				if (io_req->wait_for_abts_comp)
					complete(&io_req->abts_done);
			}
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
		}

		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}

	BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
	i = 0;
	spin_unlock_bh(&tgt->tgt_lock);
	/* wait for active_ios to go to 0 */
	while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
		msleep(25);
	if (tgt->num_active_ios.counter != 0)
		printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
				    " active_ios = %d\n",
			tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 0;
	spin_unlock_bh(&tgt->tgt_lock);
}

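/*
 * Counterpart of bnx2fc_ofld_wait() for disable/destroy requests: sleep
 * until BNX2FC_FLAG_UPLD_REQ_COMPL is set by the completion handler or
 * faked by bnx2fc_upld_timer().
 */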
static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
{
	timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
	wait_event_interruptible(tgt->upld_wait,
				 (test_bit(
				  BNX2FC_FLAG_UPLD_REQ_COMPL,
				  &tgt->flags)));
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&tgt->upld_timer);
}

static void bnx2fc_upload_session(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
		tgt->num_active_ios.counter);

	/*
	 * Called with hba->hba_mutex held.
	 * This is a blocking call
	 */
	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	bnx2fc_send_session_disable_req(port, tgt);

	/*
	 * Wait for the upload to complete. 3 secs should be
	 * sufficient time for this process to complete.
	 */
	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
	bnx2fc_upld_wait(tgt);

	/*
	 * Traverse the active_q and tmf_q and clean up the
	 * IOs on these lists.
	 */
	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
		       tgt->flags);
	bnx2fc_flush_active_ios(tgt);

	/* Issue destroy KWQE */
	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		bnx2fc_send_session_destroy_req(hba, tgt);

		/* wait for destroy to complete */
		bnx2fc_upld_wait(tgt);

		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");

		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
			tgt->flags);

	} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
		printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
				" not sent to FW\n");
	} else {
		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
				" not sent to FW\n");
	}

	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
}

static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;

	tgt->rport = rport;
	tgt->rdata = rdata;
	tgt->port = port;

	if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
		tgt->fcoe_conn_id = -1;
		return -1;
	}

	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
	if (tgt->fcoe_conn_id == -1)
		return -1;

	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);

	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
	atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);

	/* Initialize the toggle bit */
	tgt->sq_curr_toggle_bit = 1;
	tgt->cq_curr_toggle_bit = 1;
	tgt->sq_prod_idx = 0;
	tgt->cq_cons_idx = 0;
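	/* Initial RQ producer value; bnx2fc_alloc_session_resc() programs
	 * the same value into conn_db->rq_prod.
	 */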
	tgt->rq_prod_idx = 0x8000;
	tgt->rq_cons_idx = 0;
	atomic_set(&tgt->num_active_ios, 0);
	tgt->retry_delay_timestamp = 0;

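	/*
	 * A pure FCP target that advertises the PRLI retry capability is
	 * treated as a tape device; tape relies on sequence-level error
	 * recovery, so the default ULP timeout is used instead of the
	 * driver's fixed I/O timeout.
	 */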
	if (rdata->flags & FC_RP_FLAGS_RETRY &&
	    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
	    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
		tgt->dev_type = TYPE_TAPE;
		tgt->io_timeout = 0; /* use default ULP timeout */
	} else {
		tgt->dev_type = TYPE_DISK;
		tgt->io_timeout = BNX2FC_IO_TIMEOUT;
	}

	/* initialize sq doorbell */
	sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
	sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
	/* initialize rx doorbell */
	rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
			  (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
			  (B577XX_FCOE_CONNECTION_TYPE <<
				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
	rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
		     (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);

	spin_lock_init(&tgt->tgt_lock);
	spin_lock_init(&tgt->cq_lock);

	/* Initialize active_cmd_queue list */
	INIT_LIST_HEAD(&tgt->active_cmd_queue);

	/* Initialize IO retire queue */
	INIT_LIST_HEAD(&tgt->io_retire_queue);

	INIT_LIST_HEAD(&tgt->els_queue);

	/* Initialize active_tm_queue list */
	INIT_LIST_HEAD(&tgt->active_tm_queue);

	init_waitqueue_head(&tgt->ofld_wait);
	init_waitqueue_head(&tgt->upld_wait);

	return 0;
}

/*
 * This event_callback is called after successful completion of libfc
 * initiated target login. bnx2fc can proceed with initiating the session
 * establishment.
 */
void bnx2fc_rport_event_handler(struct fc_lport *lport,
				struct fc_rport_priv *rdata,
				enum fc_rport_event event)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_rport *rport = rdata->rport;
	struct fc_rport_libfc_priv *rp;
	struct bnx2fc_rport *tgt;
	u32 port_id;

	BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
		event, rdata->ids.port_id);
	switch (event) {
	case RPORT_EV_READY:
		if (!rport) {
			printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
			break;
		}

		rp = rport->dd_data;
		if (rport->port_id == FC_FID_DIR_SERV) {
			/*
			 * bnx2fc_rport structure doesn't exist for
			 * directory server.
			 * We should not come here, as lport will
			 * take care of fabric login
			 */
			printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
				rdata->ids.port_id);
			break;
		}

		if (rdata->spp_type != FC_TYPE_FCP) {
			BNX2FC_HBA_DBG(lport, "not FCP type target."
				   " not offloading\n");
			break;
		}
		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
			BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
				   " not offloading\n");
			break;
		}

		/*
		 * Offload process is protected with hba mutex.
		 * Use the same mutex_lock for upload process too
		 */
		mutex_lock(&hba->hba_mutex);
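		/*
		 * The bnx2fc_rport is carved out of the rport's dd_data,
		 * immediately following the fc_rport_libfc_priv area.
		 */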
		tgt = (struct bnx2fc_rport *)&rp[1];

		/* This can happen when ADISC finds the same target */
		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
			mutex_unlock(&hba->hba_mutex);
			return;
		}

		/*
		 * Offload the session. This is a blocking call, and will
		 * wait until the session is offloaded.
		 */
		bnx2fc_offload_session(port, tgt, rdata);

		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);

		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
			/* Session is offloaded and enabled.  */
			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
			/* This counter is protected with hba mutex */
			hba->num_ofld_sess++;

			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		} else {
			/*
			 * Offload or enable would have failed.
			 * In offload/enable completion path, the
			 * rport would have already been removed
			 */
			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
				   "offloaded flag not set\n");
		}
		mutex_unlock(&hba->hba_mutex);
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		if (port_id == FC_FID_DIR_SERV)
			break;

		if (!rport) {
			printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
				port_id);
			break;
		}
		rp = rport->dd_data;
		mutex_lock(&hba->hba_mutex);
		/*
		 * Perform session upload. Note that rdata->peers is already
		 * removed from disc->rports list before we get this event.
		 */
		tgt = (struct bnx2fc_rport *)&rp[1];

		if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
			mutex_unlock(&hba->hba_mutex);
			break;
		}
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);

		bnx2fc_upload_session(port, tgt);
		hba->num_ofld_sess--;
		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
			hba->num_ofld_sess);
		/*
		 * Try to wake up the link-down wait thread; it is
		 * woken only when num_ofld_sess drops to 0.
		 */
		if ((hba->wait_for_link_down) &&
		    (hba->num_ofld_sess == 0)) {
			wake_up_interruptible(&hba->shutdown_wait);
		}
		mutex_unlock(&hba->hba_mutex);

		break;

	case RPORT_EV_NONE:
		break;
	}
}

/**
 * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
 *
 * @port:  fcoe_port struct to lookup the target port on
 * @port_id: The remote port ID to look up
 */
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
					     u32 port_id)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_rport *tgt;
	struct fc_rport_priv *rdata;
	int i;

	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		tgt = hba->tgt_ofld_list[i];
		if ((tgt) && (tgt->port == port)) {
			rdata = tgt->rdata;
			if (rdata->ids.port_id == port_id) {
				if (rdata->rp_state != RPORT_ST_DELETE) {
					BNX2FC_TGT_DBG(tgt, "rport "
						"obtained\n");
					return tgt;
				} else {
					BNX2FC_TGT_DBG(tgt, "rport 0x%x "
						"is in DELETED state\n",
						rdata->ids.port_id);
					return NULL;
				}
			}
		}
	}
	return NULL;
}


/**
 * bnx2fc_alloc_conn_id - allocates FCOE Connection id
 *
 * @hba:	pointer to adapter structure
 * @tgt:	pointer to bnx2fc_rport structure
 */
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
				struct bnx2fc_rport *tgt)
{
	u32 conn_id, next;

	/* called with hba mutex held */

	/*
	 * tgt_ofld_list access is synchronized using
	 * both the hba mutex and the hba lock. At least one of
	 * the two must be held for read access.
	 */

	spin_lock_bh(&hba->hba_lock);
	next = hba->next_conn_id;
	conn_id = hba->next_conn_id++;
	if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
		hba->next_conn_id = 0;

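	/*
	 * Linear probe for a free slot, wrapping at BNX2FC_NUM_MAX_SESS;
	 * if the search comes back around to its starting point, every
	 * conn_id is in use.
	 */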
	while (hba->tgt_ofld_list[conn_id] != NULL) {
		conn_id++;
		if (conn_id == BNX2FC_NUM_MAX_SESS)
			conn_id = 0;

		if (conn_id == next) {
			/* No free conn_ids are available */
			spin_unlock_bh(&hba->hba_lock);
			return -1;
		}
	}
	hba->tgt_ofld_list[conn_id] = tgt;
	tgt->fcoe_conn_id = conn_id;
	spin_unlock_bh(&hba->hba_lock);
	return conn_id;
}

static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
{
	/* called with hba mutex held */
	spin_lock_bh(&hba->hba_lock);
	hba->tgt_ofld_list[conn_id] = NULL;
	spin_unlock_bh(&hba->hba_lock);
}

/*
 * bnx2fc_alloc_session_resc - Allocate qp resources for the session
 */
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	dma_addr_t page;
	int num_pages;
	u32 *pbl;

	/* Allocate and map SQ */
	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				     &tgt->sq_dma, GFP_KERNEL);
	if (!tgt->sq) {
		printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
			tgt->sq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map CQ */
	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				     &tgt->cq_dma, GFP_KERNEL);
	if (!tgt->cq) {
		printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
			tgt->cq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map RQ and RQ PBL */
	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				     &tgt->rq_dma, GFP_KERNEL);
	if (!tgt->rq) {
		printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
			tgt->rq_mem_size);
		goto mem_alloc_failure;
	}

	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
					 &tgt->rq_pbl_dma, GFP_KERNEL);
	if (!tgt->rq_pbl) {
		printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
			tgt->rq_pbl_size);
		goto mem_alloc_failure;
	}

	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
	page = tgt->rq_dma;
	pbl = (u32 *)tgt->rq_pbl;

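	/*
	 * Each PBL entry is the 64-bit DMA address of one queue page,
	 * stored as two 32-bit words, low half first.
	 */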
	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += CNIC_PAGE_SIZE;
	}

	/* Allocate and map XFERQ */
	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			       CNIC_PAGE_MASK;

	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
					tgt->xferq_mem_size, &tgt->xferq_dma,
					GFP_KERNEL);
	if (!tgt->xferq) {
		printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
			tgt->xferq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map CONFQ & CONFQ PBL */
	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			       CNIC_PAGE_MASK;

	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
					tgt->confq_mem_size, &tgt->confq_dma,
					GFP_KERNEL);
	if (!tgt->confq) {
		printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
			tgt->confq_mem_size);
		goto mem_alloc_failure;
	}

	tgt->confq_pbl_size =
		(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	tgt->confq_pbl_size =
		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;

	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					    tgt->confq_pbl_size,
					    &tgt->confq_pbl_dma, GFP_KERNEL);
	if (!tgt->confq_pbl) {
		printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
			tgt->confq_pbl_size);
		goto mem_alloc_failure;
	}

	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
	page = tgt->confq_dma;
	pbl = (u32 *)tgt->confq_pbl;

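	/* Fill the CONFQ PBL the same way: low 32 bits, then high 32 bits */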
	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += CNIC_PAGE_SIZE;
	}

	/* Allocate and map ConnDB */
	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);

	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
					  tgt->conn_db_mem_size,
					  &tgt->conn_db_dma, GFP_KERNEL);
	if (!tgt->conn_db) {
		printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
						tgt->conn_db_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map LCQ */
	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			     CNIC_PAGE_MASK;

	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				      &tgt->lcq_dma, GFP_KERNEL);
	if (!tgt->lcq) {
		printk(KERN_ERR PFX "unable to allocate lcq %d\n",
		       tgt->lcq_mem_size);
		goto mem_alloc_failure;
	}

	tgt->conn_db->rq_prod = 0x8000;

	return 0;

mem_alloc_failure:
	return -ENOMEM;
}

/**
 * bnx2fc_free_session_resc - free qp resources for the session
 *
 * @hba:	adapter structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 *
 * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
 */
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
						struct bnx2fc_rport *tgt)
{
	void __iomem *ctx_base_ptr;

	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");

	spin_lock_bh(&tgt->cq_lock);
	ctx_base_ptr = tgt->ctx_base;
	tgt->ctx_base = NULL;

	/* Free LCQ */
	if (tgt->lcq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				    tgt->lcq, tgt->lcq_dma);
		tgt->lcq = NULL;
	}
	/* Free connDB */
	if (tgt->conn_db) {
		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
				    tgt->conn_db, tgt->conn_db_dma);
		tgt->conn_db = NULL;
	}
	/* Free confq and confq pbl */
	if (tgt->confq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
				    tgt->confq_pbl, tgt->confq_pbl_dma);
		tgt->confq_pbl = NULL;
	}
	if (tgt->confq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
				    tgt->confq, tgt->confq_dma);
		tgt->confq = NULL;
	}
	/* Free XFERQ */
	if (tgt->xferq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
				    tgt->xferq, tgt->xferq_dma);
		tgt->xferq = NULL;
	}
	/* Free RQ PBL and RQ */
	if (tgt->rq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
				    tgt->rq_pbl, tgt->rq_pbl_dma);
		tgt->rq_pbl = NULL;
	}
	if (tgt->rq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				    tgt->rq, tgt->rq_dma);
		tgt->rq = NULL;
	}
	/* Free CQ */
	if (tgt->cq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				    tgt->cq, tgt->cq_dma);
		tgt->cq = NULL;
	}
	/* Free SQ */
	if (tgt->sq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				    tgt->sq, tgt->sq_dma);
		tgt->sq = NULL;
	}
	spin_unlock_bh(&tgt->cq_lock);

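	/*
	 * Unmap the doorbell only after dropping cq_lock: iounmap() may
	 * sleep, so it must not be called while holding the BH-disabled
	 * spinlock. ctx_base was snapshotted and cleared under the lock.
	 */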
	if (ctx_base_ptr)
		iounmap(ctx_base_ptr);
}
897