/*
 *  linux/drivers/scsi/esas2r/esas2r_disc.c
 *      esas2r device discovery routines
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  NO WARRANTY
 *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 *  solely responsible for determining the appropriateness of using and
 *  distributing the Program and assumes all risks associated with its
 *  exercise of rights under this Agreement, including but not limited to
 *  the risks and costs of program errors, damage to or loss of data,
 *  programs or equipment, and unavailability or interruption of operations.
 *
 *  DISCLAIMER OF LIABILITY
 *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "esas2r.h"

/* Miscellaneous internal discovery routines */
static void esas2r_disc_abort(struct esas2r_adapter *a,
			      struct esas2r_request *rq);
static bool esas2r_disc_continue(struct esas2r_adapter *a,
				 struct esas2r_request *rq);
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
				      struct esas2r_request *rq);

/* Internal discovery routines that process the states */
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
				       struct esas2r_request *rq);
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq);
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
				   struct esas2r_request *rq);
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
				  struct esas2r_request *rq);
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq);
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
				      struct esas2r_request *rq);
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
					 struct esas2r_request *rq);

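/*
 * Discovery overview: events are queued on the adapter's discovery context
 * by esas2r_disc_queue_event() and picked up by esas2r_disc_start_port(),
 * which runs the state machine in esas2r_disc_continue() on the dedicated
 * general_req.  In polled mode the machine is driven from
 * esas2r_disc_check_for_work() rather than from the ISR.
 */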
void esas2r_disc_initialize(struct esas2r_adapter *a)
{
	struct esas2r_sas_nvram *nvr = a->nvram;

	esas2r_trace_enter();

	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);

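	/*
	 * Record the discovery start time in milliseconds and convert the
	 * NVRAM device wait time from seconds to milliseconds.
	 */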
	a->disc_start_time = jiffies_to_msecs(jiffies);
	a->disc_wait_time = nvr->dev_wait_time * 1000;
	a->disc_wait_cnt = nvr->dev_wait_count;

	if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
		a->disc_wait_cnt = ESAS2R_MAX_TARGETS;

	/*
	 * If we are doing chip reset or power management processing, always
	 * wait for devices.  The wait count and time are adjusted below
	 * based on what was previously discovered.
	 */

	esas2r_hdebug("starting discovery...");

	a->general_req.interrupt_cx = NULL;

	if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
		if (a->prev_dev_cnt == 0) {
			/* Don't bother waiting if there is nothing to wait
			 * for.
			 */
			a->disc_wait_time = 0;
		} else {
			/*
			 * Set the device wait count to what was previously
			 * found.  We don't care if the user only configured
			 * a time because we know the exact count to wait for.
			 * There is no need to honor the user's wishes to
			 * always wait the full time.
			 */
			a->disc_wait_cnt = a->prev_dev_cnt;

			/*
			 * bump the minimum wait time to 15 seconds since the
			 * default is 3 (system boot or the boot driver usually
			 * buys us more time).
			 */
			if (a->disc_wait_time < 15000)
				a->disc_wait_time = 15000;
		}
	}

	esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
	esas2r_trace("disc wait time: %d", a->disc_wait_time);

	if (a->disc_wait_time == 0)
		esas2r_disc_check_complete(a);

	esas2r_trace_exit();
}

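/*
 * Start any discovery event that was queued while it could not be started
 * directly, e.g. from the ISR during polled discovery, where starting it
 * inline could deadlock.
 */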
void esas2r_disc_start_waiting(struct esas2r_adapter *a)
{
	unsigned long flags;

	spin_lock_irqsave(&a->mem_lock, flags);

	if (a->disc_ctx.disc_evt)
		esas2r_disc_start_port(a);

	spin_unlock_irqrestore(&a->mem_lock, flags);
}

void esas2r_disc_check_for_work(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;

	/* service any pending interrupts first */

	esas2r_polled_interrupt(a);

	/*
	 * now, interrupt processing may have queued up a discovery event.  go
	 * see if we have one to start.  we couldn't start it in the ISR since
	 * polled discovery would cause a deadlock.
	 */

	esas2r_disc_start_waiting(a);

	if (rq->interrupt_cx == NULL)
		return;

	if (rq->req_stat == RS_STARTED
	    && rq->timeout <= RQ_MAX_TIMEOUT) {
		/* wait for the current discovery request to complete. */
		esas2r_wait_request(a, rq);

		if (rq->req_stat == RS_TIMEOUT) {
			esas2r_disc_abort(a, rq);
			esas2r_local_reset_adapter(a);
			return;
		}
	}

	if (rq->req_stat == RS_PENDING
	    || rq->req_stat == RS_STARTED)
		return;

	esas2r_disc_continue(a, rq);
}

void esas2r_disc_check_complete(struct esas2r_adapter *a)
{
	unsigned long flags;

	esas2r_trace_enter();

	/* check to see if we should be waiting for devices */
	if (a->disc_wait_time) {
		u32 currtime = jiffies_to_msecs(jiffies);
		u32 time = currtime - a->disc_start_time;

		/*
		 * Wait until the device wait time is exhausted or the device
		 * wait count is satisfied.
		 */
		if (time < a->disc_wait_time
		    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
			|| a->disc_wait_cnt == 0)) {
			/* After three seconds of waiting, schedule a scan. */
			if (time >= 3000
			    && !(esas2r_lock_set_flags(&a->flags2,
						       AF2_DEV_SCAN) &
				 ilog2(AF2_DEV_SCAN))) {
				spin_lock_irqsave(&a->mem_lock, flags);
				esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
				spin_unlock_irqrestore(&a->mem_lock, flags);
			}

			esas2r_trace_exit();
			return;
		}

		/*
		 * We are done waiting...we think.  Adjust the wait time to
		 * consume events after the count is met.
		 */
		if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK)
		      & ilog2(AF2_DEV_CNT_OK)))
			a->disc_wait_time = time + 3000;

		/* If we haven't done a full scan yet, do it now. */
		if (!(esas2r_lock_set_flags(&a->flags2,
					    AF2_DEV_SCAN) &
		      ilog2(AF2_DEV_SCAN))) {
			spin_lock_irqsave(&a->mem_lock, flags);
			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
			spin_unlock_irqrestore(&a->mem_lock, flags);

			esas2r_trace_exit();
			return;
		}

		/*
		 * Now, if there is still time left to consume events, continue
		 * waiting.
		 */
		if (time < a->disc_wait_time) {
			esas2r_trace_exit();
			return;
		}
	} else {
		if (!(esas2r_lock_set_flags(&a->flags2,
					    AF2_DEV_SCAN) &
		      ilog2(AF2_DEV_SCAN))) {
			spin_lock_irqsave(&a->mem_lock, flags);
			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
			spin_unlock_irqrestore(&a->mem_lock, flags);
		}
	}

	/* We want to stop waiting for devices. */
	a->disc_wait_time = 0;

	if ((a->flags & AF_DISC_POLLED)
	    && (a->flags & AF_DISC_IN_PROG)) {
		/*
		 * Polled discovery is still pending so continue the active
		 * discovery until it is done.  At that point, we will stop
		 * polled discovery and transition to interrupt driven
		 * discovery.
		 */
	} else {
		/*
		 * Done waiting for devices.  Note that we get here immediately
		 * after deferred waiting completes because that is interrupt
		 * driven; i.e. there is no transition.
		 */
		esas2r_disc_fix_curr_requests(a);
		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);

		/*
		 * We have deferred target state changes until now because we
		 * don't want to report any removals (due to the first arrival)
		 * until the device wait time expires.
		 */
		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
	}

	esas2r_trace_exit();
}

void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
	struct esas2r_disc_context *dc = &a->disc_ctx;

	esas2r_trace_enter();

	esas2r_trace("disc_event: %d", disc_evt);

	/* Remember this event in the discovery context. */
	dc->disc_evt |= disc_evt;

	/*
	 * Don't start discovery before or during polled discovery.  If we
	 * did, we would deadlock if we were already in the ISR.
	 */
	if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
		esas2r_disc_start_port(a);

	esas2r_trace_exit();
}

bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	struct esas2r_disc_context *dc = &a->disc_ctx;
	bool ret;

	esas2r_trace_enter();

	if (a->flags & AF_DISC_IN_PROG) {
		esas2r_trace_exit();

		return false;
	}

	/* If there is a discovery waiting, process it. */
	if (dc->disc_evt) {
		if ((a->flags & AF_DISC_POLLED)
		    && a->disc_wait_time == 0) {
			/*
			 * We are doing polled discovery, but we no longer want
			 * to wait for devices.  Stop polled discovery and
			 * transition to interrupt driven discovery.
			 */

			esas2r_trace_exit();

			return false;
		}
	} else {
		/* Discovery is complete. */

		esas2r_hdebug("disc done");

		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);

		esas2r_trace_exit();

		return false;
	}

	/* Handle the discovery context */
	esas2r_trace("disc_evt: %d", dc->disc_evt);
	esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
	dc->flags = 0;

	if (a->flags & AF_DISC_POLLED)
		dc->flags |= DCF_POLLED;

	rq->interrupt_cx = dc;
	rq->req_stat = RS_SUCCESS;

	/* Decode the event code */
	if (dc->disc_evt & DCDE_DEV_SCAN) {
		dc->disc_evt &= ~DCDE_DEV_SCAN;

		dc->flags |= DCF_DEV_SCAN;
		dc->state = DCS_BLOCK_DEV_SCAN;
	} else if (dc->disc_evt & DCDE_DEV_CHANGE) {
		dc->disc_evt &= ~DCDE_DEV_CHANGE;

		dc->flags |= DCF_DEV_CHANGE;
		dc->state = DCS_DEV_RMV;
	}

	/* Continue interrupt driven discovery */
	if (!(a->flags & AF_DISC_POLLED))
		ret = esas2r_disc_continue(a, rq);
	else
		ret = true;

	esas2r_trace_exit();

	return ret;
}

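/*
 * The discovery state machine.  A full device scan typically walks
 * DCS_BLOCK_DEV_SCAN -> DCS_RAID_GRP_INFO -> DCS_PART_INFO (once per
 * partition, then back to the next RAID group) -> DCS_PT_DEV_INFO ->
 * DCS_PT_DEV_ADDR (once per pass through device) -> DCS_DISC_DONE.
 * A device change event instead runs DCS_DEV_RMV -> DCS_DEV_ADD.
 */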
static bool esas2r_disc_continue(struct esas2r_adapter *a,
				 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;

	/* Device discovery/removal */
	while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
		rslt = false;

		switch (dc->state) {
		case DCS_DEV_RMV:

			rslt = esas2r_disc_dev_remove(a, rq);
			break;

		case DCS_DEV_ADD:

			rslt = esas2r_disc_dev_add(a, rq);
			break;

		case DCS_BLOCK_DEV_SCAN:

			rslt = esas2r_disc_block_dev_scan(a, rq);
			break;

		case DCS_RAID_GRP_INFO:

			rslt = esas2r_disc_raid_grp_info(a, rq);
			break;

		case DCS_PART_INFO:

			rslt = esas2r_disc_part_info(a, rq);
			break;

		case DCS_PT_DEV_INFO:

			rslt = esas2r_disc_passthru_dev_info(a, rq);
			break;

		case DCS_PT_DEV_ADDR:

			rslt = esas2r_disc_passthru_dev_addr(a, rq);
			break;

		case DCS_DISC_DONE:

			dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
			break;

		default:

			esas2r_bugon();
			dc->state = DCS_DISC_DONE;
			break;
		}

		if (rslt)
			return true;
	}

	/* Discovery is done...for now. */
	rq->interrupt_cx = NULL;

	if (!(a->flags & AF_DISC_PENDING))
		esas2r_disc_fix_curr_requests(a);

	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);

	/* Start the next discovery. */
	return esas2r_disc_start_port(a);
}

static bool esas2r_disc_start_request(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	unsigned long flags;

	/* Set the timeout to a minimum value. */
	if (rq->timeout < ESAS2R_DEFAULT_TMO)
		rq->timeout = ESAS2R_DEFAULT_TMO;

	/*
	 * Override the request type to distinguish discovery requests.  If we
	 * end up deferring the request, esas2r_disc_local_start_request()
	 * will be called to restart it.
	 */
	rq->req_type = RT_DISC_REQ;

	spin_lock_irqsave(&a->queue_lock, flags);

	if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
		esas2r_disc_local_start_request(a, rq);
	else
		list_add_tail(&rq->req_list, &a->defer_list);

	spin_unlock_irqrestore(&a->queue_lock, flags);

	return true;
}

void esas2r_disc_local_start_request(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	esas2r_trace_enter();

	list_add_tail(&rq->req_list, &a->active_list);

	esas2r_start_vda_request(a, rq);

	esas2r_trace_exit();
}

static void esas2r_disc_abort(struct esas2r_adapter *a,
			      struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;

	esas2r_trace_enter();

	/* abort the current discovery */

	dc->state = DCS_DISC_DONE;

	esas2r_trace_exit();
}

static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_DEV_SCAN,
			     0,
			     0,
			     0,
			     NULL);

	rq->comp_cb = esas2r_disc_block_dev_scan_cb;

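	/* allow the firmware plenty of time to complete the full device scan */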
	rq->timeout = 30000;
	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SUCCESS)
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;

	dc->state = DCS_RAID_GRP_INFO;
	dc->raid_grp_ix = 0;

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);

	if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
		dc->state = DCS_DISC_DONE;

		esas2r_trace_exit();

		return false;
	}

	esas2r_rq_init_request(rq, a);

	grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

	memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_GRP_INFO,
			     dc->scan_gen,
			     0,
			     sizeof(struct atto_vda_grp_info),
			     NULL);

	grpinfo->grp_index = dc->raid_grp_ix;

	rq->comp_cb = esas2r_disc_raid_grp_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
					 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

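	/*
	 * If the firmware's scan generation changed, restart the group walk
	 * from the beginning under the new generation.
	 */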
	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		goto done;
	}

	if (rq->req_stat == RS_SUCCESS) {
		grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

		if (grpinfo->status != VDA_GRP_STAT_ONLINE
		    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
			/* go to the next group. */

			dc->raid_grp_ix++;
		} else {
			memcpy(&dc->raid_grp_name[0],
			       &grpinfo->grp_name[0],
			       sizeof(grpinfo->grp_name));

			dc->interleave = le32_to_cpu(grpinfo->interleave);
			dc->block_size = le32_to_cpu(grpinfo->block_size);

			dc->state = DCS_PART_INFO;
			dc->part_num = 0;
		}
	} else {
		if (rq->req_stat != RS_GRP_INVALID) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group info failed - "
				   "returned with %x",
				   rq->req_stat);
		}

		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	}

done:

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

static bool esas2r_disc_part_info(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	esas2r_trace("part_num: %d", dc->part_num);

	if (dc->part_num >= VDA_MAX_PARTITIONS) {
		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;

		esas2r_trace_exit();

		return false;
	}

	esas2r_rq_init_request(rq, a);

	partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

	memset(partinfo, 0, sizeof(struct atto_vdapart_info));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_PART_INFO,
			     dc->scan_gen,
			     0,
			     sizeof(struct atto_vdapart_info),
			     NULL);

	partinfo->part_no = dc->part_num;

	memcpy(&partinfo->grp_name[0],
	       &dc->raid_grp_name[0],
	       sizeof(partinfo->grp_name));

	rq->comp_cb = esas2r_disc_part_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		dc->state = DCS_RAID_GRP_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

		dc->part_num = partinfo->part_no;

		dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

		esas2r_targ_db_add_raid(a, dc);

		dc->part_num++;
	} else {
		if (rq->req_stat != RS_PART_LAST) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group partition info "
				   "failed - status:%d", rq->req_stat);
		}

		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	esas2r_trace("dev_ix: %d", dc->dev_ix);

	esas2r_rq_init_request(rq, a);

	devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

	memset(devinfo, 0, sizeof(struct atto_vda_devinfo));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_DEV_PT_INFO,
			     dc->scan_gen,
			     dc->dev_ix,
			     sizeof(struct atto_vda_devinfo),
			     NULL);

	rq->comp_cb = esas2r_disc_passthru_dev_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

		dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

		dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

		if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
			dc->curr_phys_id =
				le16_to_cpu(devinfo->phys_target_id);
			dc->dev_addr_type = ATTO_GDA_AT_PORT;
			dc->state = DCS_PT_DEV_ADDR;

			esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
			esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		} else {
			dc->dev_ix++;
		}
	} else {
		if (rq->req_stat != RS_DEV_INVALID) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for device information failed - "
				   "status:%d", rq->req_stat);
		}

		dc->state = DCS_DISC_DONE;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

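/*
 * Issue a tunneled ATTO_FUNC_GET_DEV_ADDR IOCTL for the current pass
 * through device.  This is requested twice per device: first with
 * ATTO_GDA_AT_PORT to get the SAS address, then with ATTO_GDA_AT_UNIQUE
 * to get the unique identifier used to add the target.
 */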
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_ioctl *hi;
	struct esas2r_sg_context sgc;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	/* format the request. */

	sgc.cur_offset = NULL;
	sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
	sgc.length = offsetof(struct atto_ioctl, data)
		     + sizeof(struct atto_hba_get_device_address);

	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, &sgc)) {
		esas2r_rq_destroy_request(rq, a);

		esas2r_trace_exit();

		return false;
	}

	rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

	rq->interrupt_cx = dc;

	/* format the IOCTL data. */

	hi = (struct atto_ioctl *)a->disc_buffer;

	memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

	hi->version = ATTO_VER_GET_DEV_ADDR0;
	hi->function = ATTO_FUNC_GET_DEV_ADDR;
	hi->flags = HBAF_TUNNEL;

	hi->data.get_dev_addr.target_id = cpu_to_le32(dc->curr_phys_id);
	hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

	/* start it up. */

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = NULL;
	unsigned long flags;
	struct atto_ioctl *hi;
	u16 addrlen;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	hi = (struct atto_ioctl *)a->disc_buffer;

	if (rq->req_stat == RS_SUCCESS
	    && hi->status == ATTO_STS_SUCCESS) {
		addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

		if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
			if (addrlen == sizeof(u64))
				memcpy(&dc->sas_addr,
				       &hi->data.get_dev_addr.address[0],
				       addrlen);
			else
				memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

			/* Get the unique identifier. */
			dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

			goto next_dev_addr;
		} else {
			/* Add the pass through target. */
			if (HIBYTE(addrlen) == 0) {
				t = esas2r_targ_db_add_pthru(a,
							     dc,
							     &hi->data.
							     get_dev_addr.
							     address[0],
							     (u8)hi->data.
							     get_dev_addr.
							     addr_len);

				if (t)
					memcpy(&t->sas_addr, &dc->sas_addr,
					       sizeof(t->sas_addr));
			} else {
				/* getting the back end data failed */

				esas2r_log(ESAS2R_LOG_WARN,
					   "an error occurred retrieving the "
					   "back end data (%s:%d)",
					   __func__,
					   __LINE__);
			}
		}
	} else {
		/* getting the back end data failed */

		esas2r_log(ESAS2R_LOG_WARN,
			   "an error occurred retrieving the back end data - "
			   "rq->req_stat:%d hi->status:%d",
			   rq->req_stat, hi->status);
	}

	/* proceed to the next device. */

	if (dc->flags & DCF_DEV_SCAN) {
		dc->dev_ix++;
		dc->state = DCS_PT_DEV_INFO;
	} else if (dc->flags & DCF_DEV_CHANGE) {
		dc->curr_targ++;
		dc->state = DCS_DEV_ADD;
	} else {
		esas2r_bugon();
	}

next_dev_addr:
	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

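/*
 * Return the bus address of the discovery buffer, which lives inside the
 * adapter's uncached region.
 */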
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = sgc->adapter;

	if (sgc->length > ESAS2R_DISC_BUF_LEN)
		esas2r_bugon();

	*addr = a->uncached_phys
		+ (u64)((u8 *)a->disc_buffer - a->uncached);

	return sgc->length;
}

static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t;
	struct esas2r_target *t2;

	esas2r_trace_enter();

	/* process removals. */

	for (t = a->targetdb; t < a->targetdb_end; t++) {
		if (t->new_target_state != TS_NOT_PRESENT)
			continue;

		t->new_target_state = TS_INVALID;

		/* remove the right target! */

		t2 = esas2r_targ_db_find_by_virt_id(a,
						    esas2r_targ_get_id(t, a));

		if (t2)
			esas2r_targ_db_remove(a, t2);
	}

	/* removals complete.  process arrivals. */

	dc->state = DCS_DEV_ADD;
	dc->curr_targ = a->targetdb;

	esas2r_trace_exit();

	return false;
}

static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = dc->curr_targ;

	if (t >= a->targetdb_end) {
		/* done processing state changes. */

		dc->state = DCS_DISC_DONE;
	} else if (t->new_target_state == TS_PRESENT) {
		struct atto_vda_ae_lu *luevt = &t->lu_event;

		esas2r_trace_enter();

		/* clear this now in case more events come in. */

		t->new_target_state = TS_INVALID;

		/* setup the discovery context for adding this device. */

		dc->curr_virt_id = esas2r_targ_get_id(t, a);

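		/*
		 * The LU event carries RAID geometry only when its payload is
		 * long enough and the device is not a pass through.
		 */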
		if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
		     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
		    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
			dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
			dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
		} else {
			dc->block_size = 0;
			dc->interleave = 0;
		}

		/* determine the device type being added. */

		if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
			if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
				dc->state = DCS_PT_DEV_ADDR;
				dc->dev_addr_type = ATTO_GDA_AT_PORT;
				dc->curr_phys_id = luevt->wphys_target_id;
			} else {
				esas2r_log(ESAS2R_LOG_WARN,
					   "luevt->dwevent does not have the "
					   "VDAAE_LU_PHYS_ID bit set (%s:%d)",
					   __func__, __LINE__);
			}
		} else {
			dc->raid_grp_name[0] = 0;

			esas2r_targ_db_add_raid(a, dc);
		}

		esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
		esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		esas2r_trace("dwevent: %d", luevt->dwevent);

		esas2r_trace_exit();
	}

	if (dc->state == DCS_DEV_ADD) {
		/* go to the next device. */

		dc->curr_targ++;
	}

	return false;
}

/*
 * When discovery is done, find all requests on the defer queue and
 * test whether they need to be modified.  If a target is no longer
 * present, complete the request with RS_SEL.  Otherwise, update the
 * target_id since it can change after a hibernate; VDA does not make
 * pass through target IDs persistent.
 */
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
{
	unsigned long flags;
	struct esas2r_target *t;
	struct esas2r_request *rq;
	struct list_head *element;

	/* update virt_targ_id in any outstanding esas2r_requests */

	spin_lock_irqsave(&a->queue_lock, flags);

	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);
		if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
			t = a->targetdb + rq->target_id;

			if (t->target_state == TS_PRESENT)
				rq->vrq->scsi.target_id = cpu_to_le16(
					t->virt_targ_id);
			else
				rq->req_stat = RS_SEL;
		}
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
}