xref: /openbmc/linux/drivers/scsi/pm8001/pm8001_sas.c (revision 8cb5d216)
1 /*
2  * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3  *
4  * Copyright (c) 2008-2009 USI Co., Ltd.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon
16  *    including a substantially similar Disclaimer requirement for further
17  *    binary redistribution.
18  * 3. Neither the names of the above-listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * Alternatively, this software may be distributed under the terms of the
23  * GNU General Public License ("GPL") version 2 as published by the Free
24  * Software Foundation.
25  *
26  * NO WARRANTY
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGES.
38  *
39  */
40 
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43 
44 /**
45  * pm8001_find_tag - find the tag associated with a given sas task
46  * @task: the task sent to the LLDD
47  * @tag: the found tag associated with the task
48  */
49 static int pm8001_find_tag(struct sas_task *task, u32 *tag)
50 {
51 	if (task->lldd_task) {
52 		struct pm8001_ccb_info *ccb;
53 		ccb = task->lldd_task;
54 		*tag = ccb->ccb_tag;
55 		return 1;
56 	}
57 	return 0;
58 }
59 
60 /**
61  * pm8001_tag_free - free a tag that is no longer needed
62  * @pm8001_ha: our hba struct
63  * @tag: the tag to release back to the tag bitmap
64   */
65 void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
66 {
67 	void *bitmap = pm8001_ha->tags;
68 	clear_bit(tag, bitmap);
69 }
70 
71 /**
72  * pm8001_tag_alloc - allocate an empty tag for a task to use.
73  * @pm8001_ha: our hba struct
74  * @tag_out: the allocated tag.
75   */
76 inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
77 {
78 	unsigned int tag;
79 	void *bitmap = pm8001_ha->tags;
80 	unsigned long flags;
81 
82 	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
83 	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
84 	if (tag >= pm8001_ha->tags_num) {
85 		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
86 		return -SAS_QUEUE_FULL;
87 	}
88 	set_bit(tag, bitmap);
89 	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
90 	*tag_out = tag;
91 	return 0;
92 }
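
/*
 * Illustrative usage of the tag helpers above (see pm8001_task_exec() below
 * for the real call site):
 *
 *	u32 tag;
 *
 *	rc = pm8001_tag_alloc(pm8001_ha, &tag);
 *	if (rc)
 *		return rc;
 *	ccb = &pm8001_ha->ccb_info[tag];
 *	...
 *	pm8001_tag_free(pm8001_ha, tag);
 */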
93 
94 void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
95 {
96 	int i;
97 	for (i = 0; i < pm8001_ha->tags_num; ++i)
98 		pm8001_tag_free(pm8001_ha, i);
99 }
100 
101 /**
102  * pm8001_mem_alloc - allocate DMA-coherent memory for pm8001.
103  * @pdev: pci device.
104  * @virt_addr: the allocated (aligned) virtual address
105  * @pphys_addr_hi: upper 32 bits of the aligned physical address.
106  * @pphys_addr_lo: lower 32 bits of the aligned physical address.
107  * @mem_size: memory size.
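 * @pphys_addr: the raw DMA address returned by dma_alloc_coherent().
 * @align: required alignment in bytes (expected to be a power of two, or 0 for none).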
108  */
109 int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
110 	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
111 	u32 *pphys_addr_lo, u32 mem_size, u32 align)
112 {
113 	caddr_t mem_virt_alloc;
114 	dma_addr_t mem_dma_handle;
115 	u64 phys_align;
116 	u64 align_offset = 0;
117 	if (align)
118 		align_offset = (dma_addr_t)align - 1;
119 	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
120 					    &mem_dma_handle, GFP_KERNEL);
121 	if (!mem_virt_alloc) {
122 		pr_err("pm80xx: memory allocation error\n");
123 		return -1;
124 	}
125 	*pphys_addr = mem_dma_handle;
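	/*
	 * Round the returned DMA address up to the requested alignment and
	 * offset the virtual address by the same amount; the extra 'align'
	 * bytes allocated above guarantee this stays inside the buffer.
	 */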
126 	phys_align = (*pphys_addr + align_offset) & ~align_offset;
127 	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
128 	*pphys_addr_hi = upper_32_bits(phys_align);
129 	*pphys_addr_lo = lower_32_bits(phys_align);
130 	return 0;
131 }
132 
133 /**
134  * pm8001_find_ha_by_dev - look up our hba struct from a domain device
135  * handed down by the sas layer.
136  * @dev: the domain device from the sas layer.
137   */
138 static
139 struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
140 {
141 	struct sas_ha_struct *sha = dev->port->ha;
142 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
143 	return pm8001_ha;
144 }
145 
146 /**
147  * pm8001_phy_control - phy control handler registered in the
148  * sas_domain_function_template for libsas to use. Note that this only
149  * controls HBA phys; to control a phy behind an expander, an SMP
150  * PHY CONTROL request must be used instead.
151  * @sas_phy: the HBA phy to operate on.
152  * @func: the operation to perform.
153  * @funcdata: operation-specific data (e.g. the requested link rates); may be NULL.
154   */
155 int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
156 	void *funcdata)
157 {
158 	int rc = 0, phy_id = sas_phy->id;
159 	struct pm8001_hba_info *pm8001_ha = NULL;
160 	struct sas_phy_linkrates *rates;
161 	struct pm8001_phy *phy;
162 	DECLARE_COMPLETION_ONSTACK(completion);
163 	unsigned long flags;
164 	pm8001_ha = sas_phy->ha->lldd_ha;
165 	phy = &pm8001_ha->phy[phy_id];
166 	pm8001_ha->phy[phy_id].enable_completion = &completion;
167 	switch (func) {
168 	case PHY_FUNC_SET_LINK_RATE:
169 		rates = funcdata;
170 		if (rates->minimum_linkrate) {
171 			pm8001_ha->phy[phy_id].minimum_linkrate =
172 				rates->minimum_linkrate;
173 		}
174 		if (rates->maximum_linkrate) {
175 			pm8001_ha->phy[phy_id].maximum_linkrate =
176 				rates->maximum_linkrate;
177 		}
178 		if (pm8001_ha->phy[phy_id].phy_state ==  PHY_LINK_DISABLE) {
179 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
180 			wait_for_completion(&completion);
181 		}
182 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
183 					      PHY_LINK_RESET);
184 		break;
185 	case PHY_FUNC_HARD_RESET:
186 		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
187 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
188 			wait_for_completion(&completion);
189 		}
190 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
191 					      PHY_HARD_RESET);
192 		break;
193 	case PHY_FUNC_LINK_RESET:
194 		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
195 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
196 			wait_for_completion(&completion);
197 		}
198 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
199 					      PHY_LINK_RESET);
200 		break;
201 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
202 		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
203 					      PHY_LINK_RESET);
204 		break;
205 	case PHY_FUNC_DISABLE:
206 		if (pm8001_ha->chip_id != chip_8001) {
207 			if (pm8001_ha->phy[phy_id].phy_state ==
208 				PHY_STATE_LINK_UP_SPCV) {
209 				sas_phy_disconnected(&phy->sas_phy);
210 				sas_notify_phy_event(&phy->sas_phy,
211 					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
212 				phy->phy_attached = 0;
213 			}
214 		} else {
215 			if (pm8001_ha->phy[phy_id].phy_state ==
216 				PHY_STATE_LINK_UP_SPC) {
217 				sas_phy_disconnected(&phy->sas_phy);
218 				sas_notify_phy_event(&phy->sas_phy,
219 					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
220 				phy->phy_attached = 0;
221 			}
222 		}
223 		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
224 		break;
225 	case PHY_FUNC_GET_EVENTS:
226 		spin_lock_irqsave(&pm8001_ha->lock, flags);
227 		if (pm8001_ha->chip_id == chip_8001) {
228 			if (-1 == pm8001_bar4_shift(pm8001_ha,
229 					(phy_id < 4) ? 0x30000 : 0x40000)) {
230 				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
231 				return -EINVAL;
232 			}
233 		}
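		/*
		 * The per-phy link error counters are read from BAR4 space:
		 * each phy's counter block is 0x4000 apart, starting at
		 * offset 0x1034 (for chip_8001 the BAR4 window is shifted
		 * above so phys 0-3 and 4-7 map through different regions).
		 */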
234 		{
235 			struct sas_phy *phy = sas_phy->phy;
236 			uint32_t *qp = (uint32_t *)(((char *)
237 				pm8001_ha->io_mem[2].memvirtaddr)
238 				+ 0x1034 + (0x4000 * (phy_id & 3)));
239 
240 			phy->invalid_dword_count = qp[0];
241 			phy->running_disparity_error_count = qp[1];
242 			phy->loss_of_dword_sync_count = qp[3];
243 			phy->phy_reset_problem_count = qp[4];
244 		}
245 		if (pm8001_ha->chip_id == chip_8001)
246 			pm8001_bar4_shift(pm8001_ha, 0);
247 		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
248 		return 0;
249 	default:
250 		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
251 		rc = -EOPNOTSUPP;
252 	}
253 	msleep(300);
254 	return rc;
255 }
256 
257 /**
258  * pm8001_scan_start - enable all HBA phys by sending a phy_start
259  * command for each of them to the HBA.
260  * @shost: the scsi host data.
261   */
262 void pm8001_scan_start(struct Scsi_Host *shost)
263 {
264 	int i;
265 	struct pm8001_hba_info *pm8001_ha;
266 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
267 	pm8001_ha = sha->lldd_ha;
268 	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
269 	if (pm8001_ha->chip_id == chip_8001)
270 		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
271 	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
272 		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
273 }
274 
275 int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
276 {
277 	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
278 
279 	/* give the phy enabling interrupt event time to come in (1s
280 	 * is empirically about all it takes) */
281 	if (time < HZ)
282 		return 0;
283 	/* Wait for discovery to finish */
284 	sas_drain_work(ha);
285 	return 1;
286 }
287 
288 /**
289  * pm8001_task_prep_smp - the dispatcher function, prepares data for an smp task
290  * @pm8001_ha: our hba card information
291  * @ccb: the ccb attached to the smp task
292   */
293 static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
294 	struct pm8001_ccb_info *ccb)
295 {
296 	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
297 }
298 
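/**
 * pm8001_get_ncq_tag - get the NCQ tag of an ATA queued command
 * @task: the sas task whose uldd_task is the ata_queued_cmd
 * @tag: the returned NCQ tag
 *
 * Returns 1 and fills in @tag when the command is an NCQ (FPDMA) command,
 * otherwise returns 0.
 */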
299 u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
300 {
301 	struct ata_queued_cmd *qc = task->uldd_task;
302 	if (qc) {
303 		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
304 		    qc->tf.command == ATA_CMD_FPDMA_READ ||
305 		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
306 		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
307 		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
308 			*tag = qc->tag;
309 			return 1;
310 		}
311 	}
312 	return 0;
313 }
314 
315 /**
316  * pm8001_task_prep_ata - the dispatcher function, prepares data for a sata task
317  * @pm8001_ha: our hba card information
318  * @ccb: the ccb attached to the sata task
319   */
320 static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
321 	struct pm8001_ccb_info *ccb)
322 {
323 	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
324 }
325 
326 /**
327  * pm8001_task_prep_ssp_tm - the dispatcher function, prepares task management data
328  * @pm8001_ha: our hba card information
329  * @ccb: the ccb attached to the TM request
330  * @tmf: the task management IU
331   */
332 static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
333 	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
334 {
335 	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
336 }
337 
338 /**
339  * pm8001_task_prep_ssp - the dispatcher function, prepares ssp data for an ssp task
340  * @pm8001_ha: our hba card information
341  * @ccb: the ccb attached to the ssp task
342   */
343 static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
344 	struct pm8001_ccb_info *ccb)
345 {
346 	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
347 }
348 
349 /* Find the local port id that's attached to this device */
350 static int sas_find_local_port_id(struct domain_device *dev)
351 {
352 	struct domain_device *pdev = dev->parent;
353 
354 	/* Directly attached device */
355 	if (!pdev)
356 		return dev->port->id;
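	/* Behind an expander: walk up to the topmost expander (the one attached
	 * to the HBA) and use its port id.
	 */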
357 	while (pdev) {
358 		struct domain_device *pdev_p = pdev->parent;
359 		if (!pdev_p)
360 			return pdev->port->id;
361 		pdev = pdev->parent;
362 	}
363 	return 0;
364 }
365 
366 #define DEV_IS_GONE(pm8001_dev)	\
367 	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
368 /**
369  * pm8001_task_exec - queue a task (ssp, smp or sata) to the hardware.
370  * @task: the task to be executed.
371  * @gfp_flags: gfp_flags.
372  * @is_tmf: whether this is a task management request.
373  * @tmf: the task management IU, or NULL.
374   */
375 static int pm8001_task_exec(struct sas_task *task,
376 	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
377 {
378 	struct domain_device *dev = task->dev;
379 	struct pm8001_hba_info *pm8001_ha;
380 	struct pm8001_device *pm8001_dev;
381 	struct pm8001_port *port = NULL;
382 	struct sas_task *t = task;
383 	struct pm8001_ccb_info *ccb;
384 	u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
385 	unsigned long flags = 0;
386 	enum sas_protocol task_proto = t->task_proto;
387 
388 	if (!dev->port) {
389 		struct task_status_struct *tsm = &t->task_status;
390 		tsm->resp = SAS_TASK_UNDELIVERED;
391 		tsm->stat = SAS_PHY_DOWN;
392 		if (dev->dev_type != SAS_SATA_DEV)
393 			t->task_done(t);
394 		return 0;
395 	}
396 	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
397 	if (pm8001_ha->controller_fatal_error) {
398 		struct task_status_struct *ts = &t->task_status;
399 
400 		ts->resp = SAS_TASK_UNDELIVERED;
401 		t->task_done(t);
402 		return 0;
403 	}
404 	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
405 	spin_lock_irqsave(&pm8001_ha->lock, flags);
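	/*
	 * Single-iteration do/while loop: the "continue" statements below
	 * simply skip the remaining submission steps for this task and
	 * fall out of the loop.
	 */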
406 	do {
407 		dev = t->dev;
408 		pm8001_dev = dev->lldd_dev;
409 		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
410 		if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
411 			if (sas_protocol_ata(task_proto)) {
412 				struct task_status_struct *ts = &t->task_status;
413 				ts->resp = SAS_TASK_UNDELIVERED;
414 				ts->stat = SAS_PHY_DOWN;
415 
416 				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
417 				t->task_done(t);
418 				spin_lock_irqsave(&pm8001_ha->lock, flags);
419 				continue;
420 			} else {
421 				struct task_status_struct *ts = &t->task_status;
422 				ts->resp = SAS_TASK_UNDELIVERED;
423 				ts->stat = SAS_PHY_DOWN;
424 				t->task_done(t);
425 				continue;
426 			}
427 		}
428 		rc = pm8001_tag_alloc(pm8001_ha, &tag);
429 		if (rc)
430 			goto err_out;
431 		ccb = &pm8001_ha->ccb_info[tag];
432 
433 		if (!sas_protocol_ata(task_proto)) {
434 			if (t->num_scatter) {
435 				n_elem = dma_map_sg(pm8001_ha->dev,
436 					t->scatter,
437 					t->num_scatter,
438 					t->data_dir);
439 				if (!n_elem) {
440 					rc = -ENOMEM;
441 					goto err_out_tag;
442 				}
443 			}
444 		} else {
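			/*
			 * For ATA/STP the scatterlist has already been DMA
			 * mapped by libata, so only the element count is
			 * recorded here.
			 */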
445 			n_elem = t->num_scatter;
446 		}
447 
448 		t->lldd_task = ccb;
449 		ccb->n_elem = n_elem;
450 		ccb->ccb_tag = tag;
451 		ccb->task = t;
452 		ccb->device = pm8001_dev;
453 		switch (task_proto) {
454 		case SAS_PROTOCOL_SMP:
455 			atomic_inc(&pm8001_dev->running_req);
456 			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
457 			break;
458 		case SAS_PROTOCOL_SSP:
459 			atomic_inc(&pm8001_dev->running_req);
460 			if (is_tmf)
461 				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
462 					ccb, tmf);
463 			else
464 				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
465 			break;
466 		case SAS_PROTOCOL_SATA:
467 		case SAS_PROTOCOL_STP:
468 			atomic_inc(&pm8001_dev->running_req);
469 			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
470 			break;
471 		default:
472 			dev_printk(KERN_ERR, pm8001_ha->dev,
473 				"unknown sas_task proto: 0x%x\n", task_proto);
474 			rc = -EINVAL;
475 			break;
476 		}
477 
478 		if (rc) {
479 			pm8001_dbg(pm8001_ha, IO, "rc is %x\n", rc);
480 			atomic_dec(&pm8001_dev->running_req);
481 			goto err_out_tag;
482 		}
483 		/* TODO: select normal or high priority */
484 		spin_lock(&t->task_state_lock);
485 		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
486 		spin_unlock(&t->task_state_lock);
487 	} while (0);
488 	rc = 0;
489 	goto out_done;
490 
491 err_out_tag:
492 	pm8001_tag_free(pm8001_ha, tag);
493 err_out:
494 	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
495 	if (!sas_protocol_ata(task_proto))
496 		if (n_elem)
497 			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
498 				t->data_dir);
499 out_done:
500 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
501 	return rc;
502 }
503 
504 /**
505  * pm8001_queue_command - entry point registered with the upper layer; all IO
506  * commands sent to the HBA come through this interface.
507  * @task: the task to be executed.
508  * @gfp_flags: gfp_flags
509   */
510 int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
511 {
512 	return pm8001_task_exec(task, gfp_flags, 0, NULL);
513 }
514 
515 /**
516  * pm8001_ccb_task_free - unmap the sg for an ssp/smp command and free the ccb.
517  * @pm8001_ha: our hba card information
518  * @task: the task to be freed.
519  * @ccb: the ccb attached to the task
520  * @ccb_idx: the ccb index (tag) to release.
521   */
522 void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
523 	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
524 {
525 	if (!ccb->task)
526 		return;
527 	if (!sas_protocol_ata(task->task_proto))
528 		if (ccb->n_elem)
529 			dma_unmap_sg(pm8001_ha->dev, task->scatter,
530 				task->num_scatter, task->data_dir);
531 
532 	switch (task->task_proto) {
533 	case SAS_PROTOCOL_SMP:
534 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
535 			DMA_FROM_DEVICE);
536 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
537 			DMA_TO_DEVICE);
538 		break;
539 
540 	case SAS_PROTOCOL_SATA:
541 	case SAS_PROTOCOL_STP:
542 	case SAS_PROTOCOL_SSP:
543 	default:
544 		/* do nothing */
545 		break;
546 	}
547 	task->lldd_task = NULL;
548 	ccb->task = NULL;
549 	ccb->ccb_tag = 0xFFFFFFFF;
550 	ccb->open_retry = 0;
551 	pm8001_tag_free(pm8001_ha, ccb_idx);
552 }
553 
554 /**
555  * pm8001_alloc_dev - find an unused pm8001_device slot
556  * @pm8001_ha: our hba card information
557   */
558 static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
559 {
560 	u32 dev;
561 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
562 		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
563 			pm8001_ha->devices[dev].id = dev;
564 			return &pm8001_ha->devices[dev];
565 		}
566 	}
567 	if (dev == PM8001_MAX_DEVICES) {
568 		pm8001_dbg(pm8001_ha, FAIL,
569 			   "max support %d devices, ignore ..\n",
570 			   PM8001_MAX_DEVICES);
571 	}
572 	return NULL;
573 }
574 /**
575  * pm8001_find_dev - find a matching pm8001_device
576  * @pm8001_ha: our hba card information
577  * @device_id: device ID to match against
578   */
579 struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
580 					u32 device_id)
581 {
582 	u32 dev;
583 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
584 		if (pm8001_ha->devices[dev].device_id == device_id)
585 			return &pm8001_ha->devices[dev];
586 	}
587 	if (dev == PM8001_MAX_DEVICES) {
588 		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
589 	}
590 	return NULL;
591 }
592 
593 void pm8001_free_dev(struct pm8001_device *pm8001_dev)
594 {
595 	u32 id = pm8001_dev->id;
596 	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
597 	pm8001_dev->id = id;
598 	pm8001_dev->dev_type = SAS_PHY_UNUSED;
599 	pm8001_dev->device_id = PM8001_MAX_DEVICES;
600 	pm8001_dev->sas_device = NULL;
601 }
602 
603 /**
604  * pm8001_dev_found_notify - libsas notifies us that a device was found.
605  * @dev: the device structure used by the sas layer.
606  *
607  * When libsas finds a sas domain device it tells the LLDD, and the LLDD
608  * then registers the device with the HBA firmware using the
609  * OPC_INB_REG_DEV command. The HBA assigns a device ID (based on the
610  * device's sas address) and returns it to the LLDD. From then on we
611  * address the device by this firmware-assigned device ID rather than by
612  * its sas address. This step is required for this HBA, although it is
613  * optional for other HBA drivers.
614  */
615 static int pm8001_dev_found_notify(struct domain_device *dev)
616 {
617 	unsigned long flags = 0;
618 	int res = 0;
619 	struct pm8001_hba_info *pm8001_ha = NULL;
620 	struct domain_device *parent_dev = dev->parent;
621 	struct pm8001_device *pm8001_device;
622 	DECLARE_COMPLETION_ONSTACK(completion);
623 	u32 flag = 0;
624 	pm8001_ha = pm8001_find_ha_by_dev(dev);
625 	spin_lock_irqsave(&pm8001_ha->lock, flags);
626 
627 	pm8001_device = pm8001_alloc_dev(pm8001_ha);
628 	if (!pm8001_device) {
629 		res = -1;
630 		goto found_out;
631 	}
632 	pm8001_device->sas_device = dev;
633 	dev->lldd_dev = pm8001_device;
634 	pm8001_device->dev_type = dev->dev_type;
635 	pm8001_device->dcompletion = &completion;
636 	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
637 		int phy_id;
638 		struct ex_phy *phy;
639 		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
640 		phy_id++) {
641 			phy = &parent_dev->ex_dev.ex_phy[phy_id];
642 			if (SAS_ADDR(phy->attached_sas_addr)
643 				== SAS_ADDR(dev->sas_addr)) {
644 				pm8001_device->attached_phy = phy_id;
645 				break;
646 			}
647 		}
648 		if (phy_id == parent_dev->ex_dev.num_phys) {
649 			pm8001_dbg(pm8001_ha, FAIL,
650 				   "Error: no attached dev:%016llx at ex:%016llx.\n",
651 				   SAS_ADDR(dev->sas_addr),
652 				   SAS_ADDR(parent_dev->sas_addr));
653 			res = -1;
654 		}
655 	} else {
656 		if (dev->dev_type == SAS_SATA_DEV) {
657 			pm8001_device->attached_phy =
658 				dev->rphy->identify.phy_identifier;
659 			flag = 1; /* directly sata */
660 		}
661 	} /*register this device to HBA*/
662 	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
663 	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
664 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
665 	wait_for_completion(&completion);
666 	if (dev->dev_type == SAS_END_DEVICE)
667 		msleep(50);
668 	pm8001_ha->flags = PM8001F_RUN_TIME;
669 	return 0;
670 found_out:
671 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
672 	return res;
673 }
674 
675 int pm8001_dev_found(struct domain_device *dev)
676 {
677 	return pm8001_dev_found_notify(dev);
678 }
679 
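/*
 * Completion callback for internal (slow) tasks: if the timeout timer can no
 * longer be deleted it has already fired, and pm8001_tmf_timedout() completes
 * the task instead.
 */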
680 void pm8001_task_done(struct sas_task *task)
681 {
682 	if (!del_timer(&task->slow_task->timer))
683 		return;
684 	complete(&task->slow_task->completion);
685 }
686 
687 static void pm8001_tmf_timedout(struct timer_list *t)
688 {
689 	struct sas_task_slow *slow = from_timer(slow, t, timer);
690 	struct sas_task *task = slow->task;
691 
692 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
693 	complete(&task->slow_task->completion);
694 }
695 
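/* internal TMF/abort timeout, in seconds (multiplied by HZ below) */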
696 #define PM8001_TASK_TIMEOUT 20
697 /**
698  * pm8001_exec_internal_tmf_task - execute a task management command.
699  * @dev: the target device.
700  * @tmf: the task management function to issue.
701  * @para_len: length of @parameter.
702  * @parameter: the ssp task parameter (copied into the ssp task IU).
703  *
704  * When an error or exception occurs we may want to, for example, abort the
705  * task that caused it; that is done by calling this function. Note that it
706  * goes through the same task-execution interface as normal I/O.
707  */
708 static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
709 	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
710 {
711 	int res, retry;
712 	struct sas_task *task = NULL;
713 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
714 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
715 	DECLARE_COMPLETION_ONSTACK(completion_setstate);
716 
717 	for (retry = 0; retry < 3; retry++) {
718 		task = sas_alloc_slow_task(GFP_KERNEL);
719 		if (!task)
720 			return -ENOMEM;
721 
722 		task->dev = dev;
723 		task->task_proto = dev->tproto;
724 		memcpy(&task->ssp_task, parameter, para_len);
725 		task->task_done = pm8001_task_done;
726 		task->slow_task->timer.function = pm8001_tmf_timedout;
727 		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
728 		add_timer(&task->slow_task->timer);
729 
730 		res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
731 
732 		if (res) {
733 			del_timer(&task->slow_task->timer);
734 			pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
735 			goto ex_err;
736 		}
737 		wait_for_completion(&task->slow_task->completion);
738 		if (pm8001_ha->chip_id != chip_8001) {
739 			pm8001_dev->setds_completion = &completion_setstate;
740 			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
741 				pm8001_dev, DS_OPERATIONAL);
742 			wait_for_completion(&completion_setstate);
743 		}
744 		res = -TMF_RESP_FUNC_FAILED;
745 		/* Even if the TMF timed out, return directly. */
746 		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
747 			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
748 				pm8001_dbg(pm8001_ha, FAIL,
749 					   "TMF task[%x]timeout.\n",
750 					   tmf->tmf);
751 				goto ex_err;
752 			}
753 		}
754 
755 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
756 			task->task_status.stat == SAM_STAT_GOOD) {
757 			res = TMF_RESP_FUNC_COMPLETE;
758 			break;
759 		}
760 
761 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
762 		task->task_status.stat == SAS_DATA_UNDERRUN) {
763 			/* no error, but return the number of bytes of
764 			 * underrun */
765 			res = task->task_status.residual;
766 			break;
767 		}
768 
769 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
770 			task->task_status.stat == SAS_DATA_OVERRUN) {
771 			pm8001_dbg(pm8001_ha, FAIL, "Blocked task error.\n");
772 			res = -EMSGSIZE;
773 			break;
774 		} else {
775 			pm8001_dbg(pm8001_ha, EH,
776 				   " Task to dev %016llx response:0x%x status 0x%x\n",
777 				   SAS_ADDR(dev->sas_addr),
778 				   task->task_status.resp,
779 				   task->task_status.stat);
780 			sas_free_task(task);
781 			task = NULL;
782 		}
783 	}
784 ex_err:
785 	BUG_ON(retry == 3 && task != NULL);
786 	sas_free_task(task);
787 	return res;
788 }
789 
790 static int
791 pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
792 	struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
793 	u32 task_tag)
794 {
795 	int res, retry;
796 	u32 ccb_tag;
797 	struct pm8001_ccb_info *ccb;
798 	struct sas_task *task = NULL;
799 
800 	for (retry = 0; retry < 3; retry++) {
801 		task = sas_alloc_slow_task(GFP_KERNEL);
802 		if (!task)
803 			return -ENOMEM;
804 
805 		task->dev = dev;
806 		task->task_proto = dev->tproto;
807 		task->task_done = pm8001_task_done;
808 		task->slow_task->timer.function = pm8001_tmf_timedout;
809 		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
810 		add_timer(&task->slow_task->timer);
811 
812 		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
813 		if (res)
814 			goto ex_err;
815 		ccb = &pm8001_ha->ccb_info[ccb_tag];
816 		ccb->device = pm8001_dev;
817 		ccb->ccb_tag = ccb_tag;
818 		ccb->task = task;
819 		ccb->n_elem = 0;
820 
821 		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
822 			pm8001_dev, flag, task_tag, ccb_tag);
823 
824 		if (res) {
825 			del_timer(&task->slow_task->timer);
826 			pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
827 			goto ex_err;
828 		}
829 		wait_for_completion(&task->slow_task->completion);
830 		res = TMF_RESP_FUNC_FAILED;
831 		/* Even if the TMF timed out, return directly. */
832 		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
833 			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
834 				pm8001_dbg(pm8001_ha, FAIL,
835 					   "TMF task timeout.\n");
836 				goto ex_err;
837 			}
838 		}
839 
840 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
841 			task->task_status.stat == SAM_STAT_GOOD) {
842 			res = TMF_RESP_FUNC_COMPLETE;
843 			break;
844 
845 		} else {
846 			pm8001_dbg(pm8001_ha, EH,
847 				   " Task to dev %016llx response: 0x%x status 0x%x\n",
848 				   SAS_ADDR(dev->sas_addr),
849 				   task->task_status.resp,
850 				   task->task_status.stat);
851 			sas_free_task(task);
852 			task = NULL;
853 		}
854 	}
855 ex_err:
856 	BUG_ON(retry == 3 && task != NULL);
857 	sas_free_task(task);
858 	return res;
859 }
860 
861 /**
862  * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
863  * @dev: the device structure used by the sas layer.
864   */
865 static void pm8001_dev_gone_notify(struct domain_device *dev)
866 {
867 	unsigned long flags = 0;
868 	struct pm8001_hba_info *pm8001_ha;
869 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
870 
871 	pm8001_ha = pm8001_find_ha_by_dev(dev);
872 	spin_lock_irqsave(&pm8001_ha->lock, flags);
873 	if (pm8001_dev) {
874 		u32 device_id = pm8001_dev->device_id;
875 
876 		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
877 			   pm8001_dev->device_id, pm8001_dev->dev_type);
878 		if (atomic_read(&pm8001_dev->running_req)) {
879 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
880 			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
881 							dev, 1, 0);
882 			while (atomic_read(&pm8001_dev->running_req))
883 				msleep(20);
884 			spin_lock_irqsave(&pm8001_ha->lock, flags);
885 		}
886 		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
887 		pm8001_free_dev(pm8001_dev);
888 	} else {
889 		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
890 	}
891 	dev->lldd_dev = NULL;
892 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
893 }
894 
895 void pm8001_dev_gone(struct domain_device *dev)
896 {
897 	pm8001_dev_gone_notify(dev);
898 }
899 
900 static int pm8001_issue_ssp_tmf(struct domain_device *dev,
901 	u8 *lun, struct pm8001_tmf_task *tmf)
902 {
903 	struct sas_ssp_task ssp_task;
904 	if (!(dev->tproto & SAS_PROTOCOL_SSP))
905 		return TMF_RESP_FUNC_ESUPP;
906 
907 	memcpy((u8 *)&ssp_task.LUN, lun, 8);
908 	return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
909 		tmf);
910 }
911 
912 /* retry commands by ha, by task and/or by device */
913 void pm8001_open_reject_retry(
914 	struct pm8001_hba_info *pm8001_ha,
915 	struct sas_task *task_to_close,
916 	struct pm8001_device *device_to_close)
917 {
918 	int i;
919 	unsigned long flags;
920 
921 	if (pm8001_ha == NULL)
922 		return;
923 
924 	spin_lock_irqsave(&pm8001_ha->lock, flags);
925 
926 	for (i = 0; i < PM8001_MAX_CCB; i++) {
927 		struct sas_task *task;
928 		struct task_status_struct *ts;
929 		struct pm8001_device *pm8001_dev;
930 		unsigned long flags1;
931 		u32 tag;
932 		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
933 
934 		pm8001_dev = ccb->device;
935 		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
936 			continue;
937 		if (!device_to_close) {
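			/*
			 * No target device given: verify that the ccb's device
			 * pointer really lies within this HBA's devices[] array
			 * before retrying the command.
			 */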
938 			uintptr_t d = (uintptr_t)pm8001_dev
939 					- (uintptr_t)&pm8001_ha->devices;
940 			if (((d % sizeof(*pm8001_dev)) != 0)
941 			 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
942 				continue;
943 		} else if (pm8001_dev != device_to_close)
944 			continue;
945 		tag = ccb->ccb_tag;
946 		if (!tag || (tag == 0xFFFFFFFF))
947 			continue;
948 		task = ccb->task;
949 		if (!task || !task->task_done)
950 			continue;
951 		if (task_to_close && (task != task_to_close))
952 			continue;
953 		ts = &task->task_status;
954 		ts->resp = SAS_TASK_COMPLETE;
955 		/* Force the midlayer to retry */
956 		ts->stat = SAS_OPEN_REJECT;
957 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
958 		if (pm8001_dev)
959 			atomic_dec(&pm8001_dev->running_req);
960 		spin_lock_irqsave(&task->task_state_lock, flags1);
961 		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
962 		task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
963 		task->task_state_flags |= SAS_TASK_STATE_DONE;
964 		if (unlikely((task->task_state_flags
965 				& SAS_TASK_STATE_ABORTED))) {
966 			spin_unlock_irqrestore(&task->task_state_lock,
967 				flags1);
968 			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
969 		} else {
970 			spin_unlock_irqrestore(&task->task_state_lock,
971 				flags1);
972 			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
973 			mb();/* in order to force CPU ordering */
974 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
975 			task->task_done(task);
976 			spin_lock_irqsave(&pm8001_ha->lock, flags);
977 		}
978 	}
979 
980 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
981 }
982 
983 /**
984  * pm8001_I_T_nexus_reset - I_T nexus reset: the standard mandates a link
985  * reset for ATA (type 0) and a hard reset for SSP (type 1); used only
986  * for RECOVERY.
987  * @dev: the device structure for the device to reset.
988  */
989 int pm8001_I_T_nexus_reset(struct domain_device *dev)
990 {
991 	int rc = TMF_RESP_FUNC_FAILED;
992 	struct pm8001_device *pm8001_dev;
993 	struct pm8001_hba_info *pm8001_ha;
994 	struct sas_phy *phy;
995 
996 	if (!dev || !dev->lldd_dev)
997 		return -ENODEV;
998 
999 	pm8001_dev = dev->lldd_dev;
1000 	pm8001_ha = pm8001_find_ha_by_dev(dev);
1001 	phy = sas_get_local_phy(dev);
1002 
1003 	if (dev_is_sata(dev)) {
1004 		if (scsi_is_sas_phy_local(phy)) {
1005 			rc = 0;
1006 			goto out;
1007 		}
1008 		rc = sas_phy_reset(phy, 1);
1009 		if (rc) {
1010 			pm8001_dbg(pm8001_ha, EH,
1011 				   "phy reset failed for device %x\n"
1012 				   "with rc %d\n", pm8001_dev->device_id, rc);
1013 			rc = TMF_RESP_FUNC_FAILED;
1014 			goto out;
1015 		}
1016 		msleep(2000);
1017 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1018 						     dev, 1, 0);
1019 		if (rc) {
1020 			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
1021 				   "with rc %d\n", pm8001_dev->device_id, rc);
1022 			rc = TMF_RESP_FUNC_FAILED;
1023 		}
1024 	} else {
1025 		rc = sas_phy_reset(phy, 1);
1026 		msleep(2000);
1027 	}
1028 	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
1029 		   pm8001_dev->device_id, rc);
1030  out:
1031 	sas_put_local_phy(phy);
1032 	return rc;
1033 }
1034 
1035 /*
1036  * This function handles the IT_NEXUS_XXX event or completion
1037  * status code for an SSP/SATA/SMP I/O request.
1038  */
1039 int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
1040 {
1041 	int rc = TMF_RESP_FUNC_FAILED;
1042 	struct pm8001_device *pm8001_dev;
1043 	struct pm8001_hba_info *pm8001_ha;
1044 	struct sas_phy *phy;
1045 
1046 	if (!dev || !dev->lldd_dev)
1047 		return -1;
1048 
1049 	pm8001_dev = dev->lldd_dev;
1050 	pm8001_ha = pm8001_find_ha_by_dev(dev);
1051 
1052 	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
1053 
1054 	phy = sas_get_local_phy(dev);
1055 
1056 	if (dev_is_sata(dev)) {
1057 		DECLARE_COMPLETION_ONSTACK(completion_setstate);
1058 		if (scsi_is_sas_phy_local(phy)) {
1059 			rc = 0;
1060 			goto out;
1061 		}
1062 		/* send internal ssp/sata/smp abort command to FW */
1063 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1064 						     dev, 1, 0);
1065 		msleep(100);
1066 
1067 		/* deregister the target device */
1068 		pm8001_dev_gone_notify(dev);
1069 		msleep(200);
1070 
1071 		/* send a phy reset to hard-reset the target */
1072 		rc = sas_phy_reset(phy, 1);
1073 		msleep(2000);
1074 		pm8001_dev->setds_completion = &completion_setstate;
1075 
1076 		wait_for_completion(&completion_setstate);
1077 	} else {
1078 		/* send internal ssp/sata/smp abort command to FW */
1079 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1080 						     dev, 1, 0);
1081 		msleep(100);
1082 
1083 		/* deregister the target device */
1084 		pm8001_dev_gone_notify(dev);
1085 		msleep(200);
1086 
1087 		/* send a phy reset to hard-reset the target */
1088 		rc = sas_phy_reset(phy, 1);
1089 		msleep(2000);
1090 	}
1091 	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
1092 		   pm8001_dev->device_id, rc);
1093 out:
1094 	sas_put_local_phy(phy);
1095 
1096 	return rc;
1097 }
1098 /* Mandatory in SAM-3: reset the specified LUN (logical unit reset) */
1099 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
1100 {
1101 	int rc = TMF_RESP_FUNC_FAILED;
1102 	struct pm8001_tmf_task tmf_task;
1103 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
1104 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1105 	DECLARE_COMPLETION_ONSTACK(completion_setstate);
1106 	if (dev_is_sata(dev)) {
1107 		struct sas_phy *phy = sas_get_local_phy(dev);
1108 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1109 						     dev, 1, 0);
1110 		rc = sas_phy_reset(phy, 1);
1111 		sas_put_local_phy(phy);
1112 		pm8001_dev->setds_completion = &completion_setstate;
1113 		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1114 			pm8001_dev, DS_OPERATIONAL);
1115 		wait_for_completion(&completion_setstate);
1116 	} else {
1117 		tmf_task.tmf = TMF_LU_RESET;
1118 		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1119 	}
1120 	/* If this failed, fall through to an I_T nexus reset */
1121 	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
1122 		   pm8001_dev->device_id, rc);
1123 	return rc;
1124 }
1125 
1126 /* optional SAM-3 */
1127 int pm8001_query_task(struct sas_task *task)
1128 {
1129 	u32 tag = 0xdeadbeef;
1130 	struct scsi_lun lun;
1131 	struct pm8001_tmf_task tmf_task;
1132 	int rc = TMF_RESP_FUNC_FAILED;
1133 	if (unlikely(!task || !task->lldd_task || !task->dev))
1134 		return rc;
1135 
1136 	if (task->task_proto & SAS_PROTOCOL_SSP) {
1137 		struct scsi_cmnd *cmnd = task->uldd_task;
1138 		struct domain_device *dev = task->dev;
1139 		struct pm8001_hba_info *pm8001_ha =
1140 			pm8001_find_ha_by_dev(dev);
1141 
1142 		int_to_scsilun(cmnd->device->lun, &lun);
1143 		rc = pm8001_find_tag(task, &tag);
1144 		if (rc == 0) {
1145 			rc = TMF_RESP_FUNC_FAILED;
1146 			return rc;
1147 		}
1148 		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
1149 		tmf_task.tmf = TMF_QUERY_TASK;
1150 		tmf_task.tag_of_task_to_be_managed = tag;
1151 
1152 		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1153 		switch (rc) {
1154 		/* The task is still in Lun, release it then */
1155 		case TMF_RESP_FUNC_SUCC:
1156 			pm8001_dbg(pm8001_ha, EH,
1157 				   "The task is still in Lun\n");
1158 			break;
1159 		/* The task is not in Lun or failed, reset the phy */
1160 		case TMF_RESP_FUNC_FAILED:
1161 		case TMF_RESP_FUNC_COMPLETE:
1162 			pm8001_dbg(pm8001_ha, EH,
1163 				   "The task is not in Lun or failed, reset the phy\n");
1164 			break;
1165 		}
1166 	}
1167 	pr_err("pm80xx: rc= %d\n", rc);
1168 	return rc;
1169 }
1170 
1171 /* Mandatory in SAM-3: abort the specified task; the task/ccb info still needs to be freed */
1172 int pm8001_abort_task(struct sas_task *task)
1173 {
1174 	unsigned long flags;
1175 	u32 tag;
1176 	struct domain_device *dev;
1177 	struct pm8001_hba_info *pm8001_ha;
1178 	struct scsi_lun lun;
1179 	struct pm8001_device *pm8001_dev;
1180 	struct pm8001_tmf_task tmf_task;
1181 	int rc = TMF_RESP_FUNC_FAILED, ret;
1182 	u32 phy_id;
1183 	struct sas_task_slow slow_task;
1184 
1185 	if (unlikely(!task || !task->lldd_task || !task->dev))
1186 		return TMF_RESP_FUNC_FAILED;
1187 
1188 	dev = task->dev;
1189 	pm8001_dev = dev->lldd_dev;
1190 	pm8001_ha = pm8001_find_ha_by_dev(dev);
1191 	phy_id = pm8001_dev->attached_phy;
1192 
1193 	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
1194 		// If the controller is seeing fatal errors
1195 		// abort task will not get a response from the controller
1196 		return TMF_RESP_FUNC_FAILED;
1197 	}
1198 
1199 	ret = pm8001_find_tag(task, &tag);
1200 	if (ret == 0) {
1201 		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
1202 		return TMF_RESP_FUNC_FAILED;
1203 	}
1204 	spin_lock_irqsave(&task->task_state_lock, flags);
1205 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1206 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1207 		return TMF_RESP_FUNC_COMPLETE;
1208 	}
1209 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
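	/*
	 * Borrow an on-stack slow_task so the abort path below has a
	 * completion to wait on when the task was not allocated as a
	 * slow task.
	 */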
1210 	if (task->slow_task == NULL) {
1211 		init_completion(&slow_task.completion);
1212 		task->slow_task = &slow_task;
1213 	}
1214 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1215 	if (task->task_proto & SAS_PROTOCOL_SSP) {
1216 		struct scsi_cmnd *cmnd = task->uldd_task;
1217 		int_to_scsilun(cmnd->device->lun, &lun);
1218 		tmf_task.tmf = TMF_ABORT_TASK;
1219 		tmf_task.tag_of_task_to_be_managed = tag;
1220 		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1221 		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1222 			pm8001_dev->sas_device, 0, tag);
1223 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
1224 		task->task_proto & SAS_PROTOCOL_STP) {
1225 		if (pm8001_ha->chip_id == chip_8006) {
1226 			DECLARE_COMPLETION_ONSTACK(completion_reset);
1227 			DECLARE_COMPLETION_ONSTACK(completion);
1228 			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
1229 
1230 			/* 1. Set Device state as Recovery */
1231 			pm8001_dev->setds_completion = &completion;
1232 			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1233 				pm8001_dev, DS_IN_RECOVERY);
1234 			wait_for_completion(&completion);
1235 
1236 			/* 2. Send Phy Control Hard Reset */
1237 			reinit_completion(&completion);
1238 			phy->port_reset_status = PORT_RESET_TMO;
1239 			phy->reset_success = false;
1240 			phy->enable_completion = &completion;
1241 			phy->reset_completion = &completion_reset;
1242 			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
1243 				PHY_HARD_RESET);
1244 			if (ret) {
1245 				phy->enable_completion = NULL;
1246 				phy->reset_completion = NULL;
1247 				goto out;
1248 			}
1249 
1250 			/* In the case of the reset timeout/fail we still
1251 			 * abort the command at the firmware. The assumption
1252 			 * here is that the drive is off doing something so
1253 			 * that it's not processing requests, and we want to
1254 			 * avoid getting a completion for this and either
1255 			 * leaking the task in libsas or losing the race and
1256 			 * getting a double free.
1257 			 */
1258 			pm8001_dbg(pm8001_ha, MSG,
1259 				   "Waiting for local phy ctl\n");
1260 			ret = wait_for_completion_timeout(&completion,
1261 					PM8001_TASK_TIMEOUT * HZ);
1262 			if (!ret || !phy->reset_success) {
1263 				phy->enable_completion = NULL;
1264 				phy->reset_completion = NULL;
1265 			} else {
1266 				/* 3. Wait for Port Reset complete or
1267 				 * Port reset TMO
1268 				 */
1269 				pm8001_dbg(pm8001_ha, MSG,
1270 					   "Waiting for Port reset\n");
1271 				ret = wait_for_completion_timeout(
1272 					&completion_reset,
1273 					PM8001_TASK_TIMEOUT * HZ);
1274 				if (!ret)
1275 					phy->reset_completion = NULL;
1276 				WARN_ON(phy->port_reset_status ==
1277 						PORT_RESET_TMO);
1278 				if (phy->port_reset_status == PORT_RESET_TMO) {
1279 					pm8001_dev_gone_notify(dev);
1280 					goto out;
1281 				}
1282 			}
1283 
1284 			/*
1285 			 * 4. SATA Abort ALL
1286 			 * we wait for the task to be aborted so that the task
1287 			 * is removed from the ccb. on success the caller is
1288 			 * going to free the task.
1289 			 */
1290 			ret = pm8001_exec_internal_task_abort(pm8001_ha,
1291 				pm8001_dev, pm8001_dev->sas_device, 1, tag);
1292 			if (ret)
1293 				goto out;
1294 			ret = wait_for_completion_timeout(
1295 				&task->slow_task->completion,
1296 				PM8001_TASK_TIMEOUT * HZ);
1297 			if (!ret)
1298 				goto out;
1299 
1300 			/* 5. Set Device State as Operational */
1301 			reinit_completion(&completion);
1302 			pm8001_dev->setds_completion = &completion;
1303 			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1304 				pm8001_dev, DS_OPERATIONAL);
1305 			wait_for_completion(&completion);
1306 		} else {
1307 			rc = pm8001_exec_internal_task_abort(pm8001_ha,
1308 				pm8001_dev, pm8001_dev->sas_device, 0, tag);
1309 		}
1310 		rc = TMF_RESP_FUNC_COMPLETE;
1311 	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
1312 		/* SMP */
1313 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1314 			pm8001_dev->sas_device, 0, tag);
1315 
1316 	}
1317 out:
1318 	spin_lock_irqsave(&task->task_state_lock, flags);
1319 	if (task->slow_task == &slow_task)
1320 		task->slow_task = NULL;
1321 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1322 	if (rc != TMF_RESP_FUNC_COMPLETE)
1323 		pm8001_info(pm8001_ha, "rc= %d\n", rc);
1324 	return rc;
1325 }
1326 
1327 int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
1328 {
1329 	struct pm8001_tmf_task tmf_task;
1330 
1331 	tmf_task.tmf = TMF_ABORT_TASK_SET;
1332 	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1333 }
1334 
1335 int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
1336 {
1337 	struct pm8001_tmf_task tmf_task;
1338 
1339 	tmf_task.tmf = TMF_CLEAR_ACA;
1340 	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1341 }
1342 
1343 int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1344 {
1345 	struct pm8001_tmf_task tmf_task;
1346 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
1347 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1348 
1349 	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
1350 		   pm8001_dev->device_id);
1351 	tmf_task.tmf = TMF_CLEAR_TASK_SET;
1352 	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1353 }
1354