1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11 
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14 
15 #define DEV_IS_GONE(dev) \
16 	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17 
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19 				u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 			     struct domain_device *device,
23 			     int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26 				void *funcdata);
27 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
28 				  struct domain_device *device);
29 static void hisi_sas_dev_gone(struct domain_device *device);
30 
31 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
32 {
33 	switch (fis->command) {
34 	case ATA_CMD_FPDMA_WRITE:
35 	case ATA_CMD_FPDMA_READ:
36 	case ATA_CMD_FPDMA_RECV:
37 	case ATA_CMD_FPDMA_SEND:
38 	case ATA_CMD_NCQ_NON_DATA:
39 		return HISI_SAS_SATA_PROTOCOL_FPDMA;
40 
41 	case ATA_CMD_DOWNLOAD_MICRO:
42 	case ATA_CMD_ID_ATA:
43 	case ATA_CMD_PMP_READ:
44 	case ATA_CMD_READ_LOG_EXT:
45 	case ATA_CMD_PIO_READ:
46 	case ATA_CMD_PIO_READ_EXT:
47 	case ATA_CMD_PMP_WRITE:
48 	case ATA_CMD_WRITE_LOG_EXT:
49 	case ATA_CMD_PIO_WRITE:
50 	case ATA_CMD_PIO_WRITE_EXT:
51 		return HISI_SAS_SATA_PROTOCOL_PIO;
52 
53 	case ATA_CMD_DSM:
54 	case ATA_CMD_DOWNLOAD_MICRO_DMA:
55 	case ATA_CMD_PMP_READ_DMA:
56 	case ATA_CMD_PMP_WRITE_DMA:
57 	case ATA_CMD_READ:
58 	case ATA_CMD_READ_EXT:
59 	case ATA_CMD_READ_LOG_DMA_EXT:
60 	case ATA_CMD_READ_STREAM_DMA_EXT:
61 	case ATA_CMD_TRUSTED_RCV_DMA:
62 	case ATA_CMD_TRUSTED_SND_DMA:
63 	case ATA_CMD_WRITE:
64 	case ATA_CMD_WRITE_EXT:
65 	case ATA_CMD_WRITE_FUA_EXT:
66 	case ATA_CMD_WRITE_QUEUED:
67 	case ATA_CMD_WRITE_LOG_DMA_EXT:
68 	case ATA_CMD_WRITE_STREAM_DMA_EXT:
69 	case ATA_CMD_ZAC_MGMT_IN:
70 		return HISI_SAS_SATA_PROTOCOL_DMA;
71 
72 	case ATA_CMD_CHK_POWER:
73 	case ATA_CMD_DEV_RESET:
74 	case ATA_CMD_EDD:
75 	case ATA_CMD_FLUSH:
76 	case ATA_CMD_FLUSH_EXT:
77 	case ATA_CMD_VERIFY:
78 	case ATA_CMD_VERIFY_EXT:
79 	case ATA_CMD_SET_FEATURES:
80 	case ATA_CMD_STANDBY:
81 	case ATA_CMD_STANDBYNOW1:
82 	case ATA_CMD_ZAC_MGMT_OUT:
83 		return HISI_SAS_SATA_PROTOCOL_NONDATA;
84 
85 	case ATA_CMD_SET_MAX:
86 		switch (fis->features) {
87 		case ATA_SET_MAX_PASSWD:
88 		case ATA_SET_MAX_LOCK:
89 			return HISI_SAS_SATA_PROTOCOL_PIO;
90 
91 		case ATA_SET_MAX_PASSWD_DMA:
92 		case ATA_SET_MAX_UNLOCK_DMA:
93 			return HISI_SAS_SATA_PROTOCOL_DMA;
94 
95 		default:
96 			return HISI_SAS_SATA_PROTOCOL_NONDATA;
97 		}
98 
99 	default:
100 	{
101 		if (direction == DMA_NONE)
102 			return HISI_SAS_SATA_PROTOCOL_NONDATA;
103 		return HISI_SAS_SATA_PROTOCOL_PIO;
104 	}
105 	}
106 }
107 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
108 
109 void hisi_sas_sata_done(struct sas_task *task,
110 			    struct hisi_sas_slot *slot)
111 {
112 	struct task_status_struct *ts = &task->task_status;
113 	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
114 	struct hisi_sas_status_buffer *status_buf =
115 			hisi_sas_status_buf_addr_mem(slot);
116 	u8 *iu = &status_buf->iu[0];
117 	struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
118 
119 	resp->frame_len = sizeof(struct dev_to_host_fis);
120 	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
121 
122 	ts->buf_valid_size = sizeof(*resp);
123 }
124 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
125 
126 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
127 {
128 	struct ata_queued_cmd *qc = task->uldd_task;
129 
130 	if (qc) {
131 		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
132 			qc->tf.command == ATA_CMD_FPDMA_READ) {
133 			*tag = qc->tag;
134 			return 1;
135 		}
136 	}
137 	return 0;
138 }
139 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
140 
141 /*
142  * This function assumes linkrate mask fits in 8 bits, which it
143  * does for all HW versions supported.
144  */
145 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
146 {
147 	u16 rate = 0;
148 	int i;
149 
150 	max -= SAS_LINK_RATE_1_5_GBPS;
151 	for (i = 0; i <= max; i++)
152 		rate |= 1 << (i * 2);
153 	return rate;
154 }
155 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
156 
157 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
158 {
159 	return device->port->ha->lldd_ha;
160 }
161 
162 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
163 {
164 	return container_of(sas_port, struct hisi_sas_port, sas_port);
165 }
166 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
167 
168 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
169 {
170 	int phy_no;
171 
172 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
173 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
174 }
175 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
176 
177 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
178 {
179 	void *bitmap = hisi_hba->slot_index_tags;
180 
181 	clear_bit(slot_idx, bitmap);
182 }
183 
184 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
185 {
186 	unsigned long flags;
187 
188 	if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
189 	    hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
190 		spin_lock_irqsave(&hisi_hba->lock, flags);
191 		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
192 		spin_unlock_irqrestore(&hisi_hba->lock, flags);
193 	}
194 }
195 
196 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
197 {
198 	void *bitmap = hisi_hba->slot_index_tags;
199 
200 	set_bit(slot_idx, bitmap);
201 }
202 
203 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
204 				     struct scsi_cmnd *scsi_cmnd)
205 {
206 	int index;
207 	void *bitmap = hisi_hba->slot_index_tags;
208 	unsigned long flags;
209 
210 	if (scsi_cmnd)
211 		return scsi_cmnd->request->tag;
212 
213 	spin_lock_irqsave(&hisi_hba->lock, flags);
214 	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
215 				   hisi_hba->last_slot_index + 1);
216 	if (index >= hisi_hba->slot_index_count) {
217 		index = find_next_zero_bit(bitmap,
218 				hisi_hba->slot_index_count,
219 				hisi_hba->hw->max_command_entries -
220 				HISI_SAS_RESERVED_IPTT_CNT);
221 		if (index >= hisi_hba->slot_index_count) {
222 			spin_unlock_irqrestore(&hisi_hba->lock, flags);
223 			return -SAS_QUEUE_FULL;
224 		}
225 	}
226 	hisi_sas_slot_index_set(hisi_hba, index);
227 	hisi_hba->last_slot_index = index;
228 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
229 
230 	return index;
231 }
232 
233 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
234 {
235 	int i;
236 
237 	for (i = 0; i < hisi_hba->slot_index_count; ++i)
238 		hisi_sas_slot_index_clear(hisi_hba, i);
239 }
240 
241 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
242 			     struct hisi_sas_slot *slot)
243 {
244 	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
245 	unsigned long flags;
246 
247 	if (task) {
248 		struct device *dev = hisi_hba->dev;
249 
250 		if (!task->lldd_task)
251 			return;
252 
253 		task->lldd_task = NULL;
254 
255 		if (!sas_protocol_ata(task->task_proto))
256 			if (slot->n_elem)
257 				dma_unmap_sg(dev, task->scatter,
258 					     task->num_scatter,
259 					     task->data_dir);
260 	}
261 
262 
263 	spin_lock_irqsave(&dq->lock, flags);
264 	list_del_init(&slot->entry);
265 	spin_unlock_irqrestore(&dq->lock, flags);
266 
267 	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
268 
269 	hisi_sas_slot_index_free(hisi_hba, slot->idx);
270 }
271 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
272 
273 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
274 				  struct hisi_sas_slot *slot)
275 {
276 	hisi_hba->hw->prep_smp(hisi_hba, slot);
277 }
278 
279 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
280 				  struct hisi_sas_slot *slot)
281 {
282 	hisi_hba->hw->prep_ssp(hisi_hba, slot);
283 }
284 
285 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
286 				  struct hisi_sas_slot *slot)
287 {
288 	hisi_hba->hw->prep_stp(hisi_hba, slot);
289 }
290 
291 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
292 		struct hisi_sas_slot *slot,
293 		int device_id, int abort_flag, int tag_to_abort)
294 {
295 	hisi_hba->hw->prep_abort(hisi_hba, slot,
296 			device_id, abort_flag, tag_to_abort);
297 }
298 
299 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
300 			       struct sas_task *task, int n_elem,
301 			       int n_elem_req, int n_elem_resp)
302 {
303 	struct device *dev = hisi_hba->dev;
304 
305 	if (!sas_protocol_ata(task->task_proto)) {
306 		if (task->num_scatter) {
307 			if (n_elem)
308 				dma_unmap_sg(dev, task->scatter,
309 					     task->num_scatter,
310 					     task->data_dir);
311 		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
312 			if (n_elem_req)
313 				dma_unmap_sg(dev, &task->smp_task.smp_req,
314 					     1, DMA_TO_DEVICE);
315 			if (n_elem_resp)
316 				dma_unmap_sg(dev, &task->smp_task.smp_resp,
317 					     1, DMA_FROM_DEVICE);
318 		}
319 	}
320 }
321 
322 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
323 			    struct sas_task *task, int *n_elem,
324 			    int *n_elem_req, int *n_elem_resp)
325 {
326 	struct device *dev = hisi_hba->dev;
327 	int rc;
328 
329 	if (sas_protocol_ata(task->task_proto)) {
330 		*n_elem = task->num_scatter;
331 	} else {
332 		unsigned int req_len, resp_len;
333 
334 		if (task->num_scatter) {
335 			*n_elem = dma_map_sg(dev, task->scatter,
336 					     task->num_scatter, task->data_dir);
337 			if (!*n_elem) {
338 				rc = -ENOMEM;
339 				goto prep_out;
340 			}
341 		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
342 			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
343 						 1, DMA_TO_DEVICE);
344 			if (!*n_elem_req) {
345 				rc = -ENOMEM;
346 				goto prep_out;
347 			}
348 			req_len = sg_dma_len(&task->smp_task.smp_req);
349 			if (req_len & 0x3) {
350 				rc = -EINVAL;
351 				goto err_out_dma_unmap;
352 			}
353 			*n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
354 						  1, DMA_FROM_DEVICE);
355 			if (!*n_elem_resp) {
356 				rc = -ENOMEM;
357 				goto err_out_dma_unmap;
358 			}
359 			resp_len = sg_dma_len(&task->smp_task.smp_resp);
360 			if (resp_len & 0x3) {
361 				rc = -EINVAL;
362 				goto err_out_dma_unmap;
363 			}
364 		}
365 	}
366 
367 	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
368 		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
369 			*n_elem);
370 		rc = -EINVAL;
371 		goto err_out_dma_unmap;
372 	}
373 	return 0;
374 
375 err_out_dma_unmap:
376 	/* It would be better to call dma_unmap_sg() here, but it's messy */
377 	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
378 			   *n_elem_req, *n_elem_resp);
379 prep_out:
380 	return rc;
381 }
382 
383 static int hisi_sas_task_prep(struct sas_task *task,
384 			      struct hisi_sas_dq **dq_pointer,
385 			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
386 			      int *pass)
387 {
388 	struct domain_device *device = task->dev;
389 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
390 	struct hisi_sas_device *sas_dev = device->lldd_dev;
391 	struct hisi_sas_port *port;
392 	struct hisi_sas_slot *slot;
393 	struct hisi_sas_cmd_hdr	*cmd_hdr_base;
394 	struct asd_sas_port *sas_port = device->port;
395 	struct device *dev = hisi_hba->dev;
396 	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
397 	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
398 	struct hisi_sas_dq *dq;
399 	unsigned long flags;
400 	int wr_q_index;
401 
402 	if (DEV_IS_GONE(sas_dev)) {
403 		if (sas_dev)
404 			dev_info(dev, "task prep: device %d not ready\n",
405 				 sas_dev->device_id);
406 		else
407 			dev_info(dev, "task prep: device %016llx not ready\n",
408 				 SAS_ADDR(device->sas_addr));
409 
410 		return -ECOMM;
411 	}
412 
413 	*dq_pointer = dq = sas_dev->dq;
414 
415 	port = to_hisi_sas_port(sas_port);
416 	if (port && !port->port_attached) {
417 		dev_info(dev, "task prep: %s port%d not attach device\n",
418 			 (dev_is_sata(device)) ?
419 			 "SATA/STP" : "SAS",
420 			 device->port->id);
421 
422 		return -ECOMM;
423 	}
424 
425 	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
426 			      &n_elem_req, &n_elem_resp);
427 	if (rc < 0)
428 		goto prep_out;
429 
430 	if (hisi_hba->hw->slot_index_alloc)
431 		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
432 	else {
433 		struct scsi_cmnd *scsi_cmnd = NULL;
434 
435 		if (task->uldd_task) {
436 			struct ata_queued_cmd *qc;
437 
438 			if (dev_is_sata(device)) {
439 				qc = task->uldd_task;
440 				scsi_cmnd = qc->scsicmd;
441 			} else {
442 				scsi_cmnd = task->uldd_task;
443 			}
444 		}
445 		rc  = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
446 	}
447 	if (rc < 0)
448 		goto err_out_dma_unmap;
449 
450 	slot_idx = rc;
451 	slot = &hisi_hba->slot_info[slot_idx];
452 
453 	spin_lock_irqsave(&dq->lock, flags);
454 	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
455 	if (wr_q_index < 0) {
456 		spin_unlock_irqrestore(&dq->lock, flags);
457 		rc = -EAGAIN;
458 		goto err_out_tag;
459 	}
460 
461 	list_add_tail(&slot->delivery, &dq->list);
462 	list_add_tail(&slot->entry, &sas_dev->list);
463 	spin_unlock_irqrestore(&dq->lock, flags);
464 
465 	dlvry_queue = dq->id;
466 	dlvry_queue_slot = wr_q_index;
467 
468 	slot->n_elem = n_elem;
469 	slot->dlvry_queue = dlvry_queue;
470 	slot->dlvry_queue_slot = dlvry_queue_slot;
471 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
472 	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
473 	slot->task = task;
474 	slot->port = port;
475 	slot->tmf = tmf;
476 	slot->is_internal = is_tmf;
477 	task->lldd_task = slot;
478 
479 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
480 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
481 	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
482 
483 	switch (task->task_proto) {
484 	case SAS_PROTOCOL_SMP:
485 		hisi_sas_task_prep_smp(hisi_hba, slot);
486 		break;
487 	case SAS_PROTOCOL_SSP:
488 		hisi_sas_task_prep_ssp(hisi_hba, slot);
489 		break;
490 	case SAS_PROTOCOL_SATA:
491 	case SAS_PROTOCOL_STP:
492 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
493 		hisi_sas_task_prep_ata(hisi_hba, slot);
494 		break;
495 	default:
496 		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
497 			task->task_proto);
498 		break;
499 	}
500 
501 	spin_lock_irqsave(&task->task_state_lock, flags);
502 	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
503 	spin_unlock_irqrestore(&task->task_state_lock, flags);
504 
505 	++(*pass);
506 	WRITE_ONCE(slot->ready, 1);
507 
508 	return 0;
509 
510 err_out_tag:
511 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
512 err_out_dma_unmap:
513 	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
514 			   n_elem_req, n_elem_resp);
515 prep_out:
516 	dev_err(dev, "task prep: failed[%d]!\n", rc);
517 	return rc;
518 }
519 
520 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
521 			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
522 {
523 	u32 rc;
524 	u32 pass = 0;
525 	unsigned long flags;
526 	struct hisi_hba *hisi_hba;
527 	struct device *dev;
528 	struct domain_device *device = task->dev;
529 	struct asd_sas_port *sas_port = device->port;
530 	struct hisi_sas_dq *dq = NULL;
531 
532 	if (!sas_port) {
533 		struct task_status_struct *ts = &task->task_status;
534 
535 		ts->resp = SAS_TASK_UNDELIVERED;
536 		ts->stat = SAS_PHY_DOWN;
537 		/*
538 		 * libsas will use dev->port, should
539 		 * not call task_done for sata
540 		 */
541 		if (device->dev_type != SAS_SATA_DEV)
542 			task->task_done(task);
543 		return -ECOMM;
544 	}
545 
546 	hisi_hba = dev_to_hisi_hba(device);
547 	dev = hisi_hba->dev;
548 
549 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
550 		if (in_softirq())
551 			return -EINVAL;
552 
553 		down(&hisi_hba->sem);
554 		up(&hisi_hba->sem);
555 	}
556 
557 	/* protect task_prep and start_delivery sequence */
558 	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
559 	if (rc)
560 		dev_err(dev, "task exec: failed[%d]!\n", rc);
561 
562 	if (likely(pass)) {
563 		spin_lock_irqsave(&dq->lock, flags);
564 		hisi_hba->hw->start_delivery(dq);
565 		spin_unlock_irqrestore(&dq->lock, flags);
566 	}
567 
568 	return rc;
569 }
570 
571 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
572 {
573 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
574 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
575 	struct sas_ha_struct *sas_ha;
576 
577 	if (!phy->phy_attached)
578 		return;
579 
580 	sas_ha = &hisi_hba->sha;
581 	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
582 
583 	if (sas_phy->phy) {
584 		struct sas_phy *sphy = sas_phy->phy;
585 
586 		sphy->negotiated_linkrate = sas_phy->linkrate;
587 		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
588 		sphy->maximum_linkrate_hw =
589 			hisi_hba->hw->phy_get_max_linkrate();
590 		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
591 			sphy->minimum_linkrate = phy->minimum_linkrate;
592 
593 		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
594 			sphy->maximum_linkrate = phy->maximum_linkrate;
595 	}
596 
597 	if (phy->phy_type & PORT_TYPE_SAS) {
598 		struct sas_identify_frame *id;
599 
600 		id = (struct sas_identify_frame *)phy->frame_rcvd;
601 		id->dev_type = phy->identify.device_type;
602 		id->initiator_bits = SAS_PROTOCOL_ALL;
603 		id->target_bits = phy->identify.target_port_protocols;
604 	} else if (phy->phy_type & PORT_TYPE_SATA) {
605 		/*Nothing*/
606 	}
607 
608 	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
609 	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
610 }
611 
612 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
613 {
614 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
615 	struct hisi_sas_device *sas_dev = NULL;
616 	unsigned long flags;
617 	int last = hisi_hba->last_dev_id;
618 	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
619 	int i;
620 
621 	spin_lock_irqsave(&hisi_hba->lock, flags);
622 	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
623 		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
624 			int queue = i % hisi_hba->queue_count;
625 			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
626 
627 			hisi_hba->devices[i].device_id = i;
628 			sas_dev = &hisi_hba->devices[i];
629 			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
630 			sas_dev->dev_type = device->dev_type;
631 			sas_dev->hisi_hba = hisi_hba;
632 			sas_dev->sas_device = device;
633 			sas_dev->dq = dq;
634 			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
635 			break;
636 		}
637 		i++;
638 	}
639 	hisi_hba->last_dev_id = i;
640 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
641 
642 	return sas_dev;
643 }
644 
645 #define HISI_SAS_SRST_ATA_DISK_CNT 3
646 static int hisi_sas_init_device(struct domain_device *device)
647 {
648 	int rc = TMF_RESP_FUNC_COMPLETE;
649 	struct scsi_lun lun;
650 	struct hisi_sas_tmf_task tmf_task;
651 	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
652 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
653 
654 	switch (device->dev_type) {
655 	case SAS_END_DEVICE:
656 		int_to_scsilun(0, &lun);
657 
658 		tmf_task.tmf = TMF_CLEAR_TASK_SET;
659 		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
660 						  &tmf_task);
661 		if (rc == TMF_RESP_FUNC_COMPLETE)
662 			hisi_sas_release_task(hisi_hba, device);
663 		break;
664 	case SAS_SATA_DEV:
665 	case SAS_SATA_PM:
666 	case SAS_SATA_PM_PORT:
667 	case SAS_SATA_PENDING:
668 		while (retry-- > 0) {
669 			rc = hisi_sas_softreset_ata_disk(device);
670 			if (!rc)
671 				break;
672 		}
673 		break;
674 	default:
675 		break;
676 	}
677 
678 	return rc;
679 }
680 
681 static int hisi_sas_dev_found(struct domain_device *device)
682 {
683 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
684 	struct domain_device *parent_dev = device->parent;
685 	struct hisi_sas_device *sas_dev;
686 	struct device *dev = hisi_hba->dev;
687 	int rc;
688 
689 	if (hisi_hba->hw->alloc_dev)
690 		sas_dev = hisi_hba->hw->alloc_dev(device);
691 	else
692 		sas_dev = hisi_sas_alloc_dev(device);
693 	if (!sas_dev) {
694 		dev_err(dev, "fail alloc dev: max support %d devices\n",
695 			HISI_SAS_MAX_DEVICES);
696 		return -EINVAL;
697 	}
698 
699 	device->lldd_dev = sas_dev;
700 	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
701 
702 	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
703 		int phy_no;
704 		u8 phy_num = parent_dev->ex_dev.num_phys;
705 		struct ex_phy *phy;
706 
707 		for (phy_no = 0; phy_no < phy_num; phy_no++) {
708 			phy = &parent_dev->ex_dev.ex_phy[phy_no];
709 			if (SAS_ADDR(phy->attached_sas_addr) ==
710 				SAS_ADDR(device->sas_addr))
711 				break;
712 		}
713 
714 		if (phy_no == phy_num) {
715 			dev_info(dev, "dev found: no attached "
716 				 "dev:%016llx at ex:%016llx\n",
717 				 SAS_ADDR(device->sas_addr),
718 				 SAS_ADDR(parent_dev->sas_addr));
719 			rc = -EINVAL;
720 			goto err_out;
721 		}
722 	}
723 
724 	dev_info(dev, "dev[%d:%x] found\n",
725 		sas_dev->device_id, sas_dev->dev_type);
726 
727 	rc = hisi_sas_init_device(device);
728 	if (rc)
729 		goto err_out;
730 	return 0;
731 
732 err_out:
733 	hisi_sas_dev_gone(device);
734 	return rc;
735 }
736 
737 int hisi_sas_slave_configure(struct scsi_device *sdev)
738 {
739 	struct domain_device *dev = sdev_to_domain_dev(sdev);
740 	int ret = sas_slave_configure(sdev);
741 
742 	if (ret)
743 		return ret;
744 	if (!dev_is_sata(dev))
745 		sas_change_queue_depth(sdev, 64);
746 
747 	return 0;
748 }
749 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
750 
751 void hisi_sas_scan_start(struct Scsi_Host *shost)
752 {
753 	struct hisi_hba *hisi_hba = shost_priv(shost);
754 
755 	hisi_hba->hw->phys_init(hisi_hba);
756 }
757 EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
758 
759 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
760 {
761 	struct hisi_hba *hisi_hba = shost_priv(shost);
762 	struct sas_ha_struct *sha = &hisi_hba->sha;
763 
764 	/* Wait for PHY up interrupt to occur */
765 	if (time < HZ)
766 		return 0;
767 
768 	sas_drain_work(sha);
769 	return 1;
770 }
771 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
772 
773 static void hisi_sas_phyup_work(struct work_struct *work)
774 {
775 	struct hisi_sas_phy *phy =
776 		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
777 	struct hisi_hba *hisi_hba = phy->hisi_hba;
778 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
779 	int phy_no = sas_phy->id;
780 
781 	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
782 	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
783 }
784 
785 static void hisi_sas_linkreset_work(struct work_struct *work)
786 {
787 	struct hisi_sas_phy *phy =
788 		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
789 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
790 
791 	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
792 }
793 
794 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
795 	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
796 	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
797 };
798 
799 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
800 				enum hisi_sas_phy_event event)
801 {
802 	struct hisi_hba *hisi_hba = phy->hisi_hba;
803 
804 	if (WARN_ON(event >= HISI_PHYES_NUM))
805 		return false;
806 
807 	return queue_work(hisi_hba->wq, &phy->works[event]);
808 }
809 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
810 
811 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
812 {
813 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
814 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
815 	int i;
816 
817 	phy->hisi_hba = hisi_hba;
818 	phy->port = NULL;
819 	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
820 	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
821 	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
822 	sas_phy->class = SAS;
823 	sas_phy->iproto = SAS_PROTOCOL_ALL;
824 	sas_phy->tproto = 0;
825 	sas_phy->type = PHY_TYPE_PHYSICAL;
826 	sas_phy->role = PHY_ROLE_INITIATOR;
827 	sas_phy->oob_mode = OOB_NOT_CONNECTED;
828 	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
829 	sas_phy->id = phy_no;
830 	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
831 	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
832 	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
833 	sas_phy->lldd_phy = phy;
834 
835 	for (i = 0; i < HISI_PHYES_NUM; i++)
836 		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
837 
838 	spin_lock_init(&phy->lock);
839 }
840 
841 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
842 {
843 	struct sas_ha_struct *sas_ha = sas_phy->ha;
844 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
845 	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
846 	struct asd_sas_port *sas_port = sas_phy->port;
847 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
848 	unsigned long flags;
849 
850 	if (!sas_port)
851 		return;
852 
853 	spin_lock_irqsave(&hisi_hba->lock, flags);
854 	port->port_attached = 1;
855 	port->id = phy->port_id;
856 	phy->port = port;
857 	sas_port->lldd_port = port;
858 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
859 }
860 
861 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
862 				     struct hisi_sas_slot *slot)
863 {
864 	if (task) {
865 		unsigned long flags;
866 		struct task_status_struct *ts;
867 
868 		ts = &task->task_status;
869 
870 		ts->resp = SAS_TASK_COMPLETE;
871 		ts->stat = SAS_ABORTED_TASK;
872 		spin_lock_irqsave(&task->task_state_lock, flags);
873 		task->task_state_flags &=
874 			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
875 		task->task_state_flags |= SAS_TASK_STATE_DONE;
876 		spin_unlock_irqrestore(&task->task_state_lock, flags);
877 	}
878 
879 	hisi_sas_slot_task_free(hisi_hba, task, slot);
880 }
881 
882 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
883 			struct domain_device *device)
884 {
885 	struct hisi_sas_slot *slot, *slot2;
886 	struct hisi_sas_device *sas_dev = device->lldd_dev;
887 
888 	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
889 		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
890 }
891 
892 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
893 {
894 	struct hisi_sas_device *sas_dev;
895 	struct domain_device *device;
896 	int i;
897 
898 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
899 		sas_dev = &hisi_hba->devices[i];
900 		device = sas_dev->sas_device;
901 
902 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
903 		    !device)
904 			continue;
905 
906 		hisi_sas_release_task(hisi_hba, device);
907 	}
908 }
909 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
910 
911 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
912 				struct domain_device *device)
913 {
914 	if (hisi_hba->hw->dereg_device)
915 		hisi_hba->hw->dereg_device(hisi_hba, device);
916 }
917 
918 static void hisi_sas_dev_gone(struct domain_device *device)
919 {
920 	struct hisi_sas_device *sas_dev = device->lldd_dev;
921 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
922 	struct device *dev = hisi_hba->dev;
923 
924 	dev_info(dev, "dev[%d:%x] is gone\n",
925 		 sas_dev->device_id, sas_dev->dev_type);
926 
927 	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
928 		hisi_sas_internal_task_abort(hisi_hba, device,
929 				     HISI_SAS_INT_ABT_DEV, 0);
930 
931 		hisi_sas_dereg_device(hisi_hba, device);
932 
933 		down(&hisi_hba->sem);
934 		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
935 		up(&hisi_hba->sem);
936 		device->lldd_dev = NULL;
937 	}
938 
939 	if (hisi_hba->hw->free_device)
940 		hisi_hba->hw->free_device(sas_dev);
941 	sas_dev->dev_type = SAS_PHY_UNUSED;
942 }
943 
944 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
945 {
946 	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
947 }
948 
949 static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
950 			struct sas_phy_linkrates *r)
951 {
952 	struct sas_phy_linkrates _r;
953 
954 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
955 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
956 	enum sas_linkrate min, max;
957 
958 	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
959 		max = sas_phy->phy->maximum_linkrate;
960 		min = r->minimum_linkrate;
961 	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
962 		max = r->maximum_linkrate;
963 		min = sas_phy->phy->minimum_linkrate;
964 	} else
965 		return;
966 
967 	_r.maximum_linkrate = max;
968 	_r.minimum_linkrate = min;
969 
970 	sas_phy->phy->maximum_linkrate = max;
971 	sas_phy->phy->minimum_linkrate = min;
972 
973 	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
974 	msleep(100);
975 	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
976 	hisi_hba->hw->phy_start(hisi_hba, phy_no);
977 }
978 
979 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
980 				void *funcdata)
981 {
982 	struct sas_ha_struct *sas_ha = sas_phy->ha;
983 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
984 	int phy_no = sas_phy->id;
985 
986 	switch (func) {
987 	case PHY_FUNC_HARD_RESET:
988 		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
989 		break;
990 
991 	case PHY_FUNC_LINK_RESET:
992 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
993 		msleep(100);
994 		hisi_hba->hw->phy_start(hisi_hba, phy_no);
995 		break;
996 
997 	case PHY_FUNC_DISABLE:
998 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
999 		break;
1000 
1001 	case PHY_FUNC_SET_LINK_RATE:
1002 		hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
1003 		break;
1004 	case PHY_FUNC_GET_EVENTS:
1005 		if (hisi_hba->hw->get_events) {
1006 			hisi_hba->hw->get_events(hisi_hba, phy_no);
1007 			break;
1008 		}
1009 		/* fallthru */
1010 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
1011 	default:
1012 		return -EOPNOTSUPP;
1013 	}
1014 	return 0;
1015 }
1016 
1017 static void hisi_sas_task_done(struct sas_task *task)
1018 {
1019 	del_timer(&task->slow_task->timer);
1020 	complete(&task->slow_task->completion);
1021 }
1022 
1023 static void hisi_sas_tmf_timedout(struct timer_list *t)
1024 {
1025 	struct sas_task_slow *slow = from_timer(slow, t, timer);
1026 	struct sas_task *task = slow->task;
1027 	unsigned long flags;
1028 	bool is_completed = true;
1029 
1030 	spin_lock_irqsave(&task->task_state_lock, flags);
1031 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1032 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1033 		is_completed = false;
1034 	}
1035 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1036 
1037 	if (!is_completed)
1038 		complete(&task->slow_task->completion);
1039 }
1040 
1041 #define TASK_TIMEOUT 20
1042 #define TASK_RETRY 3
1043 #define INTERNAL_ABORT_TIMEOUT 6
1044 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
1045 					   void *parameter, u32 para_len,
1046 					   struct hisi_sas_tmf_task *tmf)
1047 {
1048 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1049 	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
1050 	struct device *dev = hisi_hba->dev;
1051 	struct sas_task *task;
1052 	int res, retry;
1053 
1054 	for (retry = 0; retry < TASK_RETRY; retry++) {
1055 		task = sas_alloc_slow_task(GFP_KERNEL);
1056 		if (!task)
1057 			return -ENOMEM;
1058 
1059 		task->dev = device;
1060 		task->task_proto = device->tproto;
1061 
1062 		if (dev_is_sata(device)) {
1063 			task->ata_task.device_control_reg_update = 1;
1064 			memcpy(&task->ata_task.fis, parameter, para_len);
1065 		} else {
1066 			memcpy(&task->ssp_task, parameter, para_len);
1067 		}
1068 		task->task_done = hisi_sas_task_done;
1069 
1070 		task->slow_task->timer.function = hisi_sas_tmf_timedout;
1071 		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
1072 		add_timer(&task->slow_task->timer);
1073 
1074 		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
1075 
1076 		if (res) {
1077 			del_timer(&task->slow_task->timer);
1078 			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
1079 				res);
1080 			goto ex_err;
1081 		}
1082 
1083 		wait_for_completion(&task->slow_task->completion);
1084 		res = TMF_RESP_FUNC_FAILED;
1085 		/* Even TMF timed out, return direct. */
1086 		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1087 			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1088 				struct hisi_sas_slot *slot = task->lldd_task;
1089 
1090 				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
1091 				if (slot) {
1092 					struct hisi_sas_cq *cq =
1093 					       &hisi_hba->cq[slot->dlvry_queue];
1094 					/*
1095 					 * flush tasklet to avoid free'ing task
1096 					 * before using task in IO completion
1097 					 */
1098 					tasklet_kill(&cq->tasklet);
1099 					slot->task = NULL;
1100 				}
1101 
1102 				goto ex_err;
1103 			} else
1104 				dev_err(dev, "abort tmf: TMF task timeout\n");
1105 		}
1106 
1107 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1108 		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1109 			res = TMF_RESP_FUNC_COMPLETE;
1110 			break;
1111 		}
1112 
1113 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1114 			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1115 			res = TMF_RESP_FUNC_SUCC;
1116 			break;
1117 		}
1118 
1119 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1120 		      task->task_status.stat == SAS_DATA_UNDERRUN) {
1121 			/* no error, but return the number of bytes of
1122 			 * underrun
1123 			 */
1124 			dev_warn(dev, "abort tmf: task to dev %016llx "
1125 				 "resp: 0x%x sts 0x%x underrun\n",
1126 				 SAS_ADDR(device->sas_addr),
1127 				 task->task_status.resp,
1128 				 task->task_status.stat);
1129 			res = task->task_status.residual;
1130 			break;
1131 		}
1132 
1133 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1134 			task->task_status.stat == SAS_DATA_OVERRUN) {
1135 			dev_warn(dev, "abort tmf: blocked task error\n");
1136 			res = -EMSGSIZE;
1137 			break;
1138 		}
1139 
1140 		dev_warn(dev, "abort tmf: task to dev "
1141 			 "%016llx resp: 0x%x status 0x%x\n",
1142 			 SAS_ADDR(device->sas_addr), task->task_status.resp,
1143 			 task->task_status.stat);
1144 		sas_free_task(task);
1145 		task = NULL;
1146 	}
1147 ex_err:
1148 	if (retry == TASK_RETRY)
1149 		dev_warn(dev, "abort tmf: executing internal task failed!\n");
1150 	sas_free_task(task);
1151 	return res;
1152 }
1153 
1154 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1155 		bool reset, int pmp, u8 *fis)
1156 {
1157 	struct ata_taskfile tf;
1158 
1159 	ata_tf_init(dev, &tf);
1160 	if (reset)
1161 		tf.ctl |= ATA_SRST;
1162 	else
1163 		tf.ctl &= ~ATA_SRST;
1164 	tf.command = ATA_CMD_DEV_RESET;
1165 	ata_tf_to_fis(&tf, pmp, 0, fis);
1166 }
1167 
1168 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1169 {
1170 	u8 fis[20] = {0};
1171 	struct ata_port *ap = device->sata_dev.ap;
1172 	struct ata_link *link;
1173 	int rc = TMF_RESP_FUNC_FAILED;
1174 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1175 	struct device *dev = hisi_hba->dev;
1176 	int s = sizeof(struct host_to_dev_fis);
1177 
1178 	ata_for_each_link(link, ap, EDGE) {
1179 		int pmp = sata_srst_pmp(link);
1180 
1181 		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1182 		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
1183 		if (rc != TMF_RESP_FUNC_COMPLETE)
1184 			break;
1185 	}
1186 
1187 	if (rc == TMF_RESP_FUNC_COMPLETE) {
1188 		ata_for_each_link(link, ap, EDGE) {
1189 			int pmp = sata_srst_pmp(link);
1190 
1191 			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1192 			rc = hisi_sas_exec_internal_tmf_task(device, fis,
1193 							     s, NULL);
1194 			if (rc != TMF_RESP_FUNC_COMPLETE)
1195 				dev_err(dev, "ata disk de-reset failed\n");
1196 		}
1197 	} else {
1198 		dev_err(dev, "ata disk reset failed\n");
1199 	}
1200 
1201 	if (rc == TMF_RESP_FUNC_COMPLETE)
1202 		hisi_sas_release_task(hisi_hba, device);
1203 
1204 	return rc;
1205 }
1206 
1207 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
1208 				u8 *lun, struct hisi_sas_tmf_task *tmf)
1209 {
1210 	struct sas_ssp_task ssp_task;
1211 
1212 	if (!(device->tproto & SAS_PROTOCOL_SSP))
1213 		return TMF_RESP_FUNC_ESUPP;
1214 
1215 	memcpy(ssp_task.LUN, lun, 8);
1216 
1217 	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
1218 				sizeof(ssp_task), tmf);
1219 }
1220 
1221 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
1222 {
1223 	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
1224 	int i;
1225 
1226 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1227 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1228 		struct domain_device *device = sas_dev->sas_device;
1229 		struct asd_sas_port *sas_port;
1230 		struct hisi_sas_port *port;
1231 		struct hisi_sas_phy *phy = NULL;
1232 		struct asd_sas_phy *sas_phy;
1233 
1234 		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
1235 				|| !device || !device->port)
1236 			continue;
1237 
1238 		sas_port = device->port;
1239 		port = to_hisi_sas_port(sas_port);
1240 
1241 		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1242 			if (state & BIT(sas_phy->id)) {
1243 				phy = sas_phy->lldd_phy;
1244 				break;
1245 			}
1246 
1247 		if (phy) {
1248 			port->id = phy->port_id;
1249 
1250 			/* Update linkrate of directly attached device. */
1251 			if (!device->parent)
1252 				device->linkrate = phy->sas_phy.linkrate;
1253 
1254 			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1255 		} else
1256 			port->id = 0xff;
1257 	}
1258 }
1259 
1260 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1261 			      u32 state)
1262 {
1263 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1264 	struct asd_sas_port *_sas_port = NULL;
1265 	int phy_no;
1266 
1267 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1268 		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1269 		struct asd_sas_phy *sas_phy = &phy->sas_phy;
1270 		struct asd_sas_port *sas_port = sas_phy->port;
1271 		bool do_port_check = !!(_sas_port != sas_port);
1272 
1273 		if (!sas_phy->phy->enabled)
1274 			continue;
1275 
1276 		/* Report PHY state change to libsas */
1277 		if (state & BIT(phy_no)) {
1278 			if (do_port_check && sas_port && sas_port->port_dev) {
1279 				struct domain_device *dev = sas_port->port_dev;
1280 
1281 				_sas_port = sas_port;
1282 
1283 				if (DEV_IS_EXPANDER(dev->dev_type))
1284 					sas_ha->notify_port_event(sas_phy,
1285 							PORTE_BROADCAST_RCVD);
1286 			}
1287 		} else if (old_state & (1 << phy_no))
1288 			/* PHY down but was up before */
1289 			hisi_sas_phy_down(hisi_hba, phy_no, 0);
1290 
1291 	}
1292 }
1293 
1294 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1295 {
1296 	struct hisi_sas_device *sas_dev;
1297 	struct domain_device *device;
1298 	int i;
1299 
1300 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1301 		sas_dev = &hisi_hba->devices[i];
1302 		device = sas_dev->sas_device;
1303 
1304 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1305 			continue;
1306 
1307 		hisi_sas_init_device(device);
1308 	}
1309 }
1310 
1311 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
1312 					     struct asd_sas_port *sas_port,
1313 					     struct domain_device *device)
1314 {
1315 	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
1316 	struct ata_port *ap = device->sata_dev.ap;
1317 	struct device *dev = hisi_hba->dev;
1318 	int s = sizeof(struct host_to_dev_fis);
1319 	int rc = TMF_RESP_FUNC_FAILED;
1320 	struct asd_sas_phy *sas_phy;
1321 	struct ata_link *link;
1322 	u8 fis[20] = {0};
1323 	u32 state;
1324 
1325 	state = hisi_hba->hw->get_phys_state(hisi_hba);
1326 	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
1327 		if (!(state & BIT(sas_phy->id)))
1328 			continue;
1329 
1330 		ata_for_each_link(link, ap, EDGE) {
1331 			int pmp = sata_srst_pmp(link);
1332 
1333 			tmf_task.phy_id = sas_phy->id;
1334 			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1335 			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
1336 							     &tmf_task);
1337 			if (rc != TMF_RESP_FUNC_COMPLETE) {
1338 				dev_err(dev, "phy%d ata reset failed rc=%d\n",
1339 					sas_phy->id, rc);
1340 				break;
1341 			}
1342 		}
1343 	}
1344 }
1345 
1346 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
1347 {
1348 	struct device *dev = hisi_hba->dev;
1349 	int port_no, rc, i;
1350 
1351 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1352 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1353 		struct domain_device *device = sas_dev->sas_device;
1354 
1355 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1356 			continue;
1357 
1358 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1359 						  HISI_SAS_INT_ABT_DEV, 0);
1360 		if (rc < 0)
1361 			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
1362 	}
1363 
1364 	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
1365 		struct hisi_sas_port *port = &hisi_hba->port[port_no];
1366 		struct asd_sas_port *sas_port = &port->sas_port;
1367 		struct domain_device *port_dev = sas_port->port_dev;
1368 		struct domain_device *device;
1369 
1370 		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
1371 			continue;
1372 
1373 		/* Try to find a SATA device */
1374 		list_for_each_entry(device, &sas_port->dev_list,
1375 				    dev_list_node) {
1376 			if (dev_is_sata(device)) {
1377 				hisi_sas_send_ata_reset_each_phy(hisi_hba,
1378 								 sas_port,
1379 								 device);
1380 				break;
1381 			}
1382 		}
1383 	}
1384 }
1385 
1386 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
1387 {
1388 	struct Scsi_Host *shost = hisi_hba->shost;
1389 
1390 	down(&hisi_hba->sem);
1391 	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
1392 
1393 	scsi_block_requests(shost);
1394 	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
1395 
1396 	if (timer_pending(&hisi_hba->timer))
1397 		del_timer_sync(&hisi_hba->timer);
1398 
1399 	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1400 }
1401 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
1402 
1403 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
1404 {
1405 	struct Scsi_Host *shost = hisi_hba->shost;
1406 	u32 state;
1407 
1408 	/* Init and wait for PHYs to come up and all libsas event finished. */
1409 	hisi_hba->hw->phys_init(hisi_hba);
1410 	msleep(1000);
1411 	hisi_sas_refresh_port_id(hisi_hba);
1412 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1413 	up(&hisi_hba->sem);
1414 
1415 	if (hisi_hba->reject_stp_links_msk)
1416 		hisi_sas_terminate_stp_reject(hisi_hba);
1417 	hisi_sas_reset_init_all_devices(hisi_hba);
1418 	scsi_unblock_requests(shost);
1419 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1420 
1421 	state = hisi_hba->hw->get_phys_state(hisi_hba);
1422 	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
1423 }
1424 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
1425 
1426 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1427 {
1428 	struct device *dev = hisi_hba->dev;
1429 	struct Scsi_Host *shost = hisi_hba->shost;
1430 	int rc;
1431 
1432 	if (!hisi_hba->hw->soft_reset)
1433 		return -1;
1434 
1435 	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1436 		return -1;
1437 
1438 	dev_info(dev, "controller resetting...\n");
1439 	hisi_sas_controller_reset_prepare(hisi_hba);
1440 
1441 	rc = hisi_hba->hw->soft_reset(hisi_hba);
1442 	if (rc) {
1443 		dev_warn(dev, "controller reset failed (%d)\n", rc);
1444 		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1445 		up(&hisi_hba->sem);
1446 		scsi_unblock_requests(shost);
1447 		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1448 		return rc;
1449 	}
1450 
1451 	hisi_sas_controller_reset_done(hisi_hba);
1452 	dev_info(dev, "controller reset complete\n");
1453 
1454 	return 0;
1455 }
1456 
1457 static int hisi_sas_abort_task(struct sas_task *task)
1458 {
1459 	struct scsi_lun lun;
1460 	struct hisi_sas_tmf_task tmf_task;
1461 	struct domain_device *device = task->dev;
1462 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1463 	struct hisi_hba *hisi_hba;
1464 	struct device *dev;
1465 	int rc = TMF_RESP_FUNC_FAILED;
1466 	unsigned long flags;
1467 
1468 	if (!sas_dev)
1469 		return TMF_RESP_FUNC_FAILED;
1470 
1471 	hisi_hba = dev_to_hisi_hba(task->dev);
1472 	dev = hisi_hba->dev;
1473 
1474 	spin_lock_irqsave(&task->task_state_lock, flags);
1475 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1476 		struct hisi_sas_slot *slot = task->lldd_task;
1477 		struct hisi_sas_cq *cq;
1478 
1479 		if (slot) {
1480 			/*
1481 			 * flush tasklet to avoid free'ing task
1482 			 * before using task in IO completion
1483 			 */
1484 			cq = &hisi_hba->cq[slot->dlvry_queue];
1485 			tasklet_kill(&cq->tasklet);
1486 		}
1487 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1488 		rc = TMF_RESP_FUNC_COMPLETE;
1489 		goto out;
1490 	}
1491 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1492 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1493 
1494 	sas_dev->dev_status = HISI_SAS_DEV_EH;
1495 	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1496 		struct scsi_cmnd *cmnd = task->uldd_task;
1497 		struct hisi_sas_slot *slot = task->lldd_task;
1498 		u16 tag = slot->idx;
1499 		int rc2;
1500 
1501 		int_to_scsilun(cmnd->device->lun, &lun);
1502 		tmf_task.tmf = TMF_ABORT_TASK;
1503 		tmf_task.tag_of_task_to_be_managed = tag;
1504 
1505 		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1506 						  &tmf_task);
1507 
1508 		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1509 						   HISI_SAS_INT_ABT_CMD, tag);
1510 		if (rc2 < 0) {
1511 			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1512 			return TMF_RESP_FUNC_FAILED;
1513 		}
1514 
1515 		/*
1516 		 * If the TMF finds that the IO is not in the device and also
1517 		 * the internal abort does not succeed, then it is safe to
1518 		 * free the slot.
1519 		 * Note: if the internal abort succeeds then the slot
1520 		 * will have already been completed
1521 		 */
1522 		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1523 			if (task->lldd_task)
1524 				hisi_sas_do_release_task(hisi_hba, task, slot);
1525 		}
1526 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
1527 		task->task_proto & SAS_PROTOCOL_STP) {
1528 		if (task->dev->dev_type == SAS_SATA_DEV) {
1529 			rc = hisi_sas_internal_task_abort(hisi_hba, device,
1530 						HISI_SAS_INT_ABT_DEV, 0);
1531 			if (rc < 0) {
1532 				dev_err(dev, "abort task: internal abort failed\n");
1533 				goto out;
1534 			}
1535 			hisi_sas_dereg_device(hisi_hba, device);
1536 			rc = hisi_sas_softreset_ata_disk(device);
1537 		}
1538 	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1539 		/* SMP */
1540 		struct hisi_sas_slot *slot = task->lldd_task;
1541 		u32 tag = slot->idx;
1542 		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
1543 
1544 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1545 			     HISI_SAS_INT_ABT_CMD, tag);
1546 		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1547 					task->lldd_task) {
1548 			/*
1549 			 * flush tasklet to avoid free'ing task
1550 			 * before using task in IO completion
1551 			 */
1552 			tasklet_kill(&cq->tasklet);
1553 			slot->task = NULL;
1554 		}
1555 	}
1556 
1557 out:
1558 	if (rc != TMF_RESP_FUNC_COMPLETE)
1559 		dev_notice(dev, "abort task: rc=%d\n", rc);
1560 	return rc;
1561 }
1562 
1563 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1564 {
1565 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1566 	struct device *dev = hisi_hba->dev;
1567 	struct hisi_sas_tmf_task tmf_task;
1568 	int rc = TMF_RESP_FUNC_FAILED;
1569 
1570 	rc = hisi_sas_internal_task_abort(hisi_hba, device,
1571 					HISI_SAS_INT_ABT_DEV, 0);
1572 	if (rc < 0) {
1573 		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1574 		return TMF_RESP_FUNC_FAILED;
1575 	}
1576 	hisi_sas_dereg_device(hisi_hba, device);
1577 
1578 	tmf_task.tmf = TMF_ABORT_TASK_SET;
1579 	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1580 
1581 	if (rc == TMF_RESP_FUNC_COMPLETE)
1582 		hisi_sas_release_task(hisi_hba, device);
1583 
1584 	return rc;
1585 }
1586 
1587 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1588 {
1589 	int rc = TMF_RESP_FUNC_FAILED;
1590 	struct hisi_sas_tmf_task tmf_task;
1591 
1592 	tmf_task.tmf = TMF_CLEAR_ACA;
1593 	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1594 
1595 	return rc;
1596 }
1597 
1598 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1599 {
1600 	struct sas_phy *local_phy = sas_get_local_phy(device);
1601 	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1602 			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1603 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1604 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1605 	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
1606 	struct hisi_sas_phy *phy = container_of(sas_phy,
1607 			struct hisi_sas_phy, sas_phy);
1608 	DECLARE_COMPLETION_ONSTACK(phyreset);
1609 
1610 	if (scsi_is_sas_phy_local(local_phy)) {
1611 		phy->in_reset = 1;
1612 		phy->reset_completion = &phyreset;
1613 	}
1614 
1615 	rc = sas_phy_reset(local_phy, reset_type);
1616 	sas_put_local_phy(local_phy);
1617 
1618 	if (scsi_is_sas_phy_local(local_phy)) {
1619 		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
1620 		unsigned long flags;
1621 
1622 		spin_lock_irqsave(&phy->lock, flags);
1623 		phy->reset_completion = NULL;
1624 		phy->in_reset = 0;
1625 		spin_unlock_irqrestore(&phy->lock, flags);
1626 
1627 		/* report PHY down if timed out */
1628 		if (!ret)
1629 			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
1630 	} else
1631 		msleep(2000);
1632 
1633 	return rc;
1634 }
1635 
1636 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1637 {
1638 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1639 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1640 	struct device *dev = hisi_hba->dev;
1641 	int rc = TMF_RESP_FUNC_FAILED;
1642 
1643 	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1644 		return TMF_RESP_FUNC_FAILED;
1645 	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1646 
1647 	rc = hisi_sas_internal_task_abort(hisi_hba, device,
1648 					HISI_SAS_INT_ABT_DEV, 0);
1649 	if (rc < 0) {
1650 		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1651 		return TMF_RESP_FUNC_FAILED;
1652 	}
1653 	hisi_sas_dereg_device(hisi_hba, device);
1654 
1655 	rc = hisi_sas_debug_I_T_nexus_reset(device);
1656 
1657 	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
1658 		hisi_sas_release_task(hisi_hba, device);
1659 
1660 	return rc;
1661 }
1662 
1663 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1664 {
1665 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1666 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1667 	struct device *dev = hisi_hba->dev;
1668 	int rc = TMF_RESP_FUNC_FAILED;
1669 
1670 	sas_dev->dev_status = HISI_SAS_DEV_EH;
1671 	if (dev_is_sata(device)) {
1672 		struct sas_phy *phy;
1673 
1674 		/* Clear internal IO and then hardreset */
1675 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1676 						  HISI_SAS_INT_ABT_DEV, 0);
1677 		if (rc < 0) {
1678 			dev_err(dev, "lu_reset: internal abort failed\n");
1679 			goto out;
1680 		}
1681 		hisi_sas_dereg_device(hisi_hba, device);
1682 
1683 		phy = sas_get_local_phy(device);
1684 
1685 		rc = sas_phy_reset(phy, 1);
1686 
1687 		if (rc == 0)
1688 			hisi_sas_release_task(hisi_hba, device);
1689 		sas_put_local_phy(phy);
1690 	} else {
1691 		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };
1692 
1693 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1694 						HISI_SAS_INT_ABT_DEV, 0);
1695 		if (rc < 0) {
1696 			dev_err(dev, "lu_reset: internal abort failed\n");
1697 			goto out;
1698 		}
1699 		hisi_sas_dereg_device(hisi_hba, device);
1700 
1701 		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1702 		if (rc == TMF_RESP_FUNC_COMPLETE)
1703 			hisi_sas_release_task(hisi_hba, device);
1704 	}
1705 out:
1706 	if (rc != TMF_RESP_FUNC_COMPLETE)
1707 		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1708 			     sas_dev->device_id, rc);
1709 	return rc;
1710 }
1711 
1712 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1713 {
1714 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1715 	struct device *dev = hisi_hba->dev;
1716 	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1717 	int rc, i;
1718 
1719 	queue_work(hisi_hba->wq, &r.work);
1720 	wait_for_completion(r.completion);
1721 	if (!r.done)
1722 		return TMF_RESP_FUNC_FAILED;
1723 
1724 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1725 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1726 		struct domain_device *device = sas_dev->sas_device;
1727 
1728 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
1729 		    DEV_IS_EXPANDER(device->dev_type))
1730 			continue;
1731 
1732 		rc = hisi_sas_debug_I_T_nexus_reset(device);
1733 		if (rc != TMF_RESP_FUNC_COMPLETE)
1734 			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
1735 				 sas_dev->device_id, rc);
1736 	}
1737 
1738 	hisi_sas_release_tasks(hisi_hba);
1739 
1740 	return TMF_RESP_FUNC_COMPLETE;
1741 }
1742 
1743 static int hisi_sas_query_task(struct sas_task *task)
1744 {
1745 	struct scsi_lun lun;
1746 	struct hisi_sas_tmf_task tmf_task;
1747 	int rc = TMF_RESP_FUNC_FAILED;
1748 
1749 	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1750 		struct scsi_cmnd *cmnd = task->uldd_task;
1751 		struct domain_device *device = task->dev;
1752 		struct hisi_sas_slot *slot = task->lldd_task;
1753 		u32 tag = slot->idx;
1754 
1755 		int_to_scsilun(cmnd->device->lun, &lun);
1756 		tmf_task.tmf = TMF_QUERY_TASK;
1757 		tmf_task.tag_of_task_to_be_managed = tag;
1758 
1759 		rc = hisi_sas_debug_issue_ssp_tmf(device,
1760 						  lun.scsi_lun,
1761 						  &tmf_task);
1762 		switch (rc) {
1763 		/* The task is still in Lun, release it then */
1764 		case TMF_RESP_FUNC_SUCC:
1765 		/* The task is not in Lun or failed, reset the phy */
1766 		case TMF_RESP_FUNC_FAILED:
1767 		case TMF_RESP_FUNC_COMPLETE:
1768 			break;
1769 		default:
1770 			rc = TMF_RESP_FUNC_FAILED;
1771 			break;
1772 		}
1773 	}
1774 	return rc;
1775 }
1776 
1777 static int
1778 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1779 				  struct sas_task *task, int abort_flag,
1780 				  int task_tag)
1781 {
1782 	struct domain_device *device = task->dev;
1783 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1784 	struct device *dev = hisi_hba->dev;
1785 	struct hisi_sas_port *port;
1786 	struct hisi_sas_slot *slot;
1787 	struct asd_sas_port *sas_port = device->port;
1788 	struct hisi_sas_cmd_hdr *cmd_hdr_base;
1789 	struct hisi_sas_dq *dq = sas_dev->dq;
1790 	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1791 	unsigned long flags, flags_dq = 0;
1792 	int wr_q_index;
1793 
1794 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
1795 		return -EINVAL;
1796 
1797 	if (!device->port)
1798 		return -1;
1799 
1800 	port = to_hisi_sas_port(sas_port);
1801 
1802 	/* simply get a slot and send abort command */
1803 	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
1804 	if (rc < 0)
1805 		goto err_out;
1806 
1807 	slot_idx = rc;
1808 	slot = &hisi_hba->slot_info[slot_idx];
1809 
1810 	spin_lock_irqsave(&dq->lock, flags_dq);
1811 	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1812 	if (wr_q_index < 0) {
1813 		spin_unlock_irqrestore(&dq->lock, flags_dq);
1814 		rc = -EAGAIN;
1815 		goto err_out_tag;
1816 	}
1817 	list_add_tail(&slot->delivery, &dq->list);
1818 	spin_unlock_irqrestore(&dq->lock, flags_dq);
1819 
1820 	dlvry_queue = dq->id;
1821 	dlvry_queue_slot = wr_q_index;
1822 
1823 	slot->n_elem = n_elem;
1824 	slot->dlvry_queue = dlvry_queue;
1825 	slot->dlvry_queue_slot = dlvry_queue_slot;
1826 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1827 	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1828 	slot->task = task;
1829 	slot->port = port;
1830 	slot->is_internal = true;
1831 	task->lldd_task = slot;
1832 
1833 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1834 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1835 	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
1836 
1837 	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1838 				      abort_flag, task_tag);
1839 
1840 	spin_lock_irqsave(&task->task_state_lock, flags);
1841 	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1842 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1843 	WRITE_ONCE(slot->ready, 1);
1844 	/* send abort command to the chip */
1845 	spin_lock_irqsave(&dq->lock, flags);
1846 	list_add_tail(&slot->entry, &sas_dev->list);
1847 	hisi_hba->hw->start_delivery(dq);
1848 	spin_unlock_irqrestore(&dq->lock, flags);
1849 
1850 	return 0;
1851 
1852 err_out_tag:
1853 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
1854 err_out:
1855 	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1856 
1857 	return rc;
1858 }
1859 
1860 /**
1861  * hisi_sas_internal_task_abort -- execute an internal
1862  * abort command for single IO command or a device
1863  * @hisi_hba: host controller struct
1864  * @device: domain device
1865  * @abort_flag: mode of operation, device or single IO
1866  * @tag: tag of IO to be aborted (only relevant to single
1867  *       IO mode)
1868  */
1869 static int
1870 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1871 			     struct domain_device *device,
1872 			     int abort_flag, int tag)
1873 {
1874 	struct sas_task *task;
1875 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1876 	struct device *dev = hisi_hba->dev;
1877 	int res;
1878 
1879 	/*
1880 	 * The interface is not realized means this HW don't support internal
1881 	 * abort, or don't need to do internal abort. Then here, we return
1882 	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
1883 	 * the internal abort has been executed and returned CQ.
1884 	 */
1885 	if (!hisi_hba->hw->prep_abort)
1886 		return TMF_RESP_FUNC_FAILED;
1887 
1888 	task = sas_alloc_slow_task(GFP_KERNEL);
1889 	if (!task)
1890 		return -ENOMEM;
1891 
1892 	task->dev = device;
1893 	task->task_proto = device->tproto;
1894 	task->task_done = hisi_sas_task_done;
1895 	task->slow_task->timer.function = hisi_sas_tmf_timedout;
1896 	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
1897 	add_timer(&task->slow_task->timer);
1898 
1899 	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1900 						task, abort_flag, tag);
1901 	if (res) {
1902 		del_timer(&task->slow_task->timer);
1903 		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1904 			res);
1905 		goto exit;
1906 	}
1907 	wait_for_completion(&task->slow_task->completion);
1908 	res = TMF_RESP_FUNC_FAILED;
1909 
1910 	/* Internal abort timed out */
1911 	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1912 		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1913 			struct hisi_sas_slot *slot = task->lldd_task;
1914 
1915 			if (slot) {
1916 				struct hisi_sas_cq *cq =
1917 					&hisi_hba->cq[slot->dlvry_queue];
1918 				/*
1919 				 * flush tasklet to avoid free'ing task
1920 				 * before using task in IO completion
1921 				 */
1922 				tasklet_kill(&cq->tasklet);
1923 				slot->task = NULL;
1924 			}
1925 			dev_err(dev, "internal task abort: timeout and not done.\n");
1926 			res = -EIO;
1927 			goto exit;
1928 		} else
1929 			dev_err(dev, "internal task abort: timeout.\n");
1930 	}
1931 
1932 	if (task->task_status.resp == SAS_TASK_COMPLETE &&
1933 		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1934 		res = TMF_RESP_FUNC_COMPLETE;
1935 		goto exit;
1936 	}
1937 
1938 	if (task->task_status.resp == SAS_TASK_COMPLETE &&
1939 		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1940 		res = TMF_RESP_FUNC_SUCC;
1941 		goto exit;
1942 	}
1943 
1944 exit:
1945 	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1946 		"resp: 0x%x sts 0x%x\n",
1947 		SAS_ADDR(device->sas_addr),
1948 		task,
1949 		task->task_status.resp, /* 0 is complete, -1 is undelivered */
1950 		task->task_status.stat);
1951 	sas_free_task(task);
1952 
1953 	return res;
1954 }
1955 
1956 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1957 {
1958 	hisi_sas_port_notify_formed(sas_phy);
1959 }
1960 
1961 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1962 			u8 reg_index, u8 reg_count, u8 *write_data)
1963 {
1964 	struct hisi_hba *hisi_hba = sha->lldd_ha;
1965 
1966 	if (!hisi_hba->hw->write_gpio)
1967 		return -EOPNOTSUPP;
1968 
1969 	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1970 				reg_index, reg_count, write_data);
1971 }
1972 
1973 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1974 {
1975 	phy->phy_attached = 0;
1976 	phy->phy_type = 0;
1977 	phy->port = NULL;
1978 }
1979 
1980 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1981 {
1982 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1983 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
1984 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1985 	struct device *dev = hisi_hba->dev;
1986 
1987 	if (rdy) {
1988 		/* Phy down but ready */
1989 		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1990 		hisi_sas_port_notify_formed(sas_phy);
1991 	} else {
1992 		struct hisi_sas_port *port  = phy->port;
1993 
1994 		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
1995 		    phy->in_reset) {
1996 			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
1997 			return;
1998 		}
1999 		/* Phy down and not ready */
2000 		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
2001 		sas_phy_disconnected(sas_phy);
2002 
2003 		if (port) {
2004 			if (phy->phy_type & PORT_TYPE_SAS) {
2005 				int port_id = port->id;
2006 
2007 				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
2008 								       port_id))
2009 					port->port_attached = 0;
2010 			} else if (phy->phy_type & PORT_TYPE_SATA)
2011 				port->port_attached = 0;
2012 		}
2013 		hisi_sas_phy_disconnected(phy);
2014 	}
2015 }
2016 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
2017 
2018 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
2019 {
2020 	int i;
2021 
2022 	for (i = 0; i < hisi_hba->queue_count; i++) {
2023 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2024 
2025 		tasklet_kill(&cq->tasklet);
2026 	}
2027 }
2028 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
2029 
2030 struct scsi_transport_template *hisi_sas_stt;
2031 EXPORT_SYMBOL_GPL(hisi_sas_stt);
2032 
2033 static struct sas_domain_function_template hisi_sas_transport_ops = {
2034 	.lldd_dev_found		= hisi_sas_dev_found,
2035 	.lldd_dev_gone		= hisi_sas_dev_gone,
2036 	.lldd_execute_task	= hisi_sas_queue_command,
2037 	.lldd_control_phy	= hisi_sas_control_phy,
2038 	.lldd_abort_task	= hisi_sas_abort_task,
2039 	.lldd_abort_task_set	= hisi_sas_abort_task_set,
2040 	.lldd_clear_aca		= hisi_sas_clear_aca,
2041 	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
2042 	.lldd_lu_reset		= hisi_sas_lu_reset,
2043 	.lldd_query_task	= hisi_sas_query_task,
2044 	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
2045 	.lldd_port_formed	= hisi_sas_port_formed,
2046 	.lldd_write_gpio	= hisi_sas_write_gpio,
2047 };
2048 
2049 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
2050 {
2051 	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
2052 
2053 	for (i = 0; i < hisi_hba->queue_count; i++) {
2054 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2055 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2056 
2057 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2058 		memset(hisi_hba->cmd_hdr[i], 0, s);
2059 		dq->wr_point = 0;
2060 
2061 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2062 		memset(hisi_hba->complete_hdr[i], 0, s);
2063 		cq->rd_point = 0;
2064 	}
2065 
2066 	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
2067 	memset(hisi_hba->initial_fis, 0, s);
2068 
2069 	s = max_command_entries * sizeof(struct hisi_sas_iost);
2070 	memset(hisi_hba->iost, 0, s);
2071 
2072 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2073 	memset(hisi_hba->breakpoint, 0, s);
2074 
2075 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2076 	memset(hisi_hba->sata_breakpoint, 0, s);
2077 }
2078 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
2079 
2080 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
2081 {
2082 	struct device *dev = hisi_hba->dev;
2083 	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
2084 	int max_command_entries_ru, sz_slot_buf_ru;
2085 	int blk_cnt, slots_per_blk;
2086 
2087 	sema_init(&hisi_hba->sem, 1);
2088 	spin_lock_init(&hisi_hba->lock);
2089 	for (i = 0; i < hisi_hba->n_phy; i++) {
2090 		hisi_sas_phy_init(hisi_hba, i);
2091 		hisi_hba->port[i].port_attached = 0;
2092 		hisi_hba->port[i].id = -1;
2093 	}
2094 
2095 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
2096 		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
2097 		hisi_hba->devices[i].device_id = i;
2098 		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
2099 	}
2100 
2101 	for (i = 0; i < hisi_hba->queue_count; i++) {
2102 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2103 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2104 
2105 		/* Completion queue structure */
2106 		cq->id = i;
2107 		cq->hisi_hba = hisi_hba;
2108 
2109 		/* Delivery queue structure */
2110 		spin_lock_init(&dq->lock);
2111 		INIT_LIST_HEAD(&dq->list);
2112 		dq->id = i;
2113 		dq->hisi_hba = hisi_hba;
2114 
2115 		/* Delivery queue */
2116 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2117 		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
2118 						&hisi_hba->cmd_hdr_dma[i],
2119 						GFP_KERNEL);
2120 		if (!hisi_hba->cmd_hdr[i])
2121 			goto err_out;
2122 
2123 		/* Completion queue */
2124 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2125 		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
2126 						&hisi_hba->complete_hdr_dma[i],
2127 						GFP_KERNEL);
2128 		if (!hisi_hba->complete_hdr[i])
2129 			goto err_out;
2130 	}
2131 
2132 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
2133 	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
2134 					     GFP_KERNEL);
2135 	if (!hisi_hba->itct)
2136 		goto err_out;
2137 	memset(hisi_hba->itct, 0, s);
2138 
2139 	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
2140 					   sizeof(struct hisi_sas_slot),
2141 					   GFP_KERNEL);
2142 	if (!hisi_hba->slot_info)
2143 		goto err_out;
2144 
2145 	/* roundup to avoid overly large block size */
2146 	max_command_entries_ru = roundup(max_command_entries, 64);
2147 	sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
2148 	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
2149 	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
2150 	slots_per_blk = s / sz_slot_buf_ru;
2151 	for (i = 0; i < blk_cnt; i++) {
2152 		struct hisi_sas_slot_buf_table *buf;
2153 		dma_addr_t buf_dma;
2154 		int slot_index = i * slots_per_blk;
2155 
2156 		buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
2157 		if (!buf)
2158 			goto err_out;
2159 		memset(buf, 0, s);
2160 
2161 		for (j = 0; j < slots_per_blk; j++, slot_index++) {
2162 			struct hisi_sas_slot *slot;
2163 
2164 			slot = &hisi_hba->slot_info[slot_index];
2165 			slot->buf = buf;
2166 			slot->buf_dma = buf_dma;
2167 			slot->idx = slot_index;
2168 
2169 			buf++;
2170 			buf_dma += sizeof(*buf);
2171 		}
2172 	}
2173 
2174 	s = max_command_entries * sizeof(struct hisi_sas_iost);
2175 	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
2176 					     GFP_KERNEL);
2177 	if (!hisi_hba->iost)
2178 		goto err_out;
2179 
2180 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2181 	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
2182 						   &hisi_hba->breakpoint_dma,
2183 						   GFP_KERNEL);
2184 	if (!hisi_hba->breakpoint)
2185 		goto err_out;
2186 
2187 	hisi_hba->slot_index_count = max_command_entries;
2188 	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
2189 	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
2190 	if (!hisi_hba->slot_index_tags)
2191 		goto err_out;
2192 
2193 	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
2194 	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
2195 						    &hisi_hba->initial_fis_dma,
2196 						    GFP_KERNEL);
2197 	if (!hisi_hba->initial_fis)
2198 		goto err_out;
2199 
2200 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2201 	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
2202 					&hisi_hba->sata_breakpoint_dma,
2203 					GFP_KERNEL);
2204 	if (!hisi_hba->sata_breakpoint)
2205 		goto err_out;
2206 	hisi_sas_init_mem(hisi_hba);
2207 
2208 	hisi_sas_slot_index_init(hisi_hba);
2209 	hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
2210 		HISI_SAS_RESERVED_IPTT_CNT;
2211 
2212 	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
2213 	if (!hisi_hba->wq) {
2214 		dev_err(dev, "sas_alloc: failed to create workqueue\n");
2215 		goto err_out;
2216 	}
2217 
2218 	return 0;
2219 err_out:
2220 	return -ENOMEM;
2221 }
2222 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
2223 
2224 void hisi_sas_free(struct hisi_hba *hisi_hba)
2225 {
2226 	if (hisi_hba->wq)
2227 		destroy_workqueue(hisi_hba->wq);
2228 }
2229 EXPORT_SYMBOL_GPL(hisi_sas_free);
2230 
2231 void hisi_sas_rst_work_handler(struct work_struct *work)
2232 {
2233 	struct hisi_hba *hisi_hba =
2234 		container_of(work, struct hisi_hba, rst_work);
2235 
2236 	hisi_sas_controller_reset(hisi_hba);
2237 }
2238 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2239 
2240 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2241 {
2242 	struct hisi_sas_rst *rst =
2243 		container_of(work, struct hisi_sas_rst, work);
2244 
2245 	if (!hisi_sas_controller_reset(rst->hisi_hba))
2246 		rst->done = true;
2247 	complete(rst->completion);
2248 }
2249 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2250 
2251 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2252 {
2253 	struct device *dev = hisi_hba->dev;
2254 	struct platform_device *pdev = hisi_hba->platform_dev;
2255 	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2256 	struct clk *refclk;
2257 
2258 	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2259 					  SAS_ADDR_SIZE)) {
2260 		dev_err(dev, "could not get property sas-addr\n");
2261 		return -ENOENT;
2262 	}
2263 
2264 	if (np) {
2265 		/*
2266 		 * These properties are only required for platform device-based
2267 		 * controller with DT firmware.
2268 		 */
2269 		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2270 					"hisilicon,sas-syscon");
2271 		if (IS_ERR(hisi_hba->ctrl)) {
2272 			dev_err(dev, "could not get syscon\n");
2273 			return -ENOENT;
2274 		}
2275 
2276 		if (device_property_read_u32(dev, "ctrl-reset-reg",
2277 					     &hisi_hba->ctrl_reset_reg)) {
2278 			dev_err(dev,
2279 				"could not get property ctrl-reset-reg\n");
2280 			return -ENOENT;
2281 		}
2282 
2283 		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2284 					     &hisi_hba->ctrl_reset_sts_reg)) {
2285 			dev_err(dev,
2286 				"could not get property ctrl-reset-sts-reg\n");
2287 			return -ENOENT;
2288 		}
2289 
2290 		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2291 					     &hisi_hba->ctrl_clock_ena_reg)) {
2292 			dev_err(dev,
2293 				"could not get property ctrl-clock-ena-reg\n");
2294 			return -ENOENT;
2295 		}
2296 	}
2297 
2298 	refclk = devm_clk_get(dev, NULL);
2299 	if (IS_ERR(refclk))
2300 		dev_dbg(dev, "no ref clk property\n");
2301 	else
2302 		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2303 
2304 	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2305 		dev_err(dev, "could not get property phy-count\n");
2306 		return -ENOENT;
2307 	}
2308 
2309 	if (device_property_read_u32(dev, "queue-count",
2310 				     &hisi_hba->queue_count)) {
2311 		dev_err(dev, "could not get property queue-count\n");
2312 		return -ENOENT;
2313 	}
2314 
2315 	return 0;
2316 }
2317 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2318 
2319 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2320 					      const struct hisi_sas_hw *hw)
2321 {
2322 	struct resource *res;
2323 	struct Scsi_Host *shost;
2324 	struct hisi_hba *hisi_hba;
2325 	struct device *dev = &pdev->dev;
2326 
2327 	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
2328 	if (!shost) {
2329 		dev_err(dev, "scsi host alloc failed\n");
2330 		return NULL;
2331 	}
2332 	hisi_hba = shost_priv(shost);
2333 
2334 	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2335 	hisi_hba->hw = hw;
2336 	hisi_hba->dev = dev;
2337 	hisi_hba->platform_dev = pdev;
2338 	hisi_hba->shost = shost;
2339 	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2340 
2341 	timer_setup(&hisi_hba->timer, NULL, 0);
2342 
2343 	if (hisi_sas_get_fw_info(hisi_hba) < 0)
2344 		goto err_out;
2345 
2346 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
2347 	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2348 		dev_err(dev, "No usable DMA addressing method\n");
2349 		goto err_out;
2350 	}
2351 
2352 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2353 	hisi_hba->regs = devm_ioremap_resource(dev, res);
2354 	if (IS_ERR(hisi_hba->regs))
2355 		goto err_out;
2356 
2357 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2358 	if (res) {
2359 		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2360 		if (IS_ERR(hisi_hba->sgpio_regs))
2361 			goto err_out;
2362 	}
2363 
2364 	if (hisi_sas_alloc(hisi_hba, shost)) {
2365 		hisi_sas_free(hisi_hba);
2366 		goto err_out;
2367 	}
2368 
2369 	return shost;
2370 err_out:
2371 	scsi_host_put(shost);
2372 	dev_err(dev, "shost alloc failed\n");
2373 	return NULL;
2374 }
2375 
2376 int hisi_sas_probe(struct platform_device *pdev,
2377 		   const struct hisi_sas_hw *hw)
2378 {
2379 	struct Scsi_Host *shost;
2380 	struct hisi_hba *hisi_hba;
2381 	struct device *dev = &pdev->dev;
2382 	struct asd_sas_phy **arr_phy;
2383 	struct asd_sas_port **arr_port;
2384 	struct sas_ha_struct *sha;
2385 	int rc, phy_nr, port_nr, i;
2386 
2387 	shost = hisi_sas_shost_alloc(pdev, hw);
2388 	if (!shost)
2389 		return -ENOMEM;
2390 
2391 	sha = SHOST_TO_SAS_HA(shost);
2392 	hisi_hba = shost_priv(shost);
2393 	platform_set_drvdata(pdev, sha);
2394 
2395 	phy_nr = port_nr = hisi_hba->n_phy;
2396 
2397 	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2398 	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2399 	if (!arr_phy || !arr_port) {
2400 		rc = -ENOMEM;
2401 		goto err_out_ha;
2402 	}
2403 
2404 	sha->sas_phy = arr_phy;
2405 	sha->sas_port = arr_port;
2406 	sha->lldd_ha = hisi_hba;
2407 
2408 	shost->transportt = hisi_sas_stt;
2409 	shost->max_id = HISI_SAS_MAX_DEVICES;
2410 	shost->max_lun = ~0;
2411 	shost->max_channel = 1;
2412 	shost->max_cmd_len = 16;
2413 	if (hisi_hba->hw->slot_index_alloc) {
2414 		shost->can_queue = hisi_hba->hw->max_command_entries;
2415 		shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2416 	} else {
2417 		shost->can_queue = hisi_hba->hw->max_command_entries -
2418 			HISI_SAS_RESERVED_IPTT_CNT;
2419 		shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
2420 			HISI_SAS_RESERVED_IPTT_CNT;
2421 	}
2422 
2423 	sha->sas_ha_name = DRV_NAME;
2424 	sha->dev = hisi_hba->dev;
2425 	sha->lldd_module = THIS_MODULE;
2426 	sha->sas_addr = &hisi_hba->sas_addr[0];
2427 	sha->num_phys = hisi_hba->n_phy;
2428 	sha->core.shost = hisi_hba->shost;
2429 
2430 	for (i = 0; i < hisi_hba->n_phy; i++) {
2431 		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2432 		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2433 	}
2434 
2435 	rc = scsi_add_host(shost, &pdev->dev);
2436 	if (rc)
2437 		goto err_out_ha;
2438 
2439 	rc = sas_register_ha(sha);
2440 	if (rc)
2441 		goto err_out_register_ha;
2442 
2443 	rc = hisi_hba->hw->hw_init(hisi_hba);
2444 	if (rc)
2445 		goto err_out_register_ha;
2446 
2447 	scsi_scan_host(shost);
2448 
2449 	return 0;
2450 
2451 err_out_register_ha:
2452 	scsi_remove_host(shost);
2453 err_out_ha:
2454 	hisi_sas_free(hisi_hba);
2455 	scsi_host_put(shost);
2456 	return rc;
2457 }
2458 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2459 
2460 int hisi_sas_remove(struct platform_device *pdev)
2461 {
2462 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2463 	struct hisi_hba *hisi_hba = sha->lldd_ha;
2464 	struct Scsi_Host *shost = sha->core.shost;
2465 
2466 	if (timer_pending(&hisi_hba->timer))
2467 		del_timer(&hisi_hba->timer);
2468 
2469 	sas_unregister_ha(sha);
2470 	sas_remove_host(sha->core.shost);
2471 
2472 	hisi_sas_free(hisi_hba);
2473 	scsi_host_put(shost);
2474 	return 0;
2475 }
2476 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2477 
2478 static __init int hisi_sas_init(void)
2479 {
2480 	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2481 	if (!hisi_sas_stt)
2482 		return -ENOMEM;
2483 
2484 	return 0;
2485 }
2486 
2487 static __exit void hisi_sas_exit(void)
2488 {
2489 	sas_release_transport(hisi_sas_stt);
2490 }
2491 
2492 module_init(hisi_sas_init);
2493 module_exit(hisi_sas_exit);
2494 
2495 MODULE_LICENSE("GPL");
2496 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2497 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2498 MODULE_ALIAS("platform:" DRV_NAME);
2499