1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11 
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14 
15 #define DEV_IS_GONE(dev) \
16 	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
17 
18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19 				u8 *lun, struct hisi_sas_tmf_task *tmf);
20 static int
21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 			     struct domain_device *device,
23 			     int abort_flag, int tag);
24 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25 
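/*
 * Map an ATA command opcode to the SATA frame protocol used by the HW:
 * FPDMA for NCQ commands, PIO, DMA or non-data. Unrecognised opcodes
 * fall back to non-data or PIO based on the transfer direction.
 */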
26 u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
27 {
28 	switch (cmd) {
29 	case ATA_CMD_FPDMA_WRITE:
30 	case ATA_CMD_FPDMA_READ:
31 	case ATA_CMD_FPDMA_RECV:
32 	case ATA_CMD_FPDMA_SEND:
33 	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;
35 
36 	case ATA_CMD_DOWNLOAD_MICRO:
37 	case ATA_CMD_ID_ATA:
38 	case ATA_CMD_PMP_READ:
39 	case ATA_CMD_READ_LOG_EXT:
40 	case ATA_CMD_PIO_READ:
41 	case ATA_CMD_PIO_READ_EXT:
42 	case ATA_CMD_PMP_WRITE:
43 	case ATA_CMD_WRITE_LOG_EXT:
44 	case ATA_CMD_PIO_WRITE:
45 	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;
47 
48 	case ATA_CMD_DSM:
49 	case ATA_CMD_DOWNLOAD_MICRO_DMA:
50 	case ATA_CMD_PMP_READ_DMA:
51 	case ATA_CMD_PMP_WRITE_DMA:
52 	case ATA_CMD_READ:
53 	case ATA_CMD_READ_EXT:
54 	case ATA_CMD_READ_LOG_DMA_EXT:
55 	case ATA_CMD_READ_STREAM_DMA_EXT:
56 	case ATA_CMD_TRUSTED_RCV_DMA:
57 	case ATA_CMD_TRUSTED_SND_DMA:
58 	case ATA_CMD_WRITE:
59 	case ATA_CMD_WRITE_EXT:
60 	case ATA_CMD_WRITE_FUA_EXT:
61 	case ATA_CMD_WRITE_QUEUED:
62 	case ATA_CMD_WRITE_LOG_DMA_EXT:
63 	case ATA_CMD_WRITE_STREAM_DMA_EXT:
64 	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;
66 
67 	case ATA_CMD_CHK_POWER:
68 	case ATA_CMD_DEV_RESET:
69 	case ATA_CMD_EDD:
70 	case ATA_CMD_FLUSH:
71 	case ATA_CMD_FLUSH_EXT:
72 	case ATA_CMD_VERIFY:
73 	case ATA_CMD_VERIFY_EXT:
74 	case ATA_CMD_SET_FEATURES:
75 	case ATA_CMD_STANDBY:
76 	case ATA_CMD_STANDBYNOW1:
77 	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;
79 	default:
80 		if (direction == DMA_NONE)
81 			return HISI_SAS_SATA_PROTOCOL_NONDATA;
82 		return HISI_SAS_SATA_PROTOCOL_PIO;
83 	}
84 }
85 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
86 
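/*
 * Copy the D2H FIS from the slot's status buffer into the libsas
 * ata_task_resp so that libata can see the device's ending status.
 */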
87 void hisi_sas_sata_done(struct sas_task *task,
88 			    struct hisi_sas_slot *slot)
89 {
90 	struct task_status_struct *ts = &task->task_status;
91 	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
92 	struct hisi_sas_status_buffer *status_buf =
93 			hisi_sas_status_buf_addr_mem(slot);
94 	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
96 
97 	resp->frame_len = sizeof(struct dev_to_host_fis);
98 	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
99 
100 	ts->buf_valid_size = sizeof(*resp);
101 }
102 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
103 
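/*
 * For NCQ read/write commands, fetch the queue tag from the libata
 * queued command. Returns 1 if a tag was retrieved, otherwise 0.
 */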
104 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
105 {
106 	struct ata_queued_cmd *qc = task->uldd_task;
107 
	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		   qc->tf.command == ATA_CMD_FPDMA_READ)) {
		*tag = qc->tag;
		return 1;
	}
115 	return 0;
116 }
117 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
118 
119 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
120 {
121 	return device->port->ha->lldd_ha;
122 }
123 
124 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
125 {
126 	return container_of(sas_port, struct hisi_sas_port, sas_port);
127 }
128 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
129 
130 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
131 {
132 	int phy_no;
133 
134 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
135 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
136 }
137 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
138 
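/*
 * Slot index (IPTT) tags are tracked in a bitmap: allocation scans for
 * the first zero bit and free simply clears it. Callers serialise
 * access with hisi_hba->lock.
 */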
139 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
140 {
141 	void *bitmap = hisi_hba->slot_index_tags;
142 
143 	clear_bit(slot_idx, bitmap);
144 }
145 
146 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
147 {
148 	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
149 }
150 
151 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
152 {
153 	void *bitmap = hisi_hba->slot_index_tags;
154 
155 	set_bit(slot_idx, bitmap);
156 }
157 
158 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
159 {
160 	unsigned int index;
161 	void *bitmap = hisi_hba->slot_index_tags;
162 
163 	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
164 	if (index >= hisi_hba->slot_index_count)
165 		return -SAS_QUEUE_FULL;
166 	hisi_sas_slot_index_set(hisi_hba, index);
167 	*slot_idx = index;
168 	return 0;
169 }
170 
171 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
172 {
173 	int i;
174 
175 	for (i = 0; i < hisi_hba->slot_index_count; ++i)
176 		hisi_sas_slot_index_clear(hisi_hba, i);
177 }
178 
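/*
 * Release the resources held by a completed or aborted slot: unmap the
 * scatterlist (non-ATA tasks only), return the DMA buffer to the pool,
 * unlink the slot from the device list and free its tag.
 */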
179 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
180 			     struct hisi_sas_slot *slot)
181 {
182 
183 	if (task) {
184 		struct device *dev = hisi_hba->dev;
185 		struct domain_device *device = task->dev;
186 		struct hisi_sas_device *sas_dev = device->lldd_dev;
187 
188 		if (!sas_protocol_ata(task->task_proto))
189 			if (slot->n_elem)
190 				dma_unmap_sg(dev, task->scatter, slot->n_elem,
191 					     task->data_dir);
192 
193 		task->lldd_task = NULL;
194 
195 		if (sas_dev)
196 			atomic64_dec(&sas_dev->running_req);
197 	}
198 
199 	if (slot->buf)
		dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	list_del_init(&slot->entry);
204 	slot->task = NULL;
205 	slot->port = NULL;
206 	hisi_sas_slot_index_free(hisi_hba, slot->idx);
207 
208 	/* slot memory is fully zeroed when it is reused */
209 }
210 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
211 
212 static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
213 				  struct hisi_sas_slot *slot)
214 {
215 	return hisi_hba->hw->prep_smp(hisi_hba, slot);
216 }
217 
218 static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
219 				  struct hisi_sas_slot *slot, int is_tmf,
220 				  struct hisi_sas_tmf_task *tmf)
221 {
222 	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
223 }
224 
225 static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
226 				  struct hisi_sas_slot *slot)
227 {
228 	return hisi_hba->hw->prep_stp(hisi_hba, slot);
229 }
230 
231 static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
232 		struct hisi_sas_slot *slot,
233 		int device_id, int abort_flag, int tag_to_abort)
234 {
235 	return hisi_hba->hw->prep_abort(hisi_hba, slot,
236 			device_id, abort_flag, tag_to_abort);
237 }
238 
239 /*
240  * This function will issue an abort TMF regardless of whether the
241  * task is in the sdev or not. Then it will do the task complete
242  * cleanup and callbacks.
243  */
244 static void hisi_sas_slot_abort(struct work_struct *work)
245 {
246 	struct hisi_sas_slot *abort_slot =
247 		container_of(work, struct hisi_sas_slot, abort_slot);
248 	struct sas_task *task = abort_slot->task;
249 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
250 	struct scsi_cmnd *cmnd = task->uldd_task;
251 	struct hisi_sas_tmf_task tmf_task;
252 	struct scsi_lun lun;
253 	struct device *dev = hisi_hba->dev;
254 	int tag = abort_slot->idx;
255 	unsigned long flags;
256 
257 	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
258 		dev_err(dev, "cannot abort slot for non-ssp task\n");
259 		goto out;
260 	}
261 
262 	int_to_scsilun(cmnd->device->lun, &lun);
263 	tmf_task.tmf = TMF_ABORT_TASK;
264 	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
265 
266 	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
267 out:
268 	/* Do cleanup for this task */
269 	spin_lock_irqsave(&hisi_hba->lock, flags);
270 	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
271 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
272 	if (task->task_done)
273 		task->task_done(task);
274 }
275 
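/*
 * Prepare a sas_task for delivery: validate the port and device state,
 * DMA-map the scatterlist, allocate a tag and a delivery-queue slot,
 * build the command header and tables, and queue the slot on the
 * device list. On success, *pass is incremented so that the caller
 * knows a slot is ready for delivery.
 */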
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq *dq,
			      int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
279 {
280 	struct hisi_hba *hisi_hba = dq->hisi_hba;
281 	struct domain_device *device = task->dev;
282 	struct hisi_sas_device *sas_dev = device->lldd_dev;
283 	struct hisi_sas_port *port;
284 	struct hisi_sas_slot *slot;
285 	struct hisi_sas_cmd_hdr	*cmd_hdr_base;
286 	struct asd_sas_port *sas_port = device->port;
287 	struct device *dev = hisi_hba->dev;
288 	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
289 	unsigned long flags;
290 
291 	if (!sas_port) {
292 		struct task_status_struct *ts = &task->task_status;
293 
294 		ts->resp = SAS_TASK_UNDELIVERED;
295 		ts->stat = SAS_PHY_DOWN;
296 		/*
297 		 * libsas will use dev->port, should
298 		 * not call task_done for sata
299 		 */
300 		if (device->dev_type != SAS_SATA_DEV)
301 			task->task_done(task);
302 		return SAS_PHY_DOWN;
303 	}
304 
305 	if (DEV_IS_GONE(sas_dev)) {
306 		if (sas_dev)
307 			dev_info(dev, "task prep: device %d not ready\n",
308 				 sas_dev->device_id);
309 		else
310 			dev_info(dev, "task prep: device %016llx not ready\n",
311 				 SAS_ADDR(device->sas_addr));
312 
313 		return SAS_PHY_DOWN;
314 	}
315 
316 	port = to_hisi_sas_port(sas_port);
317 	if (port && !port->port_attached) {
318 		dev_info(dev, "task prep: %s port%d not attach device\n",
319 			 (dev_is_sata(device)) ?
320 			 "SATA/STP" : "SAS",
321 			 device->port->id);
322 
323 		return SAS_PHY_DOWN;
324 	}
325 
326 	if (!sas_protocol_ata(task->task_proto)) {
327 		if (task->num_scatter) {
328 			n_elem = dma_map_sg(dev, task->scatter,
329 					    task->num_scatter, task->data_dir);
330 			if (!n_elem) {
331 				rc = -ENOMEM;
332 				goto prep_out;
333 			}
334 		}
	} else {
		n_elem = task->num_scatter;
	}
337 
338 	spin_lock_irqsave(&hisi_hba->lock, flags);
339 	if (hisi_hba->hw->slot_index_alloc)
340 		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
341 						    device);
342 	else
343 		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
344 	if (rc) {
345 		spin_unlock_irqrestore(&hisi_hba->lock, flags);
346 		goto err_out;
347 	}
348 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
349 
350 	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
351 	if (rc)
352 		goto err_out_tag;
353 
354 	dlvry_queue = dq->id;
355 	dlvry_queue_slot = dq->wr_point;
356 	slot = &hisi_hba->slot_info[slot_idx];
357 	memset(slot, 0, sizeof(struct hisi_sas_slot));
358 
359 	slot->idx = slot_idx;
360 	slot->n_elem = n_elem;
361 	slot->dlvry_queue = dlvry_queue;
362 	slot->dlvry_queue_slot = dlvry_queue_slot;
363 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
364 	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
365 	slot->task = task;
366 	slot->port = port;
367 	task->lldd_task = slot;
368 	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
369 
370 	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
371 				   GFP_ATOMIC, &slot->buf_dma);
372 	if (!slot->buf) {
373 		rc = -ENOMEM;
374 		goto err_out_slot_buf;
375 	}
376 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
377 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
378 	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
379 
380 	switch (task->task_proto) {
381 	case SAS_PROTOCOL_SMP:
382 		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
383 		break;
384 	case SAS_PROTOCOL_SSP:
385 		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
386 		break;
387 	case SAS_PROTOCOL_SATA:
388 	case SAS_PROTOCOL_STP:
389 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
390 		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
391 		break;
392 	default:
393 		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
394 			task->task_proto);
395 		rc = -EINVAL;
396 		break;
397 	}
398 
399 	if (rc) {
400 		dev_err(dev, "task prep: rc = 0x%x\n", rc);
401 		goto err_out_buf;
402 	}
403 
404 	list_add_tail(&slot->entry, &sas_dev->list);
405 	spin_lock_irqsave(&task->task_state_lock, flags);
406 	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
407 	spin_unlock_irqrestore(&task->task_state_lock, flags);
408 
409 	dq->slot_prep = slot;
410 
411 	atomic64_inc(&sas_dev->running_req);
412 	++(*pass);
413 
414 	return 0;
415 
416 err_out_buf:
417 	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
418 		slot->buf_dma);
419 err_out_slot_buf:
420 	/* Nothing to be done */
421 err_out_tag:
422 	spin_lock_irqsave(&hisi_hba->lock, flags);
423 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
424 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
425 err_out:
426 	dev_err(dev, "task prep: failed[%d]!\n", rc);
427 	if (!sas_protocol_ata(task->task_proto))
428 		if (n_elem)
429 			dma_unmap_sg(dev, task->scatter, n_elem,
430 				     task->data_dir);
431 prep_out:
432 	return rc;
433 }
434 
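/*
 * Execute a sas_task on the device's delivery queue. The dq lock is
 * held across prep and start_delivery so that slots are delivered in
 * the order they were prepared.
 */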
435 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
436 			      int is_tmf, struct hisi_sas_tmf_task *tmf)
437 {
	int rc;
439 	u32 pass = 0;
440 	unsigned long flags;
441 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
442 	struct device *dev = hisi_hba->dev;
443 	struct domain_device *device = task->dev;
444 	struct hisi_sas_device *sas_dev = device->lldd_dev;
445 	struct hisi_sas_dq *dq = sas_dev->dq;
446 
447 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
448 		return -EINVAL;
449 
450 	/* protect task_prep and start_delivery sequence */
451 	spin_lock_irqsave(&dq->lock, flags);
452 	rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
453 	if (rc)
454 		dev_err(dev, "task exec: failed[%d]!\n", rc);
455 
456 	if (likely(pass))
457 		hisi_hba->hw->start_delivery(dq);
458 	spin_unlock_irqrestore(&dq->lock, flags);
459 
460 	return rc;
461 }
462 
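/*
 * Notify libsas that a phy has come up: report OOB completion, refresh
 * the transport phy link rates, fill in the identify frame for SAS
 * phys and raise PORTE_BYTES_DMAED so that libsas starts discovery.
 */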
463 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
464 {
465 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
466 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
467 	struct sas_ha_struct *sas_ha;
468 
469 	if (!phy->phy_attached)
470 		return;
471 
472 	sas_ha = &hisi_hba->sha;
473 	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
474 
475 	if (sas_phy->phy) {
476 		struct sas_phy *sphy = sas_phy->phy;
477 
478 		sphy->negotiated_linkrate = sas_phy->linkrate;
479 		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
480 		sphy->maximum_linkrate_hw =
481 			hisi_hba->hw->phy_get_max_linkrate();
482 		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
483 			sphy->minimum_linkrate = phy->minimum_linkrate;
484 
485 		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
486 			sphy->maximum_linkrate = phy->maximum_linkrate;
487 	}
488 
489 	if (phy->phy_type & PORT_TYPE_SAS) {
490 		struct sas_identify_frame *id;
491 
492 		id = (struct sas_identify_frame *)phy->frame_rcvd;
493 		id->dev_type = phy->identify.device_type;
494 		id->initiator_bits = SAS_PROTOCOL_ALL;
495 		id->target_bits = phy->identify.target_port_protocols;
496 	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing to do */
498 	}
499 
500 	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
501 	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
502 }
503 
504 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
505 {
506 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
507 	struct hisi_sas_device *sas_dev = NULL;
508 	int i;
509 
510 	spin_lock(&hisi_hba->lock);
511 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
512 		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
513 			int queue = i % hisi_hba->queue_count;
514 			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
515 
516 			hisi_hba->devices[i].device_id = i;
517 			sas_dev = &hisi_hba->devices[i];
518 			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
519 			sas_dev->dev_type = device->dev_type;
520 			sas_dev->hisi_hba = hisi_hba;
521 			sas_dev->sas_device = device;
522 			sas_dev->dq = dq;
523 			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
524 			break;
525 		}
526 	}
527 	spin_unlock(&hisi_hba->lock);
528 
529 	return sas_dev;
530 }
531 
532 static int hisi_sas_dev_found(struct domain_device *device)
533 {
534 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
535 	struct domain_device *parent_dev = device->parent;
536 	struct hisi_sas_device *sas_dev;
537 	struct device *dev = hisi_hba->dev;
538 
539 	if (hisi_hba->hw->alloc_dev)
540 		sas_dev = hisi_hba->hw->alloc_dev(device);
541 	else
542 		sas_dev = hisi_sas_alloc_dev(device);
543 	if (!sas_dev) {
544 		dev_err(dev, "fail alloc dev: max support %d devices\n",
545 			HISI_SAS_MAX_DEVICES);
546 		return -EINVAL;
547 	}
548 
549 	device->lldd_dev = sas_dev;
550 	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
551 
552 	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
553 		int phy_no;
554 		u8 phy_num = parent_dev->ex_dev.num_phys;
555 		struct ex_phy *phy;
556 
557 		for (phy_no = 0; phy_no < phy_num; phy_no++) {
558 			phy = &parent_dev->ex_dev.ex_phy[phy_no];
559 			if (SAS_ADDR(phy->attached_sas_addr) ==
560 				SAS_ADDR(device->sas_addr)) {
561 				sas_dev->attached_phy = phy_no;
562 				break;
563 			}
564 		}
565 
566 		if (phy_no == phy_num) {
567 			dev_info(dev, "dev found: no attached "
568 				 "dev:%016llx at ex:%016llx\n",
569 				 SAS_ADDR(device->sas_addr),
570 				 SAS_ADDR(parent_dev->sas_addr));
571 			return -EINVAL;
572 		}
573 	}
574 
575 	return 0;
576 }
577 
578 static int hisi_sas_slave_configure(struct scsi_device *sdev)
579 {
580 	struct domain_device *dev = sdev_to_domain_dev(sdev);
581 	int ret = sas_slave_configure(sdev);
582 
583 	if (ret)
584 		return ret;
585 	if (!dev_is_sata(dev))
586 		sas_change_queue_depth(sdev, 64);
587 
588 	return 0;
589 }
590 
591 static void hisi_sas_scan_start(struct Scsi_Host *shost)
592 {
593 	struct hisi_hba *hisi_hba = shost_priv(shost);
594 
595 	hisi_hba->hw->phys_init(hisi_hba);
596 }
597 
598 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
599 {
600 	struct hisi_hba *hisi_hba = shost_priv(shost);
601 	struct sas_ha_struct *sha = &hisi_hba->sha;
602 
603 	/* Wait for PHY up interrupt to occur */
604 	if (time < HZ)
605 		return 0;
606 
607 	sas_drain_work(sha);
608 	return 1;
609 }
610 
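/* Process-context bottom half for phy-up events; sl_notify() sleeps. */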
611 static void hisi_sas_phyup_work(struct work_struct *work)
612 {
613 	struct hisi_sas_phy *phy =
614 		container_of(work, struct hisi_sas_phy, phyup_ws);
615 	struct hisi_hba *hisi_hba = phy->hisi_hba;
616 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
617 	int phy_no = sas_phy->id;
618 
	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* sl_notify() may sleep */
620 	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
621 }
622 
623 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
624 {
625 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
626 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
627 
628 	phy->hisi_hba = hisi_hba;
629 	phy->port = NULL;
630 	init_timer(&phy->timer);
631 	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
632 	sas_phy->class = SAS;
633 	sas_phy->iproto = SAS_PROTOCOL_ALL;
634 	sas_phy->tproto = 0;
635 	sas_phy->type = PHY_TYPE_PHYSICAL;
636 	sas_phy->role = PHY_ROLE_INITIATOR;
637 	sas_phy->oob_mode = OOB_NOT_CONNECTED;
638 	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
639 	sas_phy->id = phy_no;
640 	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
641 	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
642 	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
643 	sas_phy->lldd_phy = phy;
644 
645 	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
646 }
647 
648 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
649 {
650 	struct sas_ha_struct *sas_ha = sas_phy->ha;
651 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
652 	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
653 	struct asd_sas_port *sas_port = sas_phy->port;
654 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
655 	unsigned long flags;
656 
657 	if (!sas_port)
658 		return;
659 
660 	spin_lock_irqsave(&hisi_hba->lock, flags);
661 	port->port_attached = 1;
662 	port->id = phy->port_id;
663 	phy->port = port;
664 	sas_port->lldd_port = port;
665 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
666 }
667 
668 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
669 				     struct hisi_sas_slot *slot)
670 {
671 	if (task) {
672 		unsigned long flags;
673 		struct task_status_struct *ts;
674 
675 		ts = &task->task_status;
676 
677 		ts->resp = SAS_TASK_COMPLETE;
678 		ts->stat = SAS_ABORTED_TASK;
679 		spin_lock_irqsave(&task->task_state_lock, flags);
680 		task->task_state_flags &=
681 			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
682 		task->task_state_flags |= SAS_TASK_STATE_DONE;
683 		spin_unlock_irqrestore(&task->task_state_lock, flags);
684 	}
685 
686 	hisi_sas_slot_task_free(hisi_hba, task, slot);
687 }
688 
/* Caller must hold hisi_hba->lock */
690 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
691 			struct domain_device *device)
692 {
693 	struct hisi_sas_slot *slot, *slot2;
694 	struct hisi_sas_device *sas_dev = device->lldd_dev;
695 
696 	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
697 		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
698 }
699 
700 static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
701 {
702 	struct hisi_sas_device *sas_dev;
703 	struct domain_device *device;
704 	int i;
705 
706 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
707 		sas_dev = &hisi_hba->devices[i];
708 		device = sas_dev->sas_device;
709 
710 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
711 		    !device)
712 			continue;
713 
714 		hisi_sas_release_task(hisi_hba, device);
715 	}
716 }
717 
718 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
719 				struct domain_device *device)
720 {
721 	if (hisi_hba->hw->dereg_device)
722 		hisi_hba->hw->dereg_device(hisi_hba, device);
723 }
724 
725 static void hisi_sas_dev_gone(struct domain_device *device)
726 {
727 	struct hisi_sas_device *sas_dev = device->lldd_dev;
728 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
729 	struct device *dev = hisi_hba->dev;
730 
731 	dev_info(dev, "found dev[%d:%x] is gone\n",
732 		 sas_dev->device_id, sas_dev->dev_type);
733 
734 	hisi_sas_internal_task_abort(hisi_hba, device,
735 				     HISI_SAS_INT_ABT_DEV, 0);
736 
737 	hisi_sas_dereg_device(hisi_hba, device);
738 
739 	hisi_hba->hw->free_device(hisi_hba, sas_dev);
740 	device->lldd_dev = NULL;
741 	memset(sas_dev, 0, sizeof(*sas_dev));
742 	sas_dev->dev_type = SAS_PHY_UNUSED;
743 }
744 
745 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
746 {
747 	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
748 }
749 
750 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
751 				void *funcdata)
752 {
753 	struct sas_ha_struct *sas_ha = sas_phy->ha;
754 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
755 	int phy_no = sas_phy->id;
756 
757 	switch (func) {
758 	case PHY_FUNC_HARD_RESET:
759 		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
760 		break;
761 
762 	case PHY_FUNC_LINK_RESET:
763 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
764 		msleep(100);
765 		hisi_hba->hw->phy_enable(hisi_hba, phy_no);
766 		break;
767 
768 	case PHY_FUNC_DISABLE:
769 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
770 		break;
771 
772 	case PHY_FUNC_SET_LINK_RATE:
773 		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
774 		break;
775 	case PHY_FUNC_GET_EVENTS:
776 		if (hisi_hba->hw->get_events) {
777 			hisi_hba->hw->get_events(hisi_hba, phy_no);
778 			break;
779 		}
780 		/* fallthru */
781 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
782 	default:
783 		return -EOPNOTSUPP;
784 	}
785 	return 0;
786 }
787 
788 static void hisi_sas_task_done(struct sas_task *task)
789 {
790 	if (!del_timer(&task->slow_task->timer))
791 		return;
792 	complete(&task->slow_task->completion);
793 }
794 
795 static void hisi_sas_tmf_timedout(unsigned long data)
796 {
797 	struct sas_task *task = (struct sas_task *)data;
798 	unsigned long flags;
799 
800 	spin_lock_irqsave(&task->task_state_lock, flags);
801 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
802 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
803 	spin_unlock_irqrestore(&task->task_state_lock, flags);
804 
805 	complete(&task->slow_task->completion);
806 }
807 
808 #define TASK_TIMEOUT 20
809 #define TASK_RETRY 3
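/*
 * Issue an internal TMF (an SSP task management frame or a SATA FIS)
 * and wait for it to complete, retrying up to TASK_RETRY times with a
 * TASK_TIMEOUT-second timer armed on each attempt.
 */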
810 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
811 					   void *parameter, u32 para_len,
812 					   struct hisi_sas_tmf_task *tmf)
813 {
814 	struct hisi_sas_device *sas_dev = device->lldd_dev;
815 	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
816 	struct device *dev = hisi_hba->dev;
817 	struct sas_task *task;
818 	int res, retry;
819 
820 	for (retry = 0; retry < TASK_RETRY; retry++) {
821 		task = sas_alloc_slow_task(GFP_KERNEL);
822 		if (!task)
823 			return -ENOMEM;
824 
825 		task->dev = device;
826 		task->task_proto = device->tproto;
827 
828 		if (dev_is_sata(device)) {
829 			task->ata_task.device_control_reg_update = 1;
830 			memcpy(&task->ata_task.fis, parameter, para_len);
831 		} else {
832 			memcpy(&task->ssp_task, parameter, para_len);
833 		}
834 		task->task_done = hisi_sas_task_done;
835 
836 		task->slow_task->timer.data = (unsigned long) task;
837 		task->slow_task->timer.function = hisi_sas_tmf_timedout;
838 		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
839 		add_timer(&task->slow_task->timer);
840 
841 		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
842 
843 		if (res) {
844 			del_timer(&task->slow_task->timer);
845 			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
846 				res);
847 			goto ex_err;
848 		}
849 
850 		wait_for_completion(&task->slow_task->completion);
851 		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
853 		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
854 			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
855 				struct hisi_sas_slot *slot = task->lldd_task;
856 
857 				dev_err(dev, "abort tmf: TMF task timeout\n");
858 				if (slot)
859 					slot->task = NULL;
860 
861 				goto ex_err;
862 			}
863 		}
864 
865 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
866 		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
867 			res = TMF_RESP_FUNC_COMPLETE;
868 			break;
869 		}
870 
871 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
872 			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
873 			res = TMF_RESP_FUNC_SUCC;
874 			break;
875 		}
876 
877 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
878 		      task->task_status.stat == SAS_DATA_UNDERRUN) {
879 			/* no error, but return the number of bytes of
880 			 * underrun
881 			 */
882 			dev_warn(dev, "abort tmf: task to dev %016llx "
883 				 "resp: 0x%x sts 0x%x underrun\n",
884 				 SAS_ADDR(device->sas_addr),
885 				 task->task_status.resp,
886 				 task->task_status.stat);
887 			res = task->task_status.residual;
888 			break;
889 		}
890 
891 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
892 			task->task_status.stat == SAS_DATA_OVERRUN) {
893 			dev_warn(dev, "abort tmf: blocked task error\n");
894 			res = -EMSGSIZE;
895 			break;
896 		}
897 
898 		dev_warn(dev, "abort tmf: task to dev "
899 			 "%016llx resp: 0x%x status 0x%x\n",
900 			 SAS_ADDR(device->sas_addr), task->task_status.resp,
901 			 task->task_status.stat);
902 		sas_free_task(task);
903 		task = NULL;
904 	}
905 ex_err:
906 	if (retry == TASK_RETRY)
907 		dev_warn(dev, "abort tmf: executing internal task failed!\n");
908 	sas_free_task(task);
909 	return res;
910 }
911 
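/* Build a device-reset H2D FIS with SRST asserted or deasserted. */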
912 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
913 		bool reset, int pmp, u8 *fis)
914 {
915 	struct ata_taskfile tf;
916 
917 	ata_tf_init(dev, &tf);
918 	if (reset)
919 		tf.ctl |= ATA_SRST;
920 	else
921 		tf.ctl &= ~ATA_SRST;
922 	tf.command = ATA_CMD_DEV_RESET;
923 	ata_tf_to_fis(&tf, pmp, 0, fis);
924 }
925 
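/*
 * Soft-reset an ATA disk: send an SRST-asserted device-reset FIS on
 * each link, then an SRST-deasserted FIS to complete the reset, and
 * release any tasks still queued on the device if both succeed.
 */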
926 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
927 {
928 	u8 fis[20] = {0};
929 	struct ata_port *ap = device->sata_dev.ap;
930 	struct ata_link *link;
931 	int rc = TMF_RESP_FUNC_FAILED;
932 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
933 	struct device *dev = hisi_hba->dev;
934 	int s = sizeof(struct host_to_dev_fis);
935 	unsigned long flags;
936 
937 	ata_for_each_link(link, ap, EDGE) {
938 		int pmp = sata_srst_pmp(link);
939 
940 		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
941 		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
942 		if (rc != TMF_RESP_FUNC_COMPLETE)
943 			break;
944 	}
945 
946 	if (rc == TMF_RESP_FUNC_COMPLETE) {
947 		ata_for_each_link(link, ap, EDGE) {
948 			int pmp = sata_srst_pmp(link);
949 
950 			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
951 			rc = hisi_sas_exec_internal_tmf_task(device, fis,
952 							     s, NULL);
953 			if (rc != TMF_RESP_FUNC_COMPLETE)
954 				dev_err(dev, "ata disk de-reset failed\n");
955 		}
956 	} else {
957 		dev_err(dev, "ata disk reset failed\n");
958 	}
959 
960 	if (rc == TMF_RESP_FUNC_COMPLETE) {
961 		spin_lock_irqsave(&hisi_hba->lock, flags);
962 		hisi_sas_release_task(hisi_hba, device);
963 		spin_unlock_irqrestore(&hisi_hba->lock, flags);
964 	}
965 
966 	return rc;
967 }
968 
969 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
970 				u8 *lun, struct hisi_sas_tmf_task *tmf)
971 {
972 	struct sas_ssp_task ssp_task;
973 
974 	if (!(device->tproto & SAS_PROTOCOL_SSP))
975 		return TMF_RESP_FUNC_ESUPP;
976 
977 	memcpy(ssp_task.LUN, lun, 8);
978 
979 	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
980 				sizeof(ssp_task), tmf);
981 }
982 
983 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
984 		struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
985 {
986 	struct hisi_sas_device	*sas_dev;
987 	struct domain_device *device;
988 	int i;
989 
990 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
991 		sas_dev = &hisi_hba->devices[i];
992 		device = sas_dev->sas_device;
993 		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
994 				|| !device || (device->port != sas_port))
995 			continue;
996 
997 		hisi_hba->hw->free_device(hisi_hba, sas_dev);
998 
999 		/* Update linkrate of directly attached device. */
1000 		if (!device->parent)
1001 			device->linkrate = linkrate;
1002 
1003 		hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1004 	}
1005 }
1006 
1007 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1008 			      u32 state)
1009 {
1010 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1011 	struct asd_sas_port *_sas_port = NULL;
1012 	int phy_no;
1013 
1014 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1015 		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1016 		struct asd_sas_phy *sas_phy = &phy->sas_phy;
1017 		struct asd_sas_port *sas_port = sas_phy->port;
1018 		struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
		bool do_port_check = _sas_port != sas_port;
1020 
1021 		if (!sas_phy->phy->enabled)
1022 			continue;
1023 
1024 		/* Report PHY state change to libsas */
1025 		if (state & (1 << phy_no)) {
1026 			if (do_port_check && sas_port) {
1027 				struct domain_device *dev = sas_port->port_dev;
1028 
1029 				_sas_port = sas_port;
1030 				port->id = phy->port_id;
1031 				hisi_sas_refresh_port_id(hisi_hba,
1032 						sas_port, sas_phy->linkrate);
1033 
1034 				if (DEV_IS_EXPANDER(dev->dev_type))
1035 					sas_ha->notify_port_event(sas_phy,
1036 							PORTE_BROADCAST_RCVD);
1037 			}
		} else if (old_state & (1 << phy_no)) {
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}
	}
1043 
1044 	drain_workqueue(hisi_hba->shost->work_q);
1045 }
1046 
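/*
 * Full controller reset: block and reject new commands, soft-reset the
 * HW, complete all outstanding tasks as aborted, then re-init the phys
 * and rescan the topology against the pre-reset phy state.
 */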
1047 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1048 {
1049 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1050 	struct device *dev = hisi_hba->dev;
1051 	struct Scsi_Host *shost = hisi_hba->shost;
1052 	u32 old_state, state;
1053 	unsigned long flags;
1054 	int rc;
1055 
1056 	if (!hisi_hba->hw->soft_reset)
1057 		return -1;
1058 
1059 	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1060 		return -1;
1061 
1062 	dev_dbg(dev, "controller resetting...\n");
1063 	old_state = hisi_hba->hw->get_phys_state(hisi_hba);
1064 
1065 	scsi_block_requests(shost);
1066 	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1067 	rc = hisi_hba->hw->soft_reset(hisi_hba);
1068 	if (rc) {
1069 		dev_warn(dev, "controller reset failed (%d)\n", rc);
1070 		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1071 		goto out;
1072 	}
1073 	spin_lock_irqsave(&hisi_hba->lock, flags);
1074 	hisi_sas_release_tasks(hisi_hba);
1075 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
1076 
1077 	sas_ha->notify_ha_event(sas_ha, HAE_RESET);
1078 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1079 
	/*
	 * Initialize the PHYs, then wait for them to come up and for all
	 * libsas events to finish.
	 */
1081 	hisi_hba->hw->phys_init(hisi_hba);
1082 	msleep(1000);
1083 	drain_workqueue(hisi_hba->wq);
1084 	drain_workqueue(shost->work_q);
1085 
1086 	state = hisi_hba->hw->get_phys_state(hisi_hba);
1087 	hisi_sas_rescan_topology(hisi_hba, old_state, state);
1088 	dev_dbg(dev, "controller reset complete\n");
1089 
1090 out:
1091 	scsi_unblock_requests(shost);
1092 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1093 
1094 	return rc;
1095 }
1096 
1097 static int hisi_sas_abort_task(struct sas_task *task)
1098 {
1099 	struct scsi_lun lun;
1100 	struct hisi_sas_tmf_task tmf_task;
1101 	struct domain_device *device = task->dev;
1102 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1103 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
1104 	struct device *dev = hisi_hba->dev;
1105 	int rc = TMF_RESP_FUNC_FAILED;
1106 	unsigned long flags;
1107 
1108 	if (!sas_dev) {
1109 		dev_warn(dev, "Device has been removed\n");
1110 		return TMF_RESP_FUNC_FAILED;
1111 	}
1112 
1113 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1114 		rc = TMF_RESP_FUNC_COMPLETE;
1115 		goto out;
1116 	}
1117 
1118 	sas_dev->dev_status = HISI_SAS_DEV_EH;
1119 	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1120 		struct scsi_cmnd *cmnd = task->uldd_task;
1121 		struct hisi_sas_slot *slot = task->lldd_task;
1122 		u32 tag = slot->idx;
1123 		int rc2;
1124 
1125 		int_to_scsilun(cmnd->device->lun, &lun);
1126 		tmf_task.tmf = TMF_ABORT_TASK;
1127 		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1128 
1129 		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
1130 						  &tmf_task);
1131 
1132 		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1133 						   HISI_SAS_INT_ABT_CMD, tag);
1134 		/*
1135 		 * If the TMF finds that the IO is not in the device and also
1136 		 * the internal abort does not succeed, then it is safe to
1137 		 * free the slot.
1138 		 * Note: if the internal abort succeeds then the slot
1139 		 * will have already been completed
1140 		 */
1141 		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1142 			if (task->lldd_task) {
1143 				spin_lock_irqsave(&hisi_hba->lock, flags);
1144 				hisi_sas_do_release_task(hisi_hba, task, slot);
1145 				spin_unlock_irqrestore(&hisi_hba->lock, flags);
1146 			}
1147 		}
1148 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
1149 		task->task_proto & SAS_PROTOCOL_STP) {
1150 		if (task->dev->dev_type == SAS_SATA_DEV) {
1151 			hisi_sas_internal_task_abort(hisi_hba, device,
1152 						     HISI_SAS_INT_ABT_DEV, 0);
1153 			hisi_sas_dereg_device(hisi_hba, device);
1154 			rc = hisi_sas_softreset_ata_disk(device);
1155 		}
1156 	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
1157 		/* SMP */
1158 		struct hisi_sas_slot *slot = task->lldd_task;
1159 		u32 tag = slot->idx;
1160 
1161 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1162 			     HISI_SAS_INT_ABT_CMD, tag);
1163 		if (rc == TMF_RESP_FUNC_FAILED) {
1164 			spin_lock_irqsave(&hisi_hba->lock, flags);
1165 			hisi_sas_do_release_task(hisi_hba, task, slot);
1166 			spin_unlock_irqrestore(&hisi_hba->lock, flags);
1167 		}
1168 	}
1169 
1170 out:
1171 	if (rc != TMF_RESP_FUNC_COMPLETE)
1172 		dev_notice(dev, "abort task: rc=%d\n", rc);
1173 	return rc;
1174 }
1175 
1176 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1177 {
1178 	struct hisi_sas_tmf_task tmf_task;
1179 	int rc = TMF_RESP_FUNC_FAILED;
1180 
1181 	tmf_task.tmf = TMF_ABORT_TASK_SET;
1182 	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1183 
1184 	return rc;
1185 }
1186 
1187 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1188 {
1189 	int rc = TMF_RESP_FUNC_FAILED;
1190 	struct hisi_sas_tmf_task tmf_task;
1191 
1192 	tmf_task.tmf = TMF_CLEAR_ACA;
1193 	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1194 
1195 	return rc;
1196 }
1197 
1198 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1199 {
1200 	struct sas_phy *phy = sas_get_local_phy(device);
1201 	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	rc = sas_phy_reset(phy, reset_type);
1204 	sas_put_local_phy(phy);
1205 	msleep(2000);
1206 	return rc;
1207 }
1208 
1209 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1210 {
1211 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1212 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1213 	unsigned long flags;
1214 	int rc = TMF_RESP_FUNC_FAILED;
1215 
1216 	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1217 		return TMF_RESP_FUNC_FAILED;
1218 	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1219 
1220 	hisi_sas_internal_task_abort(hisi_hba, device,
1221 					HISI_SAS_INT_ABT_DEV, 0);
1222 	hisi_sas_dereg_device(hisi_hba, device);
1223 
1224 	rc = hisi_sas_debug_I_T_nexus_reset(device);
1225 
1226 	if (rc == TMF_RESP_FUNC_COMPLETE) {
1227 		spin_lock_irqsave(&hisi_hba->lock, flags);
1228 		hisi_sas_release_task(hisi_hba, device);
1229 		spin_unlock_irqrestore(&hisi_hba->lock, flags);
1230 	}
1231 	return rc;
1232 }
1233 
1234 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1235 {
1236 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1237 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1238 	struct device *dev = hisi_hba->dev;
1239 	unsigned long flags;
1240 	int rc = TMF_RESP_FUNC_FAILED;
1241 
1242 	sas_dev->dev_status = HISI_SAS_DEV_EH;
1243 	if (dev_is_sata(device)) {
1244 		struct sas_phy *phy;
1245 
1246 		/* Clear internal IO and then hardreset */
1247 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
1248 						  HISI_SAS_INT_ABT_DEV, 0);
1249 		if (rc == TMF_RESP_FUNC_FAILED)
1250 			goto out;
1251 		hisi_sas_dereg_device(hisi_hba, device);
1252 
1253 		phy = sas_get_local_phy(device);
1254 
1255 		rc = sas_phy_reset(phy, 1);
1256 
1257 		if (rc == 0) {
1258 			spin_lock_irqsave(&hisi_hba->lock, flags);
1259 			hisi_sas_release_task(hisi_hba, device);
1260 			spin_unlock_irqrestore(&hisi_hba->lock, flags);
1261 		}
1262 		sas_put_local_phy(phy);
1263 	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1265 
1266 		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1267 		if (rc == TMF_RESP_FUNC_COMPLETE) {
1268 			spin_lock_irqsave(&hisi_hba->lock, flags);
1269 			hisi_sas_release_task(hisi_hba, device);
1270 			spin_unlock_irqrestore(&hisi_hba->lock, flags);
1271 		}
1272 	}
1273 out:
1274 	if (rc != TMF_RESP_FUNC_COMPLETE)
1275 		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
1276 			     sas_dev->device_id, rc);
1277 	return rc;
1278 }
1279 
1280 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1281 {
1282 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1283 
1284 	return hisi_sas_controller_reset(hisi_hba);
1285 }
1286 
1287 static int hisi_sas_query_task(struct sas_task *task)
1288 {
1289 	struct scsi_lun lun;
1290 	struct hisi_sas_tmf_task tmf_task;
1291 	int rc = TMF_RESP_FUNC_FAILED;
1292 
1293 	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1294 		struct scsi_cmnd *cmnd = task->uldd_task;
1295 		struct domain_device *device = task->dev;
1296 		struct hisi_sas_slot *slot = task->lldd_task;
1297 		u32 tag = slot->idx;
1298 
1299 		int_to_scsilun(cmnd->device->lun, &lun);
1300 		tmf_task.tmf = TMF_QUERY_TASK;
1301 		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1302 
1303 		rc = hisi_sas_debug_issue_ssp_tmf(device,
1304 						  lun.scsi_lun,
1305 						  &tmf_task);
1306 		switch (rc) {
		/* The task is still in the LUN; release it */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN, or the TMF failed; reset the phy */
1310 		case TMF_RESP_FUNC_FAILED:
1311 		case TMF_RESP_FUNC_COMPLETE:
1312 			break;
1313 		default:
1314 			rc = TMF_RESP_FUNC_FAILED;
1315 			break;
1316 		}
1317 	}
1318 	return rc;
1319 }
1320 
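/*
 * Deliver an internal abort command to the HW. This follows the same
 * tag and delivery-queue slot allocation path as normal task prep, but
 * builds an abort command header targeting either a single command
 * (by tag) or a whole device.
 */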
1321 static int
1322 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1323 				  struct sas_task *task, int abort_flag,
1324 				  int task_tag)
1325 {
1326 	struct domain_device *device = task->dev;
1327 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1328 	struct device *dev = hisi_hba->dev;
1329 	struct hisi_sas_port *port;
1330 	struct hisi_sas_slot *slot;
1331 	struct asd_sas_port *sas_port = device->port;
1332 	struct hisi_sas_cmd_hdr *cmd_hdr_base;
1333 	struct hisi_sas_dq *dq = sas_dev->dq;
1334 	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1335 	unsigned long flags, flags_dq;
1336 
1337 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
1338 		return -EINVAL;
1339 
1340 	if (!device->port)
1341 		return -1;
1342 
1343 	port = to_hisi_sas_port(sas_port);
1344 
	/* simply get a slot and send the abort command */
1346 	spin_lock_irqsave(&hisi_hba->lock, flags);
1347 	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
1348 	if (rc) {
1349 		spin_unlock_irqrestore(&hisi_hba->lock, flags);
1350 		goto err_out;
1351 	}
1352 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
1353 
1354 	spin_lock_irqsave(&dq->lock, flags_dq);
1355 	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1356 	if (rc)
1357 		goto err_out_tag;
1358 
1359 	dlvry_queue = dq->id;
1360 	dlvry_queue_slot = dq->wr_point;
1361 
1362 	slot = &hisi_hba->slot_info[slot_idx];
1363 	memset(slot, 0, sizeof(struct hisi_sas_slot));
1364 
1365 	slot->idx = slot_idx;
1366 	slot->n_elem = n_elem;
1367 	slot->dlvry_queue = dlvry_queue;
1368 	slot->dlvry_queue_slot = dlvry_queue_slot;
1369 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1370 	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1371 	slot->task = task;
1372 	slot->port = port;
1373 	task->lldd_task = slot;
1374 
1375 	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
1376 			GFP_ATOMIC, &slot->buf_dma);
1377 	if (!slot->buf) {
1378 		rc = -ENOMEM;
1379 		goto err_out_tag;
1380 	}
1381 
1382 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1383 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1384 	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
1385 
1386 	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1387 				      abort_flag, task_tag);
1388 	if (rc)
		goto err_out_buf;

	list_add_tail(&slot->entry, &sas_dev->list);
1393 	spin_lock_irqsave(&task->task_state_lock, flags);
1394 	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1395 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1396 
1397 	dq->slot_prep = slot;
1398 
1399 	atomic64_inc(&sas_dev->running_req);
1400 
1401 	/* send abort command to the chip */
1402 	hisi_hba->hw->start_delivery(dq);
1403 	spin_unlock_irqrestore(&dq->lock, flags_dq);
1404 
1405 	return 0;
1406 
1407 err_out_buf:
1408 	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
1409 		slot->buf_dma);
1410 err_out_tag:
1411 	spin_lock_irqsave(&hisi_hba->lock, flags);
1412 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
1413 	spin_unlock_irqrestore(&hisi_hba->lock, flags);
1414 	spin_unlock_irqrestore(&dq->lock, flags_dq);
1415 err_out:
1416 	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1417 
1418 	return rc;
1419 }
1420 
1421 /**
1422  * hisi_sas_internal_task_abort -- execute an internal
1423  * abort command for single IO command or a device
1424  * @hisi_hba: host controller struct
1425  * @device: domain device
1426  * @abort_flag: mode of operation, device or single IO
1427  * @tag: tag of IO to be aborted (only relevant to single
1428  *       IO mode)
1429  */
1430 static int
1431 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1432 			     struct domain_device *device,
1433 			     int abort_flag, int tag)
1434 {
1435 	struct sas_task *task;
1436 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1437 	struct device *dev = hisi_hba->dev;
1438 	int res;
1439 
1440 	if (!hisi_hba->hw->prep_abort)
1441 		return -EOPNOTSUPP;
1442 
1443 	task = sas_alloc_slow_task(GFP_KERNEL);
1444 	if (!task)
1445 		return -ENOMEM;
1446 
1447 	task->dev = device;
1448 	task->task_proto = device->tproto;
1449 	task->task_done = hisi_sas_task_done;
1450 	task->slow_task->timer.data = (unsigned long)task;
1451 	task->slow_task->timer.function = hisi_sas_tmf_timedout;
1452 	task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
1453 	add_timer(&task->slow_task->timer);
1454 
1455 	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1456 						task, abort_flag, tag);
1457 	if (res) {
1458 		del_timer(&task->slow_task->timer);
1459 		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1460 			res);
1461 		goto exit;
1462 	}
1463 	wait_for_completion(&task->slow_task->completion);
1464 	res = TMF_RESP_FUNC_FAILED;
1465 
1466 	/* Internal abort timed out */
1467 	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1468 		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1469 			struct hisi_sas_slot *slot = task->lldd_task;
1470 
1471 			if (slot)
1472 				slot->task = NULL;
1473 			dev_err(dev, "internal task abort: timeout.\n");
1474 		}
1475 	}
1476 
1477 	if (task->task_status.resp == SAS_TASK_COMPLETE &&
1478 		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1479 		res = TMF_RESP_FUNC_COMPLETE;
1480 		goto exit;
1481 	}
1482 
1483 	if (task->task_status.resp == SAS_TASK_COMPLETE &&
1484 		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
1485 		res = TMF_RESP_FUNC_SUCC;
1486 		goto exit;
1487 	}
1488 
1489 exit:
1490 	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
1491 		"resp: 0x%x sts 0x%x\n",
1492 		SAS_ADDR(device->sas_addr),
1493 		task,
1494 		task->task_status.resp, /* 0 is complete, -1 is undelivered */
1495 		task->task_status.stat);
1496 	sas_free_task(task);
1497 
1498 	return res;
1499 }
1500 
1501 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1502 {
1503 	hisi_sas_port_notify_formed(sas_phy);
1504 }
1505 
1506 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1507 {
1508 	phy->phy_attached = 0;
1509 	phy->phy_type = 0;
1510 	phy->port = NULL;
1511 }
1512 
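/*
 * Handle a phy-down event. @rdy distinguishes "down but ready", where
 * the port is re-formed, from a real loss of signal, where the port
 * may need to be torn down.
 */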
1513 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1514 {
1515 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1516 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
1517 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1518 
1519 	if (rdy) {
1520 		/* Phy down but ready */
1521 		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
1522 		hisi_sas_port_notify_formed(sas_phy);
1523 	} else {
1524 		struct hisi_sas_port *port  = phy->port;
1525 
1526 		/* Phy down and not ready */
1527 		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1528 		sas_phy_disconnected(sas_phy);
1529 
1530 		if (port) {
1531 			if (phy->phy_type & PORT_TYPE_SAS) {
1532 				int port_id = port->id;
1533 
1534 				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
1535 								       port_id))
1536 					port->port_attached = 0;
1537 			} else if (phy->phy_type & PORT_TYPE_SATA)
1538 				port->port_attached = 0;
1539 		}
1540 		hisi_sas_phy_disconnected(phy);
1541 	}
1542 }
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

struct scsi_transport_template *hisi_sas_stt;
1547 EXPORT_SYMBOL_GPL(hisi_sas_stt);
1548 
1549 static struct scsi_host_template _hisi_sas_sht = {
1550 	.module			= THIS_MODULE,
1551 	.name			= DRV_NAME,
1552 	.queuecommand		= sas_queuecommand,
1553 	.target_alloc		= sas_target_alloc,
1554 	.slave_configure	= hisi_sas_slave_configure,
1555 	.scan_finished		= hisi_sas_scan_finished,
1556 	.scan_start		= hisi_sas_scan_start,
1557 	.change_queue_depth	= sas_change_queue_depth,
1558 	.bios_param		= sas_bios_param,
1559 	.can_queue		= 1,
1560 	.this_id		= -1,
1561 	.sg_tablesize		= SG_ALL,
1562 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
1563 	.use_clustering		= ENABLE_CLUSTERING,
1564 	.eh_device_reset_handler = sas_eh_device_reset_handler,
1565 	.eh_target_reset_handler = sas_eh_target_reset_handler,
1566 	.target_destroy		= sas_target_destroy,
1567 	.ioctl			= sas_ioctl,
1568 };
1569 struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1570 EXPORT_SYMBOL_GPL(hisi_sas_sht);
1571 
1572 static struct sas_domain_function_template hisi_sas_transport_ops = {
1573 	.lldd_dev_found		= hisi_sas_dev_found,
1574 	.lldd_dev_gone		= hisi_sas_dev_gone,
1575 	.lldd_execute_task	= hisi_sas_queue_command,
1576 	.lldd_control_phy	= hisi_sas_control_phy,
1577 	.lldd_abort_task	= hisi_sas_abort_task,
1578 	.lldd_abort_task_set	= hisi_sas_abort_task_set,
1579 	.lldd_clear_aca		= hisi_sas_clear_aca,
1580 	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
1581 	.lldd_lu_reset		= hisi_sas_lu_reset,
1582 	.lldd_query_task	= hisi_sas_query_task,
1583 	.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1584 	.lldd_port_formed	= hisi_sas_port_formed,
1585 };
1586 
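/*
 * Zero the DMA memories shared with the HW (queues, IOST, breakpoint
 * and initial FIS tables) and rewind the queue read/write pointers.
 */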
1587 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1588 {
1589 	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1590 
1591 	for (i = 0; i < hisi_hba->queue_count; i++) {
1592 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1593 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1594 
1595 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1596 		memset(hisi_hba->cmd_hdr[i], 0, s);
1597 		dq->wr_point = 0;
1598 
1599 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1600 		memset(hisi_hba->complete_hdr[i], 0, s);
1601 		cq->rd_point = 0;
1602 	}
1603 
1604 	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
1605 	memset(hisi_hba->initial_fis, 0, s);
1606 
1607 	s = max_command_entries * sizeof(struct hisi_sas_iost);
1608 	memset(hisi_hba->iost, 0, s);
1609 
1610 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1611 	memset(hisi_hba->breakpoint, 0, s);
1612 
1613 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1614 	memset(hisi_hba->sata_breakpoint, 0, s);
1615 }
1616 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
1617 
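/*
 * Allocate the per-HBA memories: delivery/completion queues, the slot
 * buffer DMA pool, the ITCT, IOST, breakpoint and initial-FIS tables,
 * the tag bitmap and the driver workqueue.
 */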
1618 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1619 {
1620 	struct device *dev = hisi_hba->dev;
1621 	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1622 
1623 	spin_lock_init(&hisi_hba->lock);
1624 	for (i = 0; i < hisi_hba->n_phy; i++) {
1625 		hisi_sas_phy_init(hisi_hba, i);
1626 		hisi_hba->port[i].port_attached = 0;
1627 		hisi_hba->port[i].id = -1;
1628 	}
1629 
1630 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1631 		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
1632 		hisi_hba->devices[i].device_id = i;
1633 		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
1634 	}
1635 
1636 	for (i = 0; i < hisi_hba->queue_count; i++) {
1637 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1638 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1639 
1640 		/* Completion queue structure */
1641 		cq->id = i;
1642 		cq->hisi_hba = hisi_hba;
1643 
1644 		/* Delivery queue structure */
1645 		dq->id = i;
1646 		dq->hisi_hba = hisi_hba;
1647 
1648 		/* Delivery queue */
1649 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1650 		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1651 					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1652 		if (!hisi_hba->cmd_hdr[i])
1653 			goto err_out;
1654 
1655 		/* Completion queue */
1656 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1657 		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1658 				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1659 		if (!hisi_hba->complete_hdr[i])
1660 			goto err_out;
1661 	}
1662 
1663 	s = sizeof(struct hisi_sas_slot_buf_table);
1664 	hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
1665 	if (!hisi_hba->buffer_pool)
1666 		goto err_out;
1667 
1668 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1669 	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1670 					    GFP_KERNEL);
1671 	if (!hisi_hba->itct)
1672 		goto err_out;
1673 
1674 	memset(hisi_hba->itct, 0, s);
1675 
1676 	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
1677 					   sizeof(struct hisi_sas_slot),
1678 					   GFP_KERNEL);
1679 	if (!hisi_hba->slot_info)
1680 		goto err_out;
1681 
1682 	s = max_command_entries * sizeof(struct hisi_sas_iost);
1683 	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1684 					    GFP_KERNEL);
1685 	if (!hisi_hba->iost)
1686 		goto err_out;
1687 
1688 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1689 	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1690 				&hisi_hba->breakpoint_dma, GFP_KERNEL);
1691 	if (!hisi_hba->breakpoint)
1692 		goto err_out;
1693 
1694 	hisi_hba->slot_index_count = max_command_entries;
1695 	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1696 	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1697 	if (!hisi_hba->slot_index_tags)
1698 		goto err_out;
1699 
1700 	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1701 	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1702 				&hisi_hba->initial_fis_dma, GFP_KERNEL);
1703 	if (!hisi_hba->initial_fis)
1704 		goto err_out;
1705 
1706 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1707 	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1708 				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1709 	if (!hisi_hba->sata_breakpoint)
1710 		goto err_out;
1711 	hisi_sas_init_mem(hisi_hba);
1712 
1713 	hisi_sas_slot_index_init(hisi_hba);
1714 
1715 	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1716 	if (!hisi_hba->wq) {
1717 		dev_err(dev, "sas_alloc: failed to create workqueue\n");
1718 		goto err_out;
1719 	}
1720 
1721 	return 0;
1722 err_out:
1723 	return -ENOMEM;
1724 }
1725 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
1726 
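/* Free everything allocated in hisi_sas_alloc(). */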
1727 void hisi_sas_free(struct hisi_hba *hisi_hba)
1728 {
1729 	struct device *dev = hisi_hba->dev;
1730 	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
1731 
1732 	for (i = 0; i < hisi_hba->queue_count; i++) {
1733 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1734 		if (hisi_hba->cmd_hdr[i])
1735 			dma_free_coherent(dev, s,
1736 					  hisi_hba->cmd_hdr[i],
1737 					  hisi_hba->cmd_hdr_dma[i]);
1738 
1739 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1740 		if (hisi_hba->complete_hdr[i])
1741 			dma_free_coherent(dev, s,
1742 					  hisi_hba->complete_hdr[i],
1743 					  hisi_hba->complete_hdr_dma[i]);
1744 	}
1745 
1746 	dma_pool_destroy(hisi_hba->buffer_pool);
1747 
1748 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1749 	if (hisi_hba->itct)
1750 		dma_free_coherent(dev, s,
1751 				  hisi_hba->itct, hisi_hba->itct_dma);
1752 
1753 	s = max_command_entries * sizeof(struct hisi_sas_iost);
1754 	if (hisi_hba->iost)
1755 		dma_free_coherent(dev, s,
1756 				  hisi_hba->iost, hisi_hba->iost_dma);
1757 
1758 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1759 	if (hisi_hba->breakpoint)
1760 		dma_free_coherent(dev, s,
1761 				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1766 	if (hisi_hba->initial_fis)
1767 		dma_free_coherent(dev, s,
1768 				  hisi_hba->initial_fis,
1769 				  hisi_hba->initial_fis_dma);
1770 
1771 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
1772 	if (hisi_hba->sata_breakpoint)
1773 		dma_free_coherent(dev, s,
1774 				  hisi_hba->sata_breakpoint,
1775 				  hisi_hba->sata_breakpoint_dma);
1776 
1777 	if (hisi_hba->wq)
1778 		destroy_workqueue(hisi_hba->wq);
1779 }
1780 EXPORT_SYMBOL_GPL(hisi_sas_free);
1781 
static void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}

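/**
 * hisi_sas_get_fw_info() - read controller configuration from firmware
 * @hisi_hba: host to configure
 *
 * Reads the SAS address, PHY count and queue count (and, for DT-based
 * platform devices, the syscon regmap and controller reset/clock
 * registers) from device properties.  The reference clock is optional.
 *
 * Return: 0 on success, -ENOENT if a required property is missing.
 */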
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controllers with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

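/*
 * Allocate and partially initialise the Scsi_Host: read the firmware
 * configuration, set the DMA mask, map the register space and allocate
 * the host memories.  On failure the shost reference is dropped and
 * NULL is returned.
 */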
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	init_timer(&hisi_hba->timer);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

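	/* Prefer 64-bit DMA addressing; fall back to 32-bit if unavailable */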
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

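/* Copy the host SAS address to every PHY as its default address */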
void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		memcpy(&hisi_hba->phy[i].dev_sas_addr,
		       hisi_hba->sas_addr,
		       SAS_ADDR_SIZE);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_add);

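/**
 * hisi_sas_probe() - common probe for platform device-based controllers
 * @pdev: the platform device
 * @hw: the hw-specific operations for this controller version
 *
 * Allocates the Scsi_Host and libsas structures, registers the host
 * with the SCSI midlayer and libsas, initialises the hardware and then
 * scans the host.
 */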
int hisi_sas_probe(struct platform_device *pdev,
			 const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

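	/* Fill in the generic SCSI host parameters and queue limits */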
	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_hw_init;

	scsi_scan_host(shost);

	return 0;

err_out_hw_init:
	sas_unregister_ha(sha);
err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

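/* Tear down in reverse order of probe: libsas first, then the host */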
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	sas_unregister_ha(sha);
	sas_remove_host(shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

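/* Register the libsas transport template at module load */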
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);