// SPDX-License-Identifier: GPL-2.0-only
/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 */

#include "fjes_hw.h"
#include "fjes.h"
#include "fjes_trace.h"

static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);

/* supported MTU list */
const u32 fjes_support_mtu[] = {
	FJES_MTU_DEFINE(8 * 1024),
	FJES_MTU_DEFINE(16 * 1024),
	FJES_MTU_DEFINE(32 * 1024),
	FJES_MTU_DEFINE(64 * 1024),
	0
};

u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
{
	u8 *base = hw->base;
	u32 value = 0;

	value = readl(&base[reg]);

	return value;
}

static u8 *fjes_hw_iomap(struct fjes_hw *hw)
{
	u8 *base;

	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
				fjes_driver_name)) {
		pr_err("request_mem_region failed\n");
		return NULL;
	}

	base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size);

	return base;
}

static void fjes_hw_iounmap(struct fjes_hw *hw)
{
	iounmap(hw->base);
	release_mem_region(hw->hw_res.start, hw->hw_res.size);
}

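/*
 * Trigger a device reset via the DCTL register, then poll once per second
 * until the hardware clears the reset bit or the timeout expires.
 */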
int fjes_hw_reset(struct fjes_hw *hw)
{
	union REG_DCTL dctl;
	int timeout;

	dctl.reg = 0;
	dctl.bits.reset = 1;
	wr32(XSCT_DCTL, dctl.reg);

	timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
	dctl.reg = rd32(XSCT_DCTL);
	while ((dctl.bits.reset == 1) && (timeout > 0)) {
		msleep(1000);
		dctl.reg = rd32(XSCT_DCTL);
		timeout -= 1000;
	}

	return timeout > 0 ? 0 : -EIO;
}

static int fjes_hw_get_max_epid(struct fjes_hw *hw)
{
	union REG_MAX_EP info;

	info.reg = rd32(XSCT_MAX_EP);

	return info.bits.maxep;
}

static int fjes_hw_get_my_epid(struct fjes_hw *hw)
{
	union REG_OWNER_EPID info;

	info.reg = rd32(XSCT_OWNER_EPID);

	return info.bits.epid;
}

static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
{
	size_t size;

	size = sizeof(struct fjes_device_shared_info) +
	    (sizeof(u8) * hw->max_epid);
	hw->hw_info.share = kzalloc(size, GFP_KERNEL);
	if (!hw->hw_info.share)
		return -ENOMEM;

	hw->hw_info.share->epnum = hw->max_epid;

	return 0;
}

static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
{
	kfree(hw->hw_info.share);
	hw->hw_info.share = NULL;
}

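/*
 * Each EP buffer is a single vzalloc() area: a union ep_buffer_info header
 * followed by the frame ring itself.
 */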
static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
{
	void *mem;

	mem = vzalloc(EP_BUFFER_SIZE);
	if (!mem)
		return -ENOMEM;

	epbh->buffer = mem;
	epbh->size = EP_BUFFER_SIZE;

	epbh->info = (union ep_buffer_info *)mem;
	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));

	return 0;
}

static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
{
	vfree(epbh->buffer);
	epbh->buffer = NULL;
	epbh->size = 0;

	epbh->info = NULL;
	epbh->ring = NULL;
}

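/*
 * (Re)initialize an EP buffer header: the VLAN ID table is preserved across
 * the memset, the ring is reset to its empty state (head = 0, tail = 1), and
 * frame_max/count_max are recalculated for the given MTU before the buffer
 * is marked as having completed the MTU change.
 */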
void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, const u8 *mac_addr,
			 u32 mtu)
{
	union ep_buffer_info *info = epbh->info;
	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		vlan_id[i] = info->v1i.vlan_id[i];

	memset(info, 0, sizeof(union ep_buffer_info));

	info->v1i.version = 0;  /* version 0 */

	for (i = 0; i < ETH_ALEN; i++)
		info->v1i.mac_addr[i] = mac_addr[i];

	info->v1i.head = 0;
	info->v1i.tail = 1;

	info->v1i.info_size = sizeof(union ep_buffer_info);
	info->v1i.buffer_size = epbh->size - info->v1i.info_size;

	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
	info->v1i.count_max =
	    EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		info->v1i.vlan_id[i] = vlan_id[i];

	info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
}

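/*
 * Program the command interface: buffer lengths plus the 64-bit physical
 * addresses of the request, response and shared-status areas, each split
 * into low/high 32-bit register halves.
 */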
void
fjes_hw_init_command_registers(struct fjes_hw *hw,
			       struct fjes_device_command_param *param)
{
	/* Request Buffer length */
	wr32(XSCT_REQBL, (__le32)(param->req_len));
	/* Response Buffer Length */
	wr32(XSCT_RESPBL, (__le32)(param->res_len));

	/* Request Buffer Address */
	wr32(XSCT_REQBAL,
	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_REQBAH,
	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));

	/* Response Buffer Address */
	wr32(XSCT_RESPBAL,
	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_RESPBAH,
	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));

	/* Share status address */
	wr32(XSCT_SHSTSAL,
	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_SHSTSAH,
	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}

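/*
 * Allocate the per-EP shared memory table, the command request/response
 * buffers and the shared status region, set up TX/RX EP buffers for every
 * peer EPID, and finally hand the physical addresses to the device via the
 * command registers.
 */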
static int fjes_hw_setup(struct fjes_hw *hw)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	unsigned long flags;
	size_t mem_size;
	int result;
	int epidx;
	void *buf;

	hw->hw_info.max_epid = &hw->max_epid;
	hw->hw_info.my_epid = &hw->my_epid;

	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	hw->ep_shm_info = (struct ep_share_mem_info *)buf;

	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.req_buf))
		return -ENOMEM;

	hw->hw_info.req_buf_size = mem_size;

	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.res_buf))
		return -ENOMEM;

	hw->hw_info.res_buf_size = mem_size;

	result = fjes_hw_alloc_shared_status_region(hw);
	if (result)
		return result;

	hw->hw_info.buffer_share_bit = 0;
	hw->hw_info.buffer_unshare_reserve_bit = 0;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx != hw->my_epid) {
			buf_pair = &hw->ep_shm_info[epidx];

			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
			if (result)
				return result;

			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
			if (result)
				return result;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
					    fjes_support_mtu[0]);
			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
					    fjes_support_mtu[0]);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}
	}

	memset(&param, 0, sizeof(param));

	param.req_len = hw->hw_info.req_buf_size;
	param.req_start = __pa(hw->hw_info.req_buf);
	param.res_len = hw->hw_info.res_buf_size;
	param.res_start = __pa(hw->hw_info.res_buf);

	param.share_start = __pa(hw->hw_info.share->ep_status);

	fjes_hw_init_command_registers(hw, &param);

	return 0;
}

static void fjes_hw_cleanup(struct fjes_hw *hw)
{
	int epidx;

	if (!hw->ep_shm_info)
		return;

	fjes_hw_free_shared_status_region(hw);

	kfree(hw->hw_info.req_buf);
	hw->hw_info.req_buf = NULL;

	kfree(hw->hw_info.res_buf);
	hw->hw_info.res_buf = NULL;

	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
	}

	kfree(hw->ep_shm_info);
	hw->ep_shm_info = NULL;
}

int fjes_hw_init(struct fjes_hw *hw)
{
	int ret;

	hw->base = fjes_hw_iomap(hw);
	if (!hw->base)
		return -EIO;

	ret = fjes_hw_reset(hw);
	if (ret)
		return ret;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);

	mutex_init(&hw->hw_info.lock);
	spin_lock_init(&hw->rx_status_lock);

	hw->max_epid = fjes_hw_get_max_epid(hw);
	hw->my_epid = fjes_hw_get_my_epid(hw);

	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
		return -ENXIO;

	ret = fjes_hw_setup(hw);

	hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE);
	hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;

	return ret;
}

void fjes_hw_exit(struct fjes_hw *hw)
{
	int ret;

	if (hw->base) {

		if (hw->debug_mode) {
			/* disable debug mode */
			mutex_lock(&hw->hw_info.lock);
			fjes_hw_stop_debug(hw);
			mutex_unlock(&hw->hw_info.lock);
		}
		vfree(hw->hw_info.trace);
		hw->hw_info.trace = NULL;
		hw->hw_info.trace_size = 0;
		hw->debug_mode = 0;

		ret = fjes_hw_reset(hw);
		if (ret)
			pr_err("%s: reset error", __func__);

		fjes_hw_iounmap(hw);
		hw->base = NULL;
	}

	fjes_hw_cleanup(hw);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);
}

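/*
 * Write a command request to the CR register and, unless the hardware
 * reports an immediate error, poll the CS register once per second until
 * the command completes or FJES_COMMAND_REQ_TIMEOUT expires.
 */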
static enum fjes_dev_command_response_e
fjes_hw_issue_request_command(struct fjes_hw *hw,
			      enum fjes_dev_command_request_type type)
{
	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
	union REG_CR cr;
	union REG_CS cs;
	int timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;

	cr.reg = 0;
	cr.bits.req_start = 1;
	cr.bits.req_code = type;
	wr32(XSCT_CR, cr.reg);
	cr.reg = rd32(XSCT_CR);

	if (cr.bits.error == 0) {
		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
		cs.reg = rd32(XSCT_CS);

		while ((cs.bits.complete != 1) && timeout > 0) {
			msleep(1000);
			cs.reg = rd32(XSCT_CS);
			timeout -= 1000;
		}

		if (cs.bits.complete == 1)
			ret = FJES_CMD_STATUS_NORMAL;
		else if (timeout <= 0)
			ret = FJES_CMD_STATUS_TIMEOUT;

	} else {
		switch (cr.bits.err_info) {
		case FJES_CMD_REQ_ERR_INFO_PARAM:
			ret = FJES_CMD_STATUS_ERROR_PARAM;
			break;
		case FJES_CMD_REQ_ERR_INFO_STATUS:
			ret = FJES_CMD_STATUS_ERROR_STATUS;
			break;
		default:
			ret = FJES_CMD_STATUS_UNKNOWN;
			break;
		}
	}

	trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret);

	return ret;
}

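/*
 * Issue the INFO command and translate the response into an errno; the
 * response is rejected up front if its length does not match what is
 * expected for max_epid endpoints.
 */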
int fjes_hw_request_info(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;

	res_buf->info.length = 0;
	res_buf->info.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
	trace_fjes_hw_request_info(hw, res_buf);

	result = 0;

	if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
		res_buf->info.length) {
		trace_fjes_hw_request_info_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->info.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_request_info_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_ERROR_STATUS:
			result = -EPERM;
			break;
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

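/*
 * Share this pair of EP buffers with dest_epid: the request carries the
 * TX/RX buffer sizes plus the physical address of every EP_BUFFER_INFO_SIZE
 * chunk (the buffers are vmalloc'ed, hence the per-chunk vmalloc_to_page()
 * translation).  While the peer answers BUSY the command is reissued, with
 * the retry interval offset by the local EPID, presumably so that endpoints
 * do not retry in lock step.
 */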
int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
			       struct ep_share_mem_info *buf_pair)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int timeout;
	int i, idx;
	void *addr;
	int result;

	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
						buf_pair->tx.size,
						buf_pair->rx.size);
	req_buf->share_buffer.epid = dest_epid;

	idx = 0;
	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->tx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
				(__le64)(page_to_phys(vmalloc_to_page(addr)) +
						offset_in_page(addr));
	}

	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->rx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
				(__le64)(page_to_phys(vmalloc_to_page(addr)) +
						offset_in_page(addr));
	}

	res_buf->share_buffer.length = 0;
	res_buf->share_buffer.code = 0;

	trace_fjes_hw_register_buff_addr_req(req_buf, buf_pair);

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->share_buffer.length ==
		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
			msleep(200 + hw->my_epid * 20);
			timeout -= (200 + hw->my_epid * 20);

			res_buf->share_buffer.length = 0;
			res_buf->share_buffer.code = 0;

			ret = fjes_hw_issue_request_command(
					hw, FJES_CMD_REQ_SHARE_BUFFER);
	}

	result = 0;

	trace_fjes_hw_register_buff_addr(res_buf, timeout);

	if (res_buf->share_buffer.length !=
			FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) {
		trace_fjes_hw_register_buff_addr_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->share_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			trace_fjes_hw_register_buff_addr_err("Busy Timeout");
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_register_buff_addr_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

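/*
 * Undo a previous SHARE_BUFFER for dest_epid, using the same BUSY
 * retry/backoff scheme as registration; the share bit is cleared only on a
 * NORMAL response.
 */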
int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	struct fjes_device_shared_info *share = hw->hw_info.share;
	enum fjes_dev_command_response_e ret;
	int timeout;
	int result;

	if (!hw->base)
		return -EPERM;

	if (!req_buf || !res_buf || !share)
		return -EPERM;

	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->unshare_buffer.length =
			FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
	req_buf->unshare_buffer.epid = dest_epid;

	res_buf->unshare_buffer.length = 0;
	res_buf->unshare_buffer.code = 0;

	trace_fjes_hw_unregister_buff_addr_req(req_buf);
	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->unshare_buffer.length ==
		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
	       (res_buf->unshare_buffer.code ==
		FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->unshare_buffer.length = 0;
		res_buf->unshare_buffer.code = 0;

		ret =
		fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
	}

	result = 0;

	trace_fjes_hw_unregister_buff_addr(res_buf, timeout);

	if (res_buf->unshare_buffer.length !=
			FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
		trace_fjes_hw_unregister_buff_addr_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->unshare_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			trace_fjes_hw_unregister_buff_addr_err("Busy Timeout");
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_unregister_buff_addr_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
			    enum REG_ICTL_MASK  mask)
{
	u32 ig = mask | dest_epid;

	wr32(XSCT_IG, cpu_to_le32(ig));

	return 0;
}

u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
{
	u32 cur_is;

	cur_is = rd32(XSCT_IS);

	return cur_is;
}

void fjes_hw_set_irqmask(struct fjes_hw *hw,
			 enum REG_ICTL_MASK intr_mask, bool mask)
{
	if (mask)
		wr32(XSCT_IMS, intr_mask);
	else
		wr32(XSCT_IMC, intr_mask);
}

bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
{
	if (epid >= hw->max_epid)
		return false;

	if ((hw->ep_shm_info[epid].es_status !=
			FJES_ZONING_STATUS_ENABLE) ||
		(hw->ep_shm_info[hw->my_epid].zone ==
			FJES_ZONING_ZONE_TYPE_NONE))
		return false;
	else
		return (hw->ep_shm_info[epid].zone ==
				hw->ep_shm_info[hw->my_epid].zone);
}

int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
			   int dest_epid)
{
	int value = false;

	if (dest_epid < share->epnum)
		value = share->ep_status[dest_epid];

	return value;
}

static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
{
	return test_bit(src_epid, &hw->txrx_stop_req_bit);
}

static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
{
	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
			FJES_RX_STOP_REQ_DONE);
}

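/*
 * Classify a peer endpoint: UNSHARE when its buffer is not shared with us,
 * WAITING while a TX/RX stop request for it is pending in txrx_stop_req_bit,
 * COMPLETE once FJES_RX_STOP_REQ_DONE is set in the corresponding TX buffer
 * status, and SHARED otherwise.
 */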
enum ep_partner_status
fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
{
	enum ep_partner_status status;

	if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
		if (fjes_hw_epid_is_stop_requested(hw, epid)) {
			status = EP_PARTNER_WAITING;
		} else {
			if (fjes_hw_epid_is_stop_process_done(hw, epid))
				status = EP_PARTNER_COMPLETE;
			else
				status = EP_PARTNER_SHARED;
		}
	} else {
		status = EP_PARTNER_UNSHARE;
	}

	return status;
}

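/*
 * Ask every peer to stop TX/RX: raise a stop-request interrupt towards peers
 * that are currently SHARED, reserve all peers for unsharing, and mark the
 * stop request in each TX buffer status.
 */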
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	unsigned long flags;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		status = fjes_hw_get_partner_ep_status(hw, epidx);
		switch (status) {
		case EP_PARTNER_SHARED:
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);
			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
			break;
		default:
			break;
		}

		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		set_bit(epidx, &hw->txrx_stop_req_bit);

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_REQUEST;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
	}
}

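/*
 * Wait in 100 ms steps until every reserved peer has either unshared its
 * buffer or acknowledged the stop request, or until
 * FJES_COMMAND_EPSTOP_WAIT_TIMEOUT expires; any reserve bits still set are
 * cleared before returning (-EBUSY on timeout).
 */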
int fjes_hw_wait_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	union ep_buffer_info *info;
	int wait_time = 0;
	int epidx;

	while (hw->hw_info.buffer_unshare_reserve_bit &&
	       (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			status = fjes_hw_epid_is_shared(hw->hw_info.share,
							epidx);
			info = hw->ep_shm_info[epidx].rx.info;
			if ((!status ||
			     (info->v1i.rx_status &
			      FJES_RX_STOP_REQ_DONE)) &&
			    test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				clear_bit(epidx,
					  &hw->hw_info.buffer_unshare_reserve_bit);
			}
		}

		msleep(100);
		wait_time += 100;
	}

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
	}

	return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
			? 0 : -EBUSY;
}

bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
{
	union ep_buffer_info *info = epbh->info;

	return (info->common.version == version);
}

bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;

	return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
		info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
}

bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	bool ret = false;
	int i;

	if (vlan_id == 0) {
		ret = true;
	} else {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i]) {
				ret = true;
				break;
			}
		}
	}
	return ret;
}

bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
		if (info->v1i.vlan_id[i] == 0) {
			info->v1i.vlan_id[i] = vlan_id;
			return true;
		}
	}
	return false;
}

void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	if (0 != vlan_id) {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i])
				info->v1i.vlan_id[i] = 0;
		}
	}
}

bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
		return true;

	if (info->v1i.count_max == 0)
		return true;

	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
			     info->v1i.count_max);
}

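/*
 * Return the address and size of the frame currently at the RX ring head
 * without consuming it; the head is advanced separately by
 * fjes_hw_epbuf_rx_curpkt_drop().
 */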
void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
				       size_t *psize)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;
	void *frame;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
					     (info->v1i.head,
					      info->v1i.count_max) *
					     info->v1i.frame_max]);

	*psize = (size_t)ring_frame->frame_size;

	frame = ring_frame->frame_data;

	return frame;
}

void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (fjes_hw_epbuf_rx_is_empty(epbh))
		return;

	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
}

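/*
 * Copy a frame into the TX ring slot at (tail - 1) and advance the tail;
 * returns -ENOBUFS when the ring is full.
 */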
int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
			      void *frame, size_t size)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;

	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
		return -ENOBUFS;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
					     (info->v1i.tail - 1,
					      info->v1i.count_max) *
					     info->v1i.frame_max]);

	ring_frame->frame_size = size;
	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);

	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);

	return 0;
}

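/*
 * Worker: refresh zoning information via the INFO command and, for each
 * peer, decide whether its buffer must be newly shared, unshared, or sent a
 * TX/RX stop request, based on the peer's enable status and zone relative to
 * the local endpoint.  On a failed INFO request the adapter's force-close
 * task is scheduled with force_reset set.
 */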
static void fjes_hw_update_zone_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work,
			struct fjes_hw, update_zone_task);

	struct my_s {u8 es_status; u8 zone; } *info;
	union fjes_device_command_res *res_buf;
	enum ep_partner_status pstatus;

	struct fjes_adapter *adapter;
	struct net_device *netdev;
	unsigned long flags;

	ulong unshare_bit = 0;
	ulong share_bit = 0;
	ulong irq_bit = 0;

	int epidx;
	int ret;

	adapter = (struct fjes_adapter *)hw->back;
	netdev = adapter->netdev;
	res_buf = hw->hw_info.res_buf;
	info = (struct my_s *)&res_buf->info.info;

	mutex_lock(&hw->hw_info.lock);

	ret = fjes_hw_request_info(hw);
	switch (ret) {
	case -ENOMSG:
	case -EBUSY:
	default:
		if (!work_pending(&adapter->force_close_task)) {
			adapter->force_reset = true;
			schedule_work(&adapter->force_close_task);
		}
		break;

	case 0:

		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid) {
				hw->ep_shm_info[epidx].es_status =
					info[epidx].es_status;
				hw->ep_shm_info[epidx].zone =
					info[epidx].zone;
				continue;
			}

			pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
			switch (pstatus) {
			case EP_PARTNER_UNSHARE:
			default:
				if ((info[epidx].zone !=
					FJES_ZONING_ZONE_TYPE_NONE) &&
				    (info[epidx].es_status ==
					FJES_ZONING_STATUS_ENABLE) &&
				    (info[epidx].zone ==
					info[hw->my_epid].zone))
					set_bit(epidx, &share_bit);
				else
					set_bit(epidx, &unshare_bit);
				break;

			case EP_PARTNER_COMPLETE:
			case EP_PARTNER_WAITING:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone)) {
					set_bit(epidx,
						&adapter->unshare_watch_bitmask);
					set_bit(epidx,
						&hw->hw_info.buffer_unshare_reserve_bit);
				}
				break;

			case EP_PARTNER_SHARED:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone))
					set_bit(epidx, &irq_bit);
				break;
			}

			hw->ep_shm_info[epidx].es_status =
				info[epidx].es_status;
			hw->ep_shm_info[epidx].zone = info[epidx].zone;
		}
		break;
	}

	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (test_bit(epidx, &share_bit)) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_register_buff_addr(
				hw, epidx, &hw->ep_shm_info[epidx]);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
					  &adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);

			hw->ep_shm_info[epidx].ep_stats
					      .com_regist_buf_exec += 1;
		}

		if (test_bit(epidx, &unshare_bit)) {
			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_unregister_buff_addr(hw, epidx);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
					  &adapter->force_close_task);
				}
				break;
			}

			mutex_unlock(&hw->hw_info.lock);

			hw->ep_shm_info[epidx].ep_stats
					      .com_unregist_buf_exec += 1;

			if (ret == 0) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}

		if (test_bit(epidx, &irq_bit)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);

			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;

			set_bit(epidx, &hw->txrx_stop_req_bit);
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[epidx].tx.
				info->v1i.rx_status |=
					FJES_RX_STOP_REQ_REQUEST;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		}
	}

	if (irq_bit || adapter->unshare_watch_bitmask) {
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
	}
}

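/*
 * Worker: acknowledge pending EP stop requests by setting
 * FJES_RX_STOP_REQ_DONE in the affected TX buffers, then hand the endpoints
 * over to the adapter's unshare watch task.
 */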
static void fjes_hw_epstop_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
	unsigned long flags;

	ulong remain_bit;
	int epid_bit;

	while ((remain_bit = hw->epstop_req_bit)) {
		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
			if (remain_bit & 1) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epid_bit].
					tx.info->v1i.rx_status |=
						FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epid_bit, &hw->epstop_req_bit);
				set_bit(epid_bit,
					&adapter->unshare_watch_bitmask);

				if (!work_pending(&adapter->unshare_watch_task))
					queue_work(
						adapter->control_wq,
						&adapter->unshare_watch_task);
			}
		}
	}
}

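/*
 * Start hardware tracing: the START_DEBUG request passes the device the
 * physical address of each FJES_DEBUG_PAGE_SIZE chunk of the vzalloc'ed
 * trace buffer.
 */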
int fjes_hw_start_debug(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int result = 0;
	void *addr;
	int i;

	if (!hw->hw_info.trace)
		return -EPERM;
	memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE);

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->start_trace.length =
		FJES_DEV_COMMAND_START_DBG_REQ_LEN(hw->hw_info.trace_size);
	req_buf->start_trace.mode = hw->debug_mode;
	req_buf->start_trace.buffer_len = hw->hw_info.trace_size;
	page_count = hw->hw_info.trace_size / FJES_DEBUG_PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE;
		req_buf->start_trace.buffer[i] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
			offset_in_page(addr));
	}

	res_buf->start_trace.length = 0;
	res_buf->start_trace.code = 0;

	trace_fjes_hw_start_debug_req(req_buf);
	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_START_DEBUG);
	trace_fjes_hw_start_debug(res_buf);

	if (res_buf->start_trace.length !=
		FJES_DEV_COMMAND_START_DBG_RES_LEN) {
		result = -ENOMSG;
		trace_fjes_hw_start_debug_err("Invalid res_buf");
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->start_trace.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_start_debug_err("Busy Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

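/*
 * Stop hardware tracing with the STOP_DEBUG command; debug_mode is cleared
 * only on a NORMAL response.
 */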
int fjes_hw_stop_debug(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result = 0;

	if (!hw->hw_info.trace)
		return -EPERM;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);
	req_buf->stop_trace.length = FJES_DEV_COMMAND_STOP_DBG_REQ_LEN;

	res_buf->stop_trace.length = 0;
	res_buf->stop_trace.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_STOP_DEBUG);
	trace_fjes_hw_stop_debug(res_buf);

	if (res_buf->stop_trace.length != FJES_DEV_COMMAND_STOP_DBG_RES_LEN) {
		trace_fjes_hw_stop_debug_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->stop_trace.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			hw->debug_mode = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			trace_fjes_hw_stop_debug_err("Busy Timeout");
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
1301