// SPDX-License-Identifier: GPL-2.0-only
/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 */

#include "fjes_hw.h"
#include "fjes.h"
#include "fjes_trace.h"

static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);

/* supported MTU list */
const u32 fjes_support_mtu[] = {
	FJES_MTU_DEFINE(8 * 1024),
	FJES_MTU_DEFINE(16 * 1024),
	FJES_MTU_DEFINE(32 * 1024),
	FJES_MTU_DEFINE(64 * 1024),
	0
};

u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
{
	u8 *base = hw->base;
	u32 value = 0;

	value = readl(&base[reg]);

	return value;
}

static u8 *fjes_hw_iomap(struct fjes_hw *hw)
{
	u8 *base;

	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
				fjes_driver_name)) {
		pr_err("request_mem_region failed\n");
		return NULL;
	}

	base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size);

	return base;
}

static void fjes_hw_iounmap(struct fjes_hw *hw)
{
	iounmap(hw->base);
	release_mem_region(hw->hw_res.start, hw->hw_res.size);
}

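/*
 * Assert the reset bit in the device control register and poll once per
 * second until the hardware clears it or FJES_DEVICE_RESET_TIMEOUT expires.
 * Returns 0 on success, -EIO on timeout.
 */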
int fjes_hw_reset(struct fjes_hw *hw)
{
	union REG_DCTL dctl;
	int timeout;

	dctl.reg = 0;
	dctl.bits.reset = 1;
	wr32(XSCT_DCTL, dctl.reg);

	timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
	dctl.reg = rd32(XSCT_DCTL);
	while ((dctl.bits.reset == 1) && (timeout > 0)) {
		msleep(1000);
		dctl.reg = rd32(XSCT_DCTL);
		timeout -= 1000;
	}

	return timeout > 0 ? 0 : -EIO;
}

static int fjes_hw_get_max_epid(struct fjes_hw *hw)
{
	union REG_MAX_EP info;

	info.reg = rd32(XSCT_MAX_EP);

	return info.bits.maxep;
}

static int fjes_hw_get_my_epid(struct fjes_hw *hw)
{
	union REG_OWNER_EPID info;

	info.reg = rd32(XSCT_OWNER_EPID);

	return info.bits.epid;
}

static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
{
	size_t size;

	size = sizeof(struct fjes_device_shared_info) +
	    (sizeof(u8) * hw->max_epid);
	hw->hw_info.share = kzalloc(size, GFP_KERNEL);
	if (!hw->hw_info.share)
		return -ENOMEM;

	hw->hw_info.share->epnum = hw->max_epid;

	return 0;
}

static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
{
	kfree(hw->hw_info.share);
	hw->hw_info.share = NULL;
}

static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
{
	void *mem;

	mem = vzalloc(EP_BUFFER_SIZE);
	if (!mem)
		return -ENOMEM;

	epbh->buffer = mem;
	epbh->size = EP_BUFFER_SIZE;

	epbh->info = (union ep_buffer_info *)mem;
	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));

	return 0;
}

static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
{
	vfree(epbh->buffer);
	epbh->buffer = NULL;
	epbh->size = 0;

	epbh->info = NULL;
	epbh->ring = NULL;
}

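/*
 * (Re)initialize an endpoint buffer header: the VLAN table is preserved
 * across the memset, the MAC address and MTU-derived frame/ring limits are
 * written back, and FJES_RX_MTU_CHANGING_DONE is set to signal that the MTU
 * change has completed.
 */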
void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;
	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		vlan_id[i] = info->v1i.vlan_id[i];

	memset(info, 0, sizeof(union ep_buffer_info));

	info->v1i.version = 0;  /* version 0 */

	for (i = 0; i < ETH_ALEN; i++)
		info->v1i.mac_addr[i] = mac_addr[i];

	info->v1i.head = 0;
	info->v1i.tail = 1;

	info->v1i.info_size = sizeof(union ep_buffer_info);
	info->v1i.buffer_size = epbh->size - info->v1i.info_size;

	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
	info->v1i.count_max =
	    EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		info->v1i.vlan_id[i] = vlan_id[i];

	info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
}

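/*
 * Program the command interface registers with the lengths and physical
 * addresses of the request/response buffers and the shared status region.
 * The 64-bit addresses are split across the *BAL/*BAH register pairs.
 */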
void
fjes_hw_init_command_registers(struct fjes_hw *hw,
			       struct fjes_device_command_param *param)
{
	/* Request Buffer length */
	wr32(XSCT_REQBL, (__le32)(param->req_len));
	/* Response Buffer Length */
	wr32(XSCT_RESPBL, (__le32)(param->res_len));

	/* Request Buffer Address */
	wr32(XSCT_REQBAL,
	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_REQBAH,
	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));

	/* Response Buffer Address */
	wr32(XSCT_RESPBAL,
	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_RESPBAH,
	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));

	/* Share status address */
	wr32(XSCT_SHSTSAL,
	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_SHSTSAH,
	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}

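/*
 * One-time buffer setup: allocate the per-endpoint bookkeeping array, the
 * command request/response buffers and the shared status region, give every
 * peer EPID an initialized TX/RX endpoint buffer pair, and pass the
 * resulting physical addresses to the device.  fjes_hw_cleanup() is the
 * teardown counterpart.
 */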
static int fjes_hw_setup(struct fjes_hw *hw)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	unsigned long flags;
	size_t mem_size;
	int result;
	int epidx;
	void *buf;

	hw->hw_info.max_epid = &hw->max_epid;
	hw->hw_info.my_epid = &hw->my_epid;

	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	hw->ep_shm_info = (struct ep_share_mem_info *)buf;

	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.req_buf))
		return -ENOMEM;

	hw->hw_info.req_buf_size = mem_size;

	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.res_buf))
		return -ENOMEM;

	hw->hw_info.res_buf_size = mem_size;

	result = fjes_hw_alloc_shared_status_region(hw);
	if (result)
		return result;

	hw->hw_info.buffer_share_bit = 0;
	hw->hw_info.buffer_unshare_reserve_bit = 0;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx != hw->my_epid) {
			buf_pair = &hw->ep_shm_info[epidx];

			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
			if (result)
				return result;

			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
			if (result)
				return result;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
					    fjes_support_mtu[0]);
			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
					    fjes_support_mtu[0]);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}
	}

	memset(&param, 0, sizeof(param));

	param.req_len = hw->hw_info.req_buf_size;
	param.req_start = __pa(hw->hw_info.req_buf);
	param.res_len = hw->hw_info.res_buf_size;
	param.res_start = __pa(hw->hw_info.res_buf);

	param.share_start = __pa(hw->hw_info.share->ep_status);

	fjes_hw_init_command_registers(hw, &param);

	return 0;
}

static void fjes_hw_cleanup(struct fjes_hw *hw)
{
	int epidx;

	if (!hw->ep_shm_info)
		return;

	fjes_hw_free_shared_status_region(hw);

	kfree(hw->hw_info.req_buf);
	hw->hw_info.req_buf = NULL;

	kfree(hw->hw_info.res_buf);
	hw->hw_info.res_buf = NULL;

	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
	}

	kfree(hw->ep_shm_info);
	hw->ep_shm_info = NULL;
}

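/*
 * Bring the hardware up: map the device registers, reset the hardware, mask
 * all interrupts, read the endpoint topology (maximum EPID and own EPID),
 * and set up the shared buffers plus the debug trace buffer.
 */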
int fjes_hw_init(struct fjes_hw *hw)
{
	int ret;

	hw->base = fjes_hw_iomap(hw);
	if (!hw->base)
		return -EIO;

	ret = fjes_hw_reset(hw);
	if (ret)
		return ret;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);

	mutex_init(&hw->hw_info.lock);
	spin_lock_init(&hw->rx_status_lock);

	hw->max_epid = fjes_hw_get_max_epid(hw);
	hw->my_epid = fjes_hw_get_my_epid(hw);

	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
		return -ENXIO;

	ret = fjes_hw_setup(hw);

	hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE);
	hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;

	return ret;
}

void fjes_hw_exit(struct fjes_hw *hw)
{
	int ret;

	if (hw->base) {

		if (hw->debug_mode) {
			/* disable debug mode */
			mutex_lock(&hw->hw_info.lock);
			fjes_hw_stop_debug(hw);
			mutex_unlock(&hw->hw_info.lock);
		}
		vfree(hw->hw_info.trace);
		hw->hw_info.trace = NULL;
		hw->hw_info.trace_size = 0;
		hw->debug_mode = 0;

		ret = fjes_hw_reset(hw);
		if (ret)
			pr_err("%s: reset error\n", __func__);

		fjes_hw_iounmap(hw);
		hw->base = NULL;
	}

	fjes_hw_cleanup(hw);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);
}

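/*
 * Kick a device command by writing the request code to the CR register,
 * then poll the CS register (once per second, up to
 * FJES_COMMAND_REQ_TIMEOUT) for completion.  Immediate errors reported in
 * CR are translated into FJES_CMD_STATUS_ERROR_* codes.
 */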
static enum fjes_dev_command_response_e
fjes_hw_issue_request_command(struct fjes_hw *hw,
			      enum fjes_dev_command_request_type type)
{
	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
	union REG_CR cr;
	union REG_CS cs;
	int timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;

	cr.reg = 0;
	cr.bits.req_start = 1;
	cr.bits.req_code = type;
	wr32(XSCT_CR, cr.reg);
	cr.reg = rd32(XSCT_CR);

	if (cr.bits.error == 0) {
		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
		cs.reg = rd32(XSCT_CS);

		while ((cs.bits.complete != 1) && timeout > 0) {
			msleep(1000);
			cs.reg = rd32(XSCT_CS);
			timeout -= 1000;
		}

		if (cs.bits.complete == 1)
			ret = FJES_CMD_STATUS_NORMAL;
		else if (timeout <= 0)
			ret = FJES_CMD_STATUS_TIMEOUT;

	} else {
		switch (cr.bits.err_info) {
		case FJES_CMD_REQ_ERR_INFO_PARAM:
			ret = FJES_CMD_STATUS_ERROR_PARAM;
			break;
		case FJES_CMD_REQ_ERR_INFO_STATUS:
			ret = FJES_CMD_STATUS_ERROR_STATUS;
			break;
		default:
			ret = FJES_CMD_STATUS_UNKNOWN;
			break;
		}
	}

	trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret);

	return ret;
}

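/*
 * Issue FJES_CMD_REQ_INFO to retrieve per-EPID zoning information.  The
 * response is validated against the expected length before the command
 * status is mapped onto an errno (-ENOMSG for a malformed response, -EBUSY
 * on timeout, -EPERM otherwise).
 */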
int fjes_hw_request_info(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;

	res_buf->info.length = 0;
	res_buf->info.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
	trace_fjes_hw_request_info(hw, res_buf);

	result = 0;

	if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
		res_buf->info.length) {
		trace_fjes_hw_request_info_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->info.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_request_info_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_ERROR_STATUS:
			result = -EPERM;
			break;
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

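/*
 * Share this endpoint's TX/RX buffer pair with dest_epid.  The request
 * carries the physical address of every EP_BUFFER_INFO_SIZE-sized chunk of
 * both vmalloc'ed buffers.  While the device answers BUSY the command is
 * retried, with a per-EPID stagger, until FJES_COMMAND_REQ_BUFF_TIMEOUT
 * expires.
 */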
int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
			       struct ep_share_mem_info *buf_pair)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int timeout;
	int i, idx;
	void *addr;
	int result;

	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
						buf_pair->tx.size,
						buf_pair->rx.size);
	req_buf->share_buffer.epid = dest_epid;

	idx = 0;
	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->tx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
				(__le64)(page_to_phys(vmalloc_to_page(addr)) +
						offset_in_page(addr));
	}

	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->rx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
				(__le64)(page_to_phys(vmalloc_to_page(addr)) +
						offset_in_page(addr));
	}

	res_buf->share_buffer.length = 0;
	res_buf->share_buffer.code = 0;

	trace_fjes_hw_register_buff_addr_req(req_buf, buf_pair);

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->share_buffer.length ==
		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
			msleep(200 + hw->my_epid * 20);
			timeout -= (200 + hw->my_epid * 20);

			res_buf->share_buffer.length = 0;
			res_buf->share_buffer.code = 0;

			ret = fjes_hw_issue_request_command(
					hw, FJES_CMD_REQ_SHARE_BUFFER);
	}

	result = 0;

	trace_fjes_hw_register_buff_addr(res_buf, timeout);

	if (res_buf->share_buffer.length !=
			FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) {
		trace_fjes_hw_register_buff_addr_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->share_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			trace_fjes_hw_register_buff_addr_err("Busy Timeout");
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_register_buff_addr_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

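/*
 * Undo fjes_hw_register_buff_addr() for dest_epid.  As with registration, a
 * BUSY response is retried with a per-EPID stagger until
 * FJES_COMMAND_REQ_BUFF_TIMEOUT expires; on success the share bit for the
 * peer is cleared.
 */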
int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	struct fjes_device_shared_info *share = hw->hw_info.share;
	enum fjes_dev_command_response_e ret;
	int timeout;
	int result;

	if (!hw->base)
		return -EPERM;

	if (!req_buf || !res_buf || !share)
		return -EPERM;

	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->unshare_buffer.length =
			FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
	req_buf->unshare_buffer.epid = dest_epid;

	res_buf->unshare_buffer.length = 0;
	res_buf->unshare_buffer.code = 0;

	trace_fjes_hw_unregister_buff_addr_req(req_buf);
	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->unshare_buffer.length ==
		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
	       (res_buf->unshare_buffer.code ==
		FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->unshare_buffer.length = 0;
		res_buf->unshare_buffer.code = 0;

		ret =
		fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
	}

	result = 0;

	trace_fjes_hw_unregister_buff_addr(res_buf, timeout);

	if (res_buf->unshare_buffer.length !=
			FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
		trace_fjes_hw_unregister_buff_addr_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->unshare_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			trace_fjes_hw_unregister_buff_addr_err("Busy Timeout");
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_unregister_buff_addr_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
			    enum REG_ICTL_MASK  mask)
{
	u32 ig = mask | dest_epid;

	wr32(XSCT_IG, cpu_to_le32(ig));

	return 0;
}

u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
{
	u32 cur_is;

	cur_is = rd32(XSCT_IS);

	return cur_is;
}

void fjes_hw_set_irqmask(struct fjes_hw *hw,
			 enum REG_ICTL_MASK intr_mask, bool mask)
{
	if (mask)
		wr32(XSCT_IMS, intr_mask);
	else
		wr32(XSCT_IMC, intr_mask);
}

bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
{
	if (epid >= hw->max_epid)
		return false;

	if ((hw->ep_shm_info[epid].es_status !=
			FJES_ZONING_STATUS_ENABLE) ||
		(hw->ep_shm_info[hw->my_epid].zone ==
			FJES_ZONING_ZONE_TYPE_NONE))
		return false;
	else
		return (hw->ep_shm_info[epid].zone ==
				hw->ep_shm_info[hw->my_epid].zone);
}

int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
			   int dest_epid)
{
	int value = false;

	if (dest_epid < share->epnum)
		value = share->ep_status[dest_epid];

	return value;
}

static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
{
	return test_bit(src_epid, &hw->txrx_stop_req_bit);
}

static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
{
	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
			FJES_RX_STOP_REQ_DONE);
}

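/*
 * Classify the relationship with a peer endpoint from the shared status
 * region and the local TX/RX-stop bookkeeping: UNSHARE when no buffer is
 * shared, WAITING while a stop request toward the peer is pending, COMPLETE
 * once stop processing has been flagged done in the endpoint buffer, and
 * SHARED otherwise.
 */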
enum ep_partner_status
fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
{
	enum ep_partner_status status;

	if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
		if (fjes_hw_epid_is_stop_requested(hw, epid)) {
			status = EP_PARTNER_WAITING;
		} else {
			if (fjes_hw_epid_is_stop_process_done(hw, epid))
				status = EP_PARTNER_COMPLETE;
			else
				status = EP_PARTNER_SHARED;
		}
	} else {
		status = EP_PARTNER_UNSHARE;
	}

	return status;
}

void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	unsigned long flags;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		status = fjes_hw_get_partner_ep_status(hw, epidx);
		switch (status) {
		case EP_PARTNER_SHARED:
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);
			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
			break;
		default:
			break;
		}

		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		set_bit(epidx, &hw->txrx_stop_req_bit);

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_REQUEST;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
	}
}

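/*
 * Wait for every peer marked in buffer_unshare_reserve_bit to acknowledge
 * the TX/RX stop (or to disappear from the shared status region), polling
 * every 100 ms up to FJES_COMMAND_EPSTOP_WAIT_TIMEOUT.  Any bits still set
 * after the timeout are cleared and -EBUSY is returned.
 */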
int fjes_hw_wait_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	union ep_buffer_info *info;
	int wait_time = 0;
	int epidx;

	while (hw->hw_info.buffer_unshare_reserve_bit &&
	       (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			status = fjes_hw_epid_is_shared(hw->hw_info.share,
							epidx);
			info = hw->ep_shm_info[epidx].rx.info;
			if ((!status ||
			     (info->v1i.rx_status &
			      FJES_RX_STOP_REQ_DONE)) &&
			    test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				clear_bit(epidx,
					  &hw->hw_info.buffer_unshare_reserve_bit);
			}
		}

		msleep(100);
		wait_time += 100;
	}

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
	}

	return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
			? 0 : -EBUSY;
}

bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
{
	union ep_buffer_info *info = epbh->info;

	return (info->common.version == version);
}

bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;

	return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
		info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
}

bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	bool ret = false;
	int i;

	if (vlan_id == 0) {
		ret = true;
	} else {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i]) {
				ret = true;
				break;
			}
		}
	}
	return ret;
}

bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
		if (info->v1i.vlan_id[i] == 0) {
			info->v1i.vlan_id[i] = vlan_id;
			return true;
		}
	}
	return false;
}

void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	if (0 != vlan_id) {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i])
				info->v1i.vlan_id[i] = 0;
		}
	}
}

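/*
 * Endpoint buffer ring helpers.  Frames live in a circular ring of
 * frame_max-sized slots indexed by the head (consumer) and tail (producer)
 * counters in the buffer header.  A minimal receive loop, sketched for
 * illustration only (locking and error handling omitted):
 *
 *	while (!fjes_hw_epbuf_rx_is_empty(epbh)) {
 *		size_t len;
 *		void *data = fjes_hw_epbuf_rx_curpkt_get_addr(epbh, &len);
 *
 *		// hand data/len to the stack, then consume the slot
 *		fjes_hw_epbuf_rx_curpkt_drop(epbh);
 *	}
 */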
bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
		return true;

	if (info->v1i.count_max == 0)
		return true;

	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
			     info->v1i.count_max);
}

void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
				       size_t *psize)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;
	void *frame;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
					     (info->v1i.head,
					      info->v1i.count_max) *
					     info->v1i.frame_max]);

	*psize = (size_t)ring_frame->frame_size;

	frame = ring_frame->frame_data;

	return frame;
}

void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (fjes_hw_epbuf_rx_is_empty(epbh))
		return;

	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
}

int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
			      void *frame, size_t size)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;

	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
		return -ENOBUFS;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
					     (info->v1i.tail - 1,
					      info->v1i.count_max) *
					     info->v1i.frame_max]);

	ring_frame->frame_size = size;
	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);

	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);

	return 0;
}

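/*
 * Deferred work: refresh zoning information from the device and reconcile
 * it with the current per-EPID state.  Newly matching peers get their
 * buffers registered, peers that left the zone are unregistered or asked to
 * stop TX/RX, and the unshare watch task is kicked when follow-up is
 * needed.  Command failures escalate to the adapter's force_close_task.
 */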
static void fjes_hw_update_zone_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work,
			struct fjes_hw, update_zone_task);

	struct my_s {u8 es_status; u8 zone; } *info;
	union fjes_device_command_res *res_buf;
	enum ep_partner_status pstatus;

	struct fjes_adapter *adapter;
	struct net_device *netdev;
	unsigned long flags;

	ulong unshare_bit = 0;
	ulong share_bit = 0;
	ulong irq_bit = 0;

	int epidx;
	int ret;

	adapter = (struct fjes_adapter *)hw->back;
	netdev = adapter->netdev;
	res_buf = hw->hw_info.res_buf;
	info = (struct my_s *)&res_buf->info.info;

	mutex_lock(&hw->hw_info.lock);

	ret = fjes_hw_request_info(hw);
	switch (ret) {
	case -ENOMSG:
	case -EBUSY:
	default:
		if (!work_pending(&adapter->force_close_task)) {
			adapter->force_reset = true;
			schedule_work(&adapter->force_close_task);
		}
		break;

	case 0:

		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid) {
				hw->ep_shm_info[epidx].es_status =
					info[epidx].es_status;
				hw->ep_shm_info[epidx].zone =
					info[epidx].zone;
				continue;
			}

			pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
			switch (pstatus) {
			case EP_PARTNER_UNSHARE:
			default:
				if ((info[epidx].zone !=
					FJES_ZONING_ZONE_TYPE_NONE) &&
				    (info[epidx].es_status ==
					FJES_ZONING_STATUS_ENABLE) &&
				    (info[epidx].zone ==
					info[hw->my_epid].zone))
					set_bit(epidx, &share_bit);
				else
					set_bit(epidx, &unshare_bit);
				break;

			case EP_PARTNER_COMPLETE:
			case EP_PARTNER_WAITING:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone)) {
					set_bit(epidx,
						&adapter->unshare_watch_bitmask);
					set_bit(epidx,
						&hw->hw_info.buffer_unshare_reserve_bit);
				}
				break;

			case EP_PARTNER_SHARED:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone))
					set_bit(epidx, &irq_bit);
				break;
			}

			hw->ep_shm_info[epidx].es_status =
				info[epidx].es_status;
			hw->ep_shm_info[epidx].zone = info[epidx].zone;
		}
		break;
	}

	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (test_bit(epidx, &share_bit)) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_register_buff_addr(
				hw, epidx, &hw->ep_shm_info[epidx]);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
					  &adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);

			hw->ep_shm_info[epidx].ep_stats
					      .com_regist_buf_exec += 1;
		}

		if (test_bit(epidx, &unshare_bit)) {
			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_unregister_buff_addr(hw, epidx);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
					  &adapter->force_close_task);
				}
				break;
			}

			mutex_unlock(&hw->hw_info.lock);

			hw->ep_shm_info[epidx].ep_stats
					      .com_unregist_buf_exec += 1;

			if (ret == 0) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}

		if (test_bit(epidx, &irq_bit)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);

			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;

			set_bit(epidx, &hw->txrx_stop_req_bit);
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[epidx].tx.
				info->v1i.rx_status |=
					FJES_RX_STOP_REQ_REQUEST;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		}
	}

	if (irq_bit || adapter->unshare_watch_bitmask) {
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
	}
}

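/*
 * Deferred work: acknowledge peer-initiated stop requests by setting
 * FJES_RX_STOP_REQ_DONE in the corresponding TX buffer status, then let the
 * unshare watch task finish the teardown.
 */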
static void fjes_hw_epstop_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
	unsigned long flags;

	ulong remain_bit;
	int epid_bit;

	while ((remain_bit = hw->epstop_req_bit)) {
		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
			if (remain_bit & 1) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epid_bit].
					tx.info->v1i.rx_status |=
						FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epid_bit, &hw->epstop_req_bit);
				set_bit(epid_bit,
					&adapter->unshare_watch_bitmask);

				if (!work_pending(&adapter->unshare_watch_task))
					queue_work(
						adapter->control_wq,
						&adapter->unshare_watch_task);
			}
		}
	}
}

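/*
 * Start the device debug trace: describe the vmalloc'ed trace buffer to the
 * device in FJES_DEBUG_PAGE_SIZE chunks and issue FJES_CMD_REQ_START_DEBUG.
 * fjes_hw_stop_debug() is the counterpart.
 */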
int fjes_hw_start_debug(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int result = 0;
	void *addr;
	int i;

	if (!hw->hw_info.trace)
		return -EPERM;
	memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE);

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->start_trace.length =
		FJES_DEV_COMMAND_START_DBG_REQ_LEN(hw->hw_info.trace_size);
	req_buf->start_trace.mode = hw->debug_mode;
	req_buf->start_trace.buffer_len = hw->hw_info.trace_size;
	page_count = hw->hw_info.trace_size / FJES_DEBUG_PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE;
		req_buf->start_trace.buffer[i] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
			offset_in_page(addr));
	}

	res_buf->start_trace.length = 0;
	res_buf->start_trace.code = 0;

	trace_fjes_hw_start_debug_req(req_buf);
	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_START_DEBUG);
	trace_fjes_hw_start_debug(res_buf);

	if (res_buf->start_trace.length !=
		FJES_DEV_COMMAND_START_DBG_RES_LEN) {
		result = -ENOMSG;
		trace_fjes_hw_start_debug_err("Invalid res_buf");
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->start_trace.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_start_debug_err("Busy Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

int fjes_hw_stop_debug(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result = 0;

	if (!hw->hw_info.trace)
		return -EPERM;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);
	req_buf->stop_trace.length = FJES_DEV_COMMAND_STOP_DBG_REQ_LEN;

	res_buf->stop_trace.length = 0;
	res_buf->stop_trace.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_STOP_DEBUG);
	trace_fjes_hw_stop_debug(res_buf);

	if (res_buf->stop_trace.length != FJES_DEV_COMMAND_STOP_DBG_RES_LEN) {
		trace_fjes_hw_stop_debug_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->stop_trace.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			hw->debug_mode = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			trace_fjes_hw_stop_debug_err("Busy Timeout");
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}