/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * This file contains the entry functions for the memory management of
 * the ISP driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap */
#include <linux/io.h>		/* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include <asm/cacheflush.h>
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;
struct hmm_pool	dynamic_pool;
struct hmm_pool	reserved_pool;
static ia_css_ptr dummy_ptr;
static bool hmm_initialized;
struct _hmm_mem_stat hmm_mem_stat;

/*
 * p: private
 * s: shared
 * u: user
 * i: ion
 */
static const char hmm_bo_type_string[] = "psui";

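/*
 * Helper for the sysfs attributes below: list every buffer object on
 * @bo_list that matches @active (allocated vs. free), one "type pgnr"
 * line per object, followed by per-type totals (in KB, assuming 4 KB
 * pages).
 */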
static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
		       char *buf, struct list_head *bo_list, bool active)
{
	ssize_t ret = 0;
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int i;
	long total[HMM_BO_LAST] = { 0 };
	long count[HMM_BO_LAST] = { 0 };
	int index1 = 0;
	int index2 = 0;

	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
	if (ret <= 0)
		return 0;

	index1 += ret;

	spin_lock_irqsave(&bo_device.list_lock, flags);
	list_for_each_entry(bo, bo_list, list) {
		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
					"%c %d\n",
					hmm_bo_type_string[bo->type], bo->pgnr);

			total[bo->type] += bo->pgnr;
			count[bo->type]++;
			if (ret > 0)
				index1 += ret;
		}
	}
	spin_unlock_irqrestore(&bo_device.list_lock, flags);

	for (i = 0; i < HMM_BO_LAST; i++) {
		if (count[i]) {
			ret = scnprintf(buf + index1 + index2,
					PAGE_SIZE - index1 - index2,
					"%ld %c buffer objects: %ld KB\n",
					count[i], hmm_bo_type_string[i],
					total[i] * 4);
			if (ret > 0)
				index2 += ret;
		}
	}

	/* Add trailing zero, not included by scnprintf */
	return index1 + index2 + 1;
}

static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static ssize_t reserved_pool_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	ssize_t ret = 0;

	struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
	unsigned long flags;

	if (!pinfo || !pinfo->initialized)
		return 0;

	spin_lock_irqsave(&pinfo->list_lock, flags);
	ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
			pinfo->index, pinfo->pgnr);
	spin_unlock_irqrestore(&pinfo->list_lock, flags);

	if (ret > 0)
		ret++; /* Add trailing zero, not included by scnprintf */

	return ret;
}

static ssize_t dynamic_pool_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret = 0;

	struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
	unsigned long flags;

	if (!pinfo || !pinfo->initialized)
		return 0;

	spin_lock_irqsave(&pinfo->list_lock, flags);
	ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
			pinfo->pgnr, pinfo->pool_size);
	spin_unlock_irqrestore(&pinfo->list_lock, flags);

	if (ret > 0)
		ret++; /* Add trailing zero, not included by scnprintf */

	return ret;
}

static DEVICE_ATTR_RO(active_bo);
static DEVICE_ATTR_RO(free_bo);
static DEVICE_ATTR_RO(reserved_pool);
static DEVICE_ATTR_RO(dynamic_pool);

static struct attribute *sysfs_attrs_ctrl[] = {
	&dev_attr_active_bo.attr,
	&dev_attr_free_bo.attr,
	&dev_attr_reserved_pool.attr,
	&dev_attr_dynamic_pool.attr,
	NULL
};

static struct attribute_group atomisp_attribute_group[] = {
	{.attrs = sysfs_attrs_ctrl },
};

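/*
 * Initialize the HMM subsystem: set up the buffer object device that
 * manages the [ISP_VM_START, ISP_VM_START + ISP_VM_SIZE) ISP virtual
 * range, reserve the dummy buffer that occupies ISP address 0, and
 * expose the debug attributes above through sysfs.
 */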
int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * The hmm layer uses 0 (NULL) to indicate an invalid ISP virtual
	 * address, but ISP_VM_START is defined as 0 as well. Allocate one
	 * dummy buffer up front so that it takes address 0 and no later
	 * hmm_alloc() call can return 0 for a valid allocation.
	 */
	dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);

	if (!ret) {
		ret = sysfs_create_group(&atomisp_dev->kobj,
					 atomisp_attribute_group);
		if (ret)
			dev_err(atomisp_dev,
				"%s Failed to create sysfs\n", __func__);
	}

	return ret;
}

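/*
 * Tear down the HMM subsystem: remove the sysfs group, release the dummy
 * buffer and shut down the buffer object device. The reverse of hmm_init().
 */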
void hmm_cleanup(void)
{
	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}

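/*
 * Allocate ISP memory: create a buffer object covering enough pages for
 * @bytes, back it with pages of the requested @type, and bind it into the
 * ISP MMU. Returns the ISP virtual address of the buffer, or 0 on failure.
 */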
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
		     int from_highmem, const void __user *userptr, bool cached)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check that we are initialized. In an ideal world we wouldn't need
	 * this, but we can tackle it once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get the page number from the size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Buffer object structure init */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for memory */
	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Combine the virtual address and the pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	hmm_mem_stat.tol_cnt += pgnr;

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}

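/*
 * Free memory that was allocated with hmm_alloc(): unbind the buffer
 * object from the ISP MMU, release its pages and drop the reference.
 */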
void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	WARN_ON(!virt);

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);

	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_mem_stat.tol_cnt -= bo->pgnr;

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);
}

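/*
 * Sanity check used by the access helpers below: the address must belong
 * to a buffer object that has both pages and ISP virtual address space
 * allocated.
 */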
static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no page allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

/* Read function in ISP memory management, copying one kmap()ed page at a time */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap(bo->page_obj[idx].page) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for next loop */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/* Read function in ISP memory management, using vmap where possible */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		/* data may be NULL when only a cache flush is requested */
		if (data)
			memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);
		if (data)
			memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!data) {
		dev_err(atomisp_dev,
			"hmm_load NULL argument\n");
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/* Write function in ISP memory management */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		return 0;
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);

			memcpy(vptr, data, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	/* vmap failed: fall back to copying page by page through kmap */
	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		if (in_atomic())
			des = (char *)kmap_atomic(bo->page_obj[idx].page);
		else
			des = (char *)kmap(bo->page_obj[idx].page);

		if (!des) {
			dev_err(atomisp_dev,
				"kmap buffer object page failed: pg_idx = %d\n",
				idx);
			return -EINVAL;
		}

		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memcpy(des, src, len);

		src += len;

		clflush_cache_range(des, len);

		if (in_atomic())
			/*
			 * Note: kunmap_atomic requires return addr from
			 * kmap_atomic, not the page. See linux/highmem.h
			 */
			kunmap_atomic(des - offset);
		else
			kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		return 0;
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (vptr) {
			vptr = vptr + (virt - bo->start);
			memset(vptr, c, bytes);
			clflush_cache_range(vptr, bytes);
			hmm_bo_vunmap(bo);
			return 0;
		}
	}

	/* vmap failed: fall back to setting page by page through kmap */
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap(bo->page_obj[idx].page) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);

		clflush_cache_range(des, len);

		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/* Convert an ISP virtual address to a physical address */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->page_obj[idx].page) + offset;
}

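/* Map the buffer object starting at @virt into the given user vma */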
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (kernel) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (ptr)
		return ptr + (virt - bo->start);

	return NULL;
}

/* Flush memory which was mapped as cached memory through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

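/* Drop the kernel mapping created by hmm_vmap() */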
void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}

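/*
 * The reserved and dynamic page pools are currently compiled out; all
 * allocations go through the normal page allocator, so pool registration
 * and unregistration are no-ops.
 */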
int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
{
#if 0	// Just use the "normal" pool
	switch (pool_type) {
	case HMM_POOL_TYPE_RESERVED:
		reserved_pool.pops = &reserved_pops;
		return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
						     pool_size);
	case HMM_POOL_TYPE_DYNAMIC:
		dynamic_pool.pops = &dynamic_pops;
		return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
						    pool_size);
	default:
		dev_err(atomisp_dev, "invalid pool type.\n");
		return -EINVAL;
	}
#else
	return 0;
#endif
}

void hmm_pool_unregister(enum hmm_pool_type pool_type)
{
#if 0	// Just use the "normal" pool
	switch (pool_type) {
	case HMM_POOL_TYPE_RESERVED:
		if (reserved_pool.pops && reserved_pool.pops->pool_exit)
			reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
		break;
	case HMM_POOL_TYPE_DYNAMIC:
		if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
			dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
		break;
	default:
		dev_err(atomisp_dev, "invalid pool type.\n");
		break;
	}
#endif
}

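/* Get a host (kernel) virtual address for an ISP virtual address */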
void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
{
	/* vunmap will be done in hmm_bo_release() */
	return hmm_vmap(ptr, cached);
}

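/* Translate a kernel vmap address back to the ISP (HRT) virtual address */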
ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
	if (bo)
		return bo->start;

	dev_err(atomisp_dev,
		"cannot find buffer object whose kernel virtual address is %p\n",
		ptr);
	return 0;
}

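/*
 * Dump the global memory statistics to the ftrace buffer. The @func and
 * @line arguments are accepted but are currently not printed.
 */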
void hmm_show_mem_stat(const char *func, const int line)
{
	trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
		     hmm_mem_stat.tol_cnt,
		     hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
		     hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
		     hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
}

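/*
 * Reset the global memory statistics. A value of -1 marks a statistic
 * whose pool (reserved or dynamic) is not enabled.
 */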
void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
{
	hmm_mem_stat.res_size = res_pgnr;
	/* If the reserved memory pool is not enabled, set its "mem stat" values to -1. */
	if (hmm_mem_stat.res_size == 0) {
		hmm_mem_stat.res_size = -1;
		hmm_mem_stat.res_cnt = -1;
	}

	/* If the dynamic memory pool is not enabled, set its "mem stat" values to -1. */
	if (!dyc_en) {
		hmm_mem_stat.dyc_size = -1;
		hmm_mem_stat.dyc_thr = -1;
	} else {
		hmm_mem_stat.dyc_size = 0;
		hmm_mem_stat.dyc_thr = dyc_pgnr;
	}
	hmm_mem_stat.usr_size = 0;
	hmm_mem_stat.sys_size = 0;
	hmm_mem_stat.tol_cnt = 0;
}