/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>

#include <asm/page-states.h>

static int cmma_flag = 1;

/*
 * Parse the "cmma=" kernel parameter: "yes"/"on" enables the
 * collaborative memory management assist, "no"/"off" disables it.
 */
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

/*
 * Execute a single ESSA instruction to check whether the machine and
 * the hypervisor support it. If ESSA traps, the exception table fixup
 * branches past the "la" and rc keeps its -EOPNOTSUPP initializer;
 * otherwise the "la" clears rc to signal success.
 */
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	return rc;
}

void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa())
		cmma_flag = 0;
}

/*
 * Mark the 2^order pages starting at @page as unused: the hypervisor
 * may discard their content at any time.
 */
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unstable(page, order);
}

/*
 * Mark the 2^order pages starting at @page as stable: the hypervisor
 * has to preserve their content.
 */
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable(page, order);
}

/*
 * Walk the free lists of all populated zones under the zone lock and
 * set every free page to the requested state. Draining the local
 * per-cpu pages first when making pages stable puts them back on the
 * free lists, so they are covered by the walk.
 */
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
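
/*
 * Illustrative sketch, assuming the generic arch hooks in
 * include/linux/gfp.h: with HAVE_ARCH_FREE_PAGE and HAVE_ARCH_ALLOC_PAGE
 * defined for s390, the buddy allocator calls the functions above around
 * every free and allocation, roughly:
 *
 *	arch_free_page(page, order);	// content may now be discarded
 *	...
 *	arch_alloc_page(page, order);	// content is significant again
 *
 * The assist can also be turned off at boot with "cmma=off" (or "cmma=no")
 * on the kernel command line; cmma_init() additionally disables it when
 * the ESSA availability test fails.
 */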