/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>

/* ESSA operation codes: set the page state to stable or unused. */
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

static int cmma_flag = 1;

/* Parse the "cmma=" kernel parameter: yes/on enables, no/off disables. */
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

/*
 * Probe for the ESSA instruction. On machines without collaborative
 * memory management the instruction faults; the exception table entry
 * skips the "la %0,0", rc stays at -EOPNOTSUPP and page hinting is
 * left disabled.
 */
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;
}

/* Issue ESSA for each page of an order-sized block to mark it unused. */
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unstable(page, order);
}

/* Issue ESSA for each page of an order-sized block to mark it stable. */
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable(page, order);
}

/*
 * Walk the buddy free lists of every populated zone and set all free
 * pages to the stable or the unused state.
 */
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}