/*
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This file reads all the special sections which have alternate instructions
 * that can be patched in or redirected to at runtime.
 */

#include <stdlib.h>
#include <string.h>

#include "builtin.h"
#include "special.h"
#include "warn.h"

#define EX_ENTRY_SIZE		12
#define EX_ORIG_OFFSET		0
#define EX_NEW_OFFSET		4

#define JUMP_ENTRY_SIZE		16
#define JUMP_ORIG_OFFSET	0
#define JUMP_NEW_OFFSET		4

#define ALT_ENTRY_SIZE		13
#define ALT_ORIG_OFFSET		0
#define ALT_NEW_OFFSET		4
#define ALT_FEATURE_OFFSET	8
#define ALT_ORIG_LEN_OFFSET	10
#define ALT_NEW_LEN_OFFSET	11

#define X86_FEATURE_POPCNT	(4*32+23)
#define X86_FEATURE_SMAP	(9*32+20)

/*
 * Describes the fixed-size record format of one special section: where,
 * within each record, to find the original and replacement instruction
 * references, their lengths, and the CPU feature bit.
 */
struct special_entry {
	const char *sec;
	bool group, jump_or_nop;
	unsigned char size, orig, new;
	unsigned char orig_len, new_len; /* group only */
	unsigned char feature; /* ALTERNATIVE macro CPU feature */
};

struct special_entry entries[] = {
	{
		.sec = ".altinstructions",
		.group = true,
		.size = ALT_ENTRY_SIZE,
		.orig = ALT_ORIG_OFFSET,
		.orig_len = ALT_ORIG_LEN_OFFSET,
		.new = ALT_NEW_OFFSET,
		.new_len = ALT_NEW_LEN_OFFSET,
		.feature = ALT_FEATURE_OFFSET,
	},
	{
		.sec = "__jump_table",
		.jump_or_nop = true,
		.size = JUMP_ENTRY_SIZE,
		.orig = JUMP_ORIG_OFFSET,
		.new = JUMP_NEW_OFFSET,
	},
	{
		.sec = "__ex_table",
		.size = EX_ENTRY_SIZE,
		.orig = EX_ORIG_OFFSET,
		.new = EX_NEW_OFFSET,
	},
	{},
};

static int get_alt_entry(struct elf *elf, struct special_entry *entry,
			 struct section *sec, int idx,
			 struct special_alt *alt)
{
	struct rela *orig_rela, *new_rela;
	unsigned long offset;

	offset = idx * entry->size;

	alt->group = entry->group;
	alt->jump_or_nop = entry->jump_or_nop;

	if (alt->group) {
		alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
						   entry->orig_len);
		alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
						  entry->new_len);
	}

	if (entry->feature) {
		unsigned short feature;

		feature = *(unsigned short *)(sec->data->d_buf + offset +
					      entry->feature);

		/*
		 * It has been requested that we don't validate the !POPCNT
		 * feature path, which is a "very very small percentage of
		 * machines".
		 */
		if (feature == X86_FEATURE_POPCNT)
			alt->skip_orig = true;

		/*
		 * If UACCESS validation is enabled, force that alternative;
		 * otherwise force it the other way.
		 *
		 * What we want to avoid is having both the original and the
		 * alternative code flow at the same time.  In that case we
		 * can find paths that see the STAC but take the NOP instead
		 * of the CLAC, and the other way around.
		 */
		if (feature == X86_FEATURE_SMAP) {
			if (uaccess)
				alt->skip_orig = true;
			else
				alt->skip_alt = true;
		}
	}

	orig_rela = find_rela_by_dest(sec, offset + entry->orig);
	if (!orig_rela) {
		WARN_FUNC("can't find orig rela", sec, offset + entry->orig);
		return -1;
	}
	if (orig_rela->sym->type != STT_SECTION) {
		WARN_FUNC("don't know how to handle non-section rela symbol %s",
			  sec, offset + entry->orig, orig_rela->sym->name);
		return -1;
	}

	alt->orig_sec = orig_rela->sym->sec;
	alt->orig_off = orig_rela->addend;

	if (!entry->group || alt->new_len) {
		new_rela = find_rela_by_dest(sec, offset + entry->new);
		if (!new_rela) {
			WARN_FUNC("can't find new rela",
				  sec, offset + entry->new);
			return -1;
		}

		alt->new_sec = new_rela->sym->sec;
		alt->new_off = (unsigned int)new_rela->addend;

		/* _ASM_EXTABLE_EX hack */
		if (alt->new_off >= 0x7ffffff0)
			alt->new_off -= 0x7ffffff0;
	}

	return 0;
}

/*
 * Read all the special sections and create a list of special_alt structs which
 * describe all the alternate instructions which can be patched in or
 * redirected to at runtime.
 */
int special_get_alts(struct elf *elf, struct list_head *alts)
{
	struct special_entry *entry;
	struct section *sec;
	unsigned int nr_entries;
	struct special_alt *alt;
	int idx, ret;

	INIT_LIST_HEAD(alts);

	for (entry = entries; entry->sec; entry++) {
		sec = find_section_by_name(elf, entry->sec);
		if (!sec)
			continue;

		if (sec->len % entry->size != 0) {
			WARN("%s size not a multiple of %d",
			     sec->name, entry->size);
			return -1;
		}

		nr_entries = sec->len / entry->size;

		for (idx = 0; idx < nr_entries; idx++) {
			alt = malloc(sizeof(*alt));
			if (!alt) {
				WARN("malloc failed");
				return -1;
			}
			memset(alt, 0, sizeof(*alt));

			ret = get_alt_entry(elf, entry, sec, idx, alt);
			if (ret)
				return ret;

			list_add_tail(&alt->list, alts);
		}
	}

	return 0;
}
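
#if 0
/*
 * Illustrative sketches only; they are guarded by "#if 0" so they are never
 * compiled and are not part of objtool itself.
 *
 * The ALT_* offsets above describe a 13-byte .altinstructions record.  A
 * hypothetical packed struct with that layout might look like the one below
 * (assuming the <stdint.h> fixed-width types); the authoritative definition
 * is the kernel's struct alt_instr in arch/x86/include/asm/alternative.h.
 * The struct and function names here are made up for illustration.
 */
struct alt_entry_layout {
	int32_t  orig_offset;		/* +0:  ALT_ORIG_OFFSET */
	int32_t  new_offset;		/* +4:  ALT_NEW_OFFSET */
	uint16_t feature;		/* +8:  ALT_FEATURE_OFFSET */
	uint8_t  orig_len;		/* +10: ALT_ORIG_LEN_OFFSET */
	uint8_t  new_len;		/* +11: ALT_NEW_LEN_OFFSET */
	uint8_t  pad_len;		/* +12: not read by this file */
} __attribute__((packed));		/* sizeof() == 13 == ALT_ENTRY_SIZE */

/*
 * Hypothetical caller: special_get_alts() initializes "alts" itself, returns
 * 0 on success, and links each special_alt through its "list" member, so the
 * result can be walked with list_for_each_entry().
 */
static void example_walk_alts(struct elf *elf)
{
	struct list_head alts;
	struct special_alt *alt;

	if (special_get_alts(elf, &alts))
		return;

	list_for_each_entry(alt, &alts, list)
		WARN("alternative found in %s", alt->orig_sec->name);
}
#endif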