// SPDX-License-Identifier: GPL-2.0-only
/*
 * sorttable.c: Sort the kernel's table
 *
 * Added ORC unwind tables sort support and other updates:
 * Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
 * Shile Zhang <shile.zhang@linux.alibaba.com>
 *
 * Copyright 2011 - 2012 Cavium, Inc.
 *
 * Based on code taken from recordmcount.c which is:
 *
 * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
 *
 * Restructured to fit Linux format, as well as other updates:
 * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the vmlinux file in-place.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>

#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>

#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT 93
#endif

#ifndef EM_XTENSA
#define EM_XTENSA 94
#endif

#ifndef EM_AARCH64
#define EM_AARCH64 183
#endif

#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE 189
#endif

#ifndef EM_ARCV2
#define EM_ARCV2 195
#endif

#ifndef EM_RISCV
#define EM_RISCV 243
#endif

#ifndef EM_LOONGARCH
#define EM_LOONGARCH 258
#endif

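/*
 * Endian-aware accessors: do_file() points these at the little- or
 * big-endian helpers below to match the target ELF, so the rest of
 * the code stays byte-order agnostic.  table_sort_t is the signature
 * of the per-architecture table sort routines.
 */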
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
typedef void (*table_sort_t)(char *, int);

/*
 * Get the whole file as a programming convenience in order to avoid
 * malloc+lseek+read+free of many pieces.  If successful, then mmap
 * avoids copying unused pieces; else just read the whole file.
 * Open for both read and write.
 */
static void *mmap_file(char const *fname, size_t *size)
{
	int fd;
	struct stat sb;
	void *addr = NULL;

	fd = open(fname, O_RDWR);
	if (fd < 0) {
		perror(fname);
		return NULL;
	}
	if (fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		goto out;
	}

	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		addr = NULL;	/* callers test for NULL, not MAP_FAILED */
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		goto out;
	}

	*size = sb.st_size;

out:
	close(fd);
	return addr;
}

static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}

static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}

/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))
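/* e.g. SHN_ABS (0xfff1) maps to SPECIAL(SHN_ABS) == -15 */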

static inline int is_shndx_special(unsigned int i)
{
	return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}

/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	if (shndx != SHN_XINDEX)
		return shndx;
	return r(&symtab_shndx_start[sym_offs]);
}

/* 32 bit and 64 bit are very similar */
#include "sorttable.h"
#define SORTTABLE_64
#include "sorttable.h"
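/*
 * The second inclusion, with SORTTABLE_64 defined, instantiates the
 * ELFCLASS64 flavour of the same template, providing the do_sort_32()
 * and do_sort_64() helpers used by do_file() below.
 */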

static int compare_relative_table(const void *a, const void *b)
{
	int32_t av = (int32_t)r(a);
	int32_t bv = (int32_t)r(b);

	if (av < bv)
		return -1;
	if (av > bv)
		return 1;
	return 0;
}

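/*
 * Sort an exception table of 8-byte entries: two 32-bit fields, each
 * an offset relative to its own location.  Mirroring the runtime sort,
 * normalize both fields to section-relative values, qsort the entries
 * by their first field, then convert back to self-relative offsets.
 */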
static void sort_relative_table(char *extab_image, int image_size)
{
	int i = 0;

	/*
	 * Do the same thing the runtime sort does, first normalize to
	 * being relative to the start of the section.
	 */
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) + i, loc);
		i += 4;
	}

	qsort(extab_image, image_size / 8, 8, compare_relative_table);

	/* Now denormalize. */
	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) - i, loc);
		i += 4;
	}
}

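/*
 * Variant for architectures whose exception table entries are 12 bytes:
 * two self-relative 32-bit offsets followed by a word of fixup type/data
 * that is position-independent and therefore left untouched.
 */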
static void sort_relative_table_with_data(char *extab_image, int image_size)
{
	int i = 0;

	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) + i, loc);
		w(r(loc + 1) + i + 4, loc + 1);
		/* Don't touch the fixup type or data */

		i += sizeof(uint32_t) * 3;
	}

	qsort(extab_image, image_size / 12, 12, compare_relative_table);

	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) - i, loc);
		w(r(loc + 1) - (i + 4), loc + 1);
		/* Don't touch the fixup type or data */

		i += sizeof(uint32_t) * 3;
	}
}

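/*
 * Validate the ELF header, select the endian accessors and the
 * per-architecture sort routine, then dispatch to the ELFCLASS32 or
 * ELFCLASS64 worker.
 */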
static int do_file(char const *const fname, void *addr)
{
	int rc = -1;
	Elf32_Ehdr *ehdr = addr;
	table_sort_t custom_sort = NULL;

	switch (ehdr->e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		return -1;
	}

	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		return -1;
	}

	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_AARCH64:
	case EM_LOONGARCH:
	case EM_RISCV:
	case EM_S390:
	case EM_X86_64:
		custom_sort = sort_relative_table_with_data;
		break;
	case EM_PARISC:
	case EM_PPC:
	case EM_PPC64:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_XTENSA:
		break;
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		return -1;
	}

	switch (ehdr->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
		    r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			break;
		}
		rc = do_sort_32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64:
	{
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
		    r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n",
				fname);
			break;
		}
		rc = do_sort_64(ghdr, fname, custom_sort);
	}
		break;
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		break;
	}

	return rc;
}

int main(int argc, char *argv[])
{
	int i, n_error = 0;	/* gcc-4.3.0 false positive complaint */
	size_t size = 0;
	void *addr = NULL;

	if (argc < 2) {
		fprintf(stderr, "usage: sorttable vmlinux...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = 1; i < argc; i++) {
		addr = mmap_file(argv[i], &size);
		if (!addr) {
			++n_error;
			continue;
		}

		if (do_file(argv[i], addr))
			++n_error;

		munmap(addr, size);
	}

	return !!n_error;
}