// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */
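
/*
 * Worked numbers (illustrative, not in the original source): for
 * n = 1024, n*log2(n) = 10240, so this sort averages roughly
 * 10240 + 0.37*1024 ~= 10619 comparisons, while glibc qsort()
 * averages roughly 10240 - 1.26*1024 ~= 8950, about
 * 1.63*1024 ~= 1669 fewer.
 */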

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address
 * must be as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
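
/*
 * Worked example (illustrative, not in the original source): with
 * size = 12 and align = 4 on a kernel without
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, lsbits ORs together the low
 * bits of the size and of the base address.  A base of 0x1002 gives
 * lsbits = 0x0c | 0x02 = 0x0e, and 0x0e & 3 != 0, so sort() falls back
 * to byte copies; a base of 0x1004 gives lsbits = 0x0c, and
 * 0x0c & 3 == 0, so 32-bit word copies are safe.
 */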

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element
 * @b: pointer to the second element
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element
 * @b: pointer to the second element
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not.  If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element
 * @b: pointer to the second element
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

typedef void (*swap_func_t)(void *a, void *b, int size);

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_func_t)0
#define SWAP_WORDS_32 (swap_func_t)1
#define SWAP_BYTES    (swap_func_t)2

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
{
	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size);
}
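
/*
 * Example (illustrative, not in the original source): for 16-byte
 * elements at an 8-byte-aligned base, sort() below selects
 * SWAP_WORDS_64, so do_swap() runs swap_words_64(a, b, 16) as a direct
 * call.  Only a caller-supplied swap_func takes the indirect-call (and
 * thus potentially retpoline) path in the final else branch.
 */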

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought.  Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2.  But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit.  That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}
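
/*
 * Worked example (illustrative, not in the original source): with
 * size = 8, lsbit = 8.  A child at byte offset i = 40 is array index 5,
 * whose parent is index (5-1)/2 = 2, i.e. byte offset 16: i -= 8 gives
 * 32, 32 & 8 is 0 so nothing more is subtracted, and 32/2 = 16.  For
 * i = 48 (index 6, parent again index 2): i -= 8 gives 40, 40 & 8 is 8
 * so another 8 is subtracted giving 32, and 32/2 = 16.
 */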

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array.  You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case.  While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;  /* Used to find parent */

	if (!a)		/* num < 2 || size == 0 */
		return;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			a -= size;
		else if (n -= size)	/* Sorting: Extract root to --n */
			do_swap(base, base + n, size, swap_func);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap.  This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down.  (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = cmp_func(base + c, base + d) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && cmp_func(base + a, base + b) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func);
		}
	}
}
EXPORT_SYMBOL(sort);
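
/*
 * Usage sketch (illustrative, not in the original source): sorting an
 * array of ints with the built-in swap (swap_func == NULL).  cmp_int()
 * is a hypothetical helper; the comparison form below avoids the
 * overflow a plain subtraction could cause on INT_MIN/INT_MAX inputs.
 *
 *	static int cmp_int(const void *a, const void *b)
 *	{
 *		int x = *(const int *)a, y = *(const int *)b;
 *
 *		return (x > y) - (x < y);
 *	}
 *
 *	int v[] = { 3, 1, 2 };
 *
 *	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
 *	// v is now { 1, 2, 3 }
 */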