// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>

#include "bpf_lru_list.h"

#define LOCAL_FREE_TARGET		(128)
#define LOCAL_NR_SCANS			LOCAL_FREE_TARGET

#define PERCPU_FREE_TARGET		(4)
#define PERCPU_NR_SCANS			PERCPU_FREE_TARGET

/* Helpers to get the local list index */
#define LOCAL_LIST_IDX(t)	((t) - BPF_LOCAL_LIST_T_OFFSET)
#define LOCAL_FREE_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
#define LOCAL_PENDING_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
#define IS_LOCAL_LIST_TYPE(t)	((t) >= BPF_LOCAL_LIST_T_OFFSET)

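/* Advance to the next possible CPU, wrapping around to the first one.
 * Used to walk the CPUs' local lists in round-robin order when
 * stealing free nodes (see bpf_common_lru_pop_free()).
 */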
static int get_next_cpu(int cpu)
{
	cpu = cpumask_next(cpu, cpu_possible_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_possible_mask);
	return cpu;
}

/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
	return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}

static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
	return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}

/* bpf_lru_node helpers */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
	return READ_ONCE(node->ref);
}

static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
{
	WRITE_ONCE(node->ref, 0);
}

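/* The global LRU list keeps a per-type node count so that rotation
 * decisions (see bpf_lru_list_inactive_low()) can be made without
 * walking the lists.  Only the list types below NR_BPF_LRU_LIST_COUNT
 * are counted.
 */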
static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]++;
}

static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]--;
}

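/* Unlink a node from the global LRU and move it onto @free_list,
 * retagging it as @tgt_free_type.  Only valid for nodes that are
 * currently on a global (non-local) list.
 */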
static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
					struct bpf_lru_node *node,
					struct list_head *free_list,
					enum bpf_lru_list_type tgt_free_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	/* If the node being removed is the next_inactive_rotation
	 * candidate, move the next_inactive_rotation pointer as well.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	bpf_lru_list_count_dec(l, node->type);

	node->type = tgt_free_type;
	list_move(&node->list, free_list);
}

/* Move nodes from local list to the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
				   struct bpf_lru_node *node,
				   enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	bpf_lru_list_count_inc(l, tgt_type);
	node->type = tgt_type;
	bpf_lru_node_clear_ref(node);
	list_move(&node->list, &l->lists[tgt_type]);
}

/* Move nodes between or within active and inactive list (like
 * active to inactive, inactive to active or tail of active back to
 * the head of active).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
				struct bpf_lru_node *node,
				enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	if (node->type != tgt_type) {
		bpf_lru_list_count_dec(l, node->type);
		bpf_lru_list_count_inc(l, tgt_type);
		node->type = tgt_type;
	}
	bpf_lru_node_clear_ref(node);

	/* If the node being moved is the next_inactive_rotation
	 * candidate, move the next_inactive_rotation pointer as well.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	list_move(&node->list, &l->lists[tgt_type]);
}

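/* The inactive list is "low" when it holds fewer nodes than the
 * active list.  In that case the active list is rotated as well, so
 * that unreferenced nodes get demoted and the inactive list is
 * replenished.
 */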
static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
	return l->counts[BPF_LRU_LIST_T_INACTIVE] <
		l->counts[BPF_LRU_LIST_T_ACTIVE];
}

/* Rotate the active list:
 * 1. Start from the tail.
 * 2. If the node has the ref bit set, rotate it back to the head of
 *    the active list with the ref bit cleared, giving it one more
 *    chance to survive in the active list.
 * 3. If the ref bit is not set, move the node to the head of the
 *    inactive list.
 * 4. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
					 struct bpf_lru_list *l)
{
	struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
	struct bpf_lru_node *node, *tmp_node, *first_node;
	unsigned int i = 0;

	first_node = list_first_entry(active, struct bpf_lru_node, list);
	list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

		if (++i == lru->nr_scans || node == first_node)
			break;
	}
}

/* Rotate the inactive list.  It starts from next_inactive_rotation:
 * 1. If the node has the ref bit set, it is moved to the head of the
 *    active list with the ref bit cleared.
 * 2. If the node does not have the ref bit set, it is left at its
 *    current location (i.e. nothing is done) so that it can be
 *    considered during the next inactive shrink.
 * 3. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
					   struct bpf_lru_list *l)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *cur, *last, *next = inactive;
	struct bpf_lru_node *node;
	unsigned int i = 0;

	if (list_empty(inactive))
		return;

	last = l->next_inactive_rotation->next;
	if (last == inactive)
		last = last->next;

	cur = l->next_inactive_rotation;
	while (i < lru->nr_scans) {
		if (cur == inactive) {
			cur = cur->prev;
			continue;
		}

		node = list_entry(cur, struct bpf_lru_node, list);
		next = cur->prev;
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		if (cur == last)
			break;
		cur = next;
		i++;
	}

	l->next_inactive_rotation = next;
}

/* Shrink the inactive list.  It starts from the tail of the
 * inactive list and only moves nodes without the ref bit set to
 * the designated free list.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
			       struct bpf_lru_list *l,
			       unsigned int tgt_nshrink,
			       struct list_head *free_list,
			       enum bpf_lru_list_type tgt_free_type)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nshrinked = 0;
	unsigned int i = 0;

	list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
		if (bpf_lru_node_is_ref(node)) {
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		} else if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			if (++nshrinked == tgt_nshrink)
				break;
		}

		if (++i == lru->nr_scans)
			break;
	}

	return nshrinked;
}

/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
	if (bpf_lru_list_inactive_low(l))
		__bpf_lru_list_rotate_active(lru, l);

	__bpf_lru_list_rotate_inactive(lru, l);
}

/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive(), it will just remove
 * one node from either the inactive or active list without
 * honoring the ref-bit.  It prefers the inactive list over
 * the active list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
					  struct bpf_lru_list *l,
					  unsigned int tgt_nshrink,
					  struct list_head *free_list,
					  enum bpf_lru_list_type tgt_free_type)

{
	struct bpf_lru_node *node, *tmp_node;
	struct list_head *force_shrink_list;
	unsigned int nshrinked;

	nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
						   free_list, tgt_free_type);
	if (nshrinked)
		return nshrinked;

	/* Do a force shrink by ignoring the reference bit */
	if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	else
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

	list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
					 list) {
		if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			return 1;
		}
	}

	return 0;
}

/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
			       struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node, *tmp_node;

	list_for_each_entry_safe_reverse(node, tmp_node,
					 local_pending_list(loc_l), list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move_in(l, node,
					       BPF_LRU_LIST_T_INACTIVE);
	}
}

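/* Return a node to the free list of the global LRU, under the LRU
 * list lock.
 */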
static void bpf_lru_list_push_free(struct bpf_lru_list *l,
				   struct bpf_lru_node *node)
{
	unsigned long flags;

	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	raw_spin_lock_irqsave(&l->lock, flags);
	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
	raw_spin_unlock_irqrestore(&l->lock, flags);
}

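/* Refill a CPU's local free list from the global LRU: flush the local
 * pending list back into the global lists, rotate the global lists,
 * then move up to LOCAL_FREE_TARGET free nodes over.  If the global
 * free list cannot supply enough nodes, shrink (evict from) the
 * inactive/active lists to make up the difference.
 */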
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
					   struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nfree = 0;

	raw_spin_lock(&l->lock);

	__local_list_flush(l, loc_l);

	__bpf_lru_list_rotate(lru, l);

	list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
				 list) {
		__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
					    BPF_LRU_LOCAL_LIST_T_FREE);
		if (++nfree == LOCAL_FREE_TARGET)
			break;
	}

	if (nfree < LOCAL_FREE_TARGET)
		__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
				      local_free_list(loc_l),
				      BPF_LRU_LOCAL_LIST_T_FREE);

	raw_spin_unlock(&l->lock);
}

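/* Set up a node that is being handed out by the pop_free path: record
 * the hash and the owning cpu, clear the ref bit and put the node on
 * the local pending list.  It stays there until the next
 * __local_list_flush() moves it into the global LRU.
 */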
static void __local_list_add_pending(struct bpf_lru *lru,
				     struct bpf_lru_locallist *loc_l,
				     int cpu,
				     struct bpf_lru_node *node,
				     u32 hash)
{
	*(u32 *)((void *)node + lru->hash_offset) = hash;
	node->cpu = cpu;
	node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
	bpf_lru_node_clear_ref(node);
	list_add(&node->list, local_pending_list(loc_l));
}

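/* Pop helpers for the per-cpu local lists.  The callers hold the
 * local list's lock (loc_l->lock).
 */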
static struct bpf_lru_node *
__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;

	node = list_first_entry_or_null(local_free_list(loc_l),
					struct bpf_lru_node,
					list);
	if (node)
		list_del(&node->list);

	return node;
}

static struct bpf_lru_node *
__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;
	bool force = false;

ignore_ref:
	/* Get from the tail (i.e. older element) of the pending list. */
	list_for_each_entry_reverse(node, local_pending_list(loc_l),
				    list) {
		if ((!bpf_lru_node_is_ref(node) || force) &&
		    lru->del_from_htab(lru->del_arg, node)) {
			list_del(&node->list);
			return node;
		}
	}

	if (!force) {
		force = true;
		goto ignore_ref;
	}

	return NULL;
}

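/* Per-cpu LRU flavor: each CPU owns a private bpf_lru_list, so an
 * allocation only touches the current CPU's lists.  Rotate, then take
 * the first free node, evicting from this CPU's lists if the free
 * list is empty.
 */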
static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct list_head *free_list;
	struct bpf_lru_node *node = NULL;
	struct bpf_lru_list *l;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	l = per_cpu_ptr(lru->percpu_lru, cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_list_rotate(lru, l);

	free_list = &l->lists[BPF_LRU_LIST_T_FREE];
	if (list_empty(free_list))
		__bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
				      BPF_LRU_LIST_T_FREE);

	if (!list_empty(free_list)) {
		node = list_first_entry(free_list, struct bpf_lru_node, list);
		*(u32 *)((void *)node + lru->hash_offset) = hash;
		bpf_lru_node_clear_ref(node);
		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
	}

	raw_spin_unlock_irqrestore(&l->lock, flags);

	return node;
}

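/* Common LRU flavor: try the current CPU's local free list first,
 * refilling it from the shared global LRU when it runs dry.  As a
 * last resort, steal a free or pending node from the per-cpu local
 * lists, walking the CPUs in round-robin order starting at
 * loc_l->next_steal.
 */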
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct bpf_lru_locallist *loc_l, *steal_loc_l;
	struct bpf_common_lru *clru = &lru->common_lru;
	struct bpf_lru_node *node;
	int steal, first_steal;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	loc_l = per_cpu_ptr(clru->local_list, cpu);

	raw_spin_lock_irqsave(&loc_l->lock, flags);

	node = __local_list_pop_free(loc_l);
	if (!node) {
		bpf_lru_list_pop_free_to_local(lru, loc_l);
		node = __local_list_pop_free(loc_l);
	}

	if (node)
		__local_list_add_pending(lru, loc_l, cpu, node, hash);

	raw_spin_unlock_irqrestore(&loc_l->lock, flags);

	if (node)
		return node;

	/* No free nodes found from the local free list and
	 * the global LRU list.
	 *
	 * Steal from the local free/pending list of the
	 * current CPU and remote CPU in RR.  It starts
	 * with the loc_l->next_steal CPU.
	 */

	first_steal = loc_l->next_steal;
	steal = first_steal;
	do {
		steal_loc_l = per_cpu_ptr(clru->local_list, steal);

		raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

		node = __local_list_pop_free(steal_loc_l);
		if (!node)
			node = __local_list_pop_pending(lru, steal_loc_l);

		raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

		steal = get_next_cpu(steal);
	} while (!node && steal != first_steal);

	loc_l->next_steal = steal;

	if (node) {
		raw_spin_lock_irqsave(&loc_l->lock, flags);
		__local_list_add_pending(lru, loc_l, cpu, node, hash);
		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
	}

	return node;
}

struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
{
	if (lru->percpu)
		return bpf_percpu_lru_pop_free(lru, hash);
	else
		return bpf_common_lru_pop_free(lru, hash);
}

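/* Free a node from the common LRU.  A node that is still on its owner
 * CPU's local pending list (i.e. it was never flushed into the global
 * LRU) is moved straight to that CPU's local free list; otherwise it
 * is pushed back onto the global free list.  node->type is read with
 * READ_ONCE() and then re-checked under the local list lock, since it
 * may change concurrently.
 */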
static void bpf_common_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	u8 node_type = READ_ONCE(node->type);
	unsigned long flags;

	if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
	    WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
		return;

	if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
		struct bpf_lru_locallist *loc_l;

		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

		raw_spin_lock_irqsave(&loc_l->lock, flags);

		if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
			raw_spin_unlock_irqrestore(&loc_l->lock, flags);
			goto check_lru_list;
		}

		node->type = BPF_LRU_LOCAL_LIST_T_FREE;
		bpf_lru_node_clear_ref(node);
		list_move(&node->list, local_free_list(loc_l));

		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
		return;
	}

check_lru_list:
	bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}

static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	struct bpf_lru_list *l;
	unsigned long flags;

	l = per_cpu_ptr(lru->percpu_lru, node->cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);

	raw_spin_unlock_irqrestore(&l->lock, flags);
}

void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
{
	if (lru->percpu)
		bpf_percpu_lru_push_free(lru, node);
	else
		bpf_common_lru_push_free(lru, node);
}

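/* Pre-populate the free list(s) with the map's preallocated elements.
 * @buf points at the first element, @node_offset is the offset of the
 * embedded struct bpf_lru_node within an element and consecutive
 * elements are @elem_size bytes apart.  The per-cpu flavor spreads
 * the elements across the possible CPUs.
 */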
static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
				    u32 node_offset, u32 elem_size,
				    u32 nr_elems)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	u32 i;

	for (i = 0; i < nr_elems; i++) {
		struct bpf_lru_node *node;

		node = (struct bpf_lru_node *)(buf + node_offset);
		node->type = BPF_LRU_LIST_T_FREE;
		bpf_lru_node_clear_ref(node);
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		buf += elem_size;
	}
}

static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
				    u32 node_offset, u32 elem_size,
				    u32 nr_elems)
{
	u32 i, pcpu_entries;
	int cpu;
	struct bpf_lru_list *l;

	pcpu_entries = nr_elems / num_possible_cpus();

	i = 0;

	for_each_possible_cpu(cpu) {
		struct bpf_lru_node *node;

		l = per_cpu_ptr(lru->percpu_lru, cpu);
again:
		node = (struct bpf_lru_node *)(buf + node_offset);
		node->cpu = cpu;
		node->type = BPF_LRU_LIST_T_FREE;
		bpf_lru_node_clear_ref(node);
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
}

void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
		      u32 elem_size, u32 nr_elems)
{
	if (lru->percpu)
		bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
					nr_elems);
	else
		bpf_common_lru_populate(lru, buf, node_offset, elem_size,
					nr_elems);
}

static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
		INIT_LIST_HEAD(&loc_l->lists[i]);

	loc_l->next_steal = cpu;

	raw_spin_lock_init(&loc_l->lock);
}

static void bpf_lru_list_init(struct bpf_lru_list *l)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
		INIT_LIST_HEAD(&l->lists[i]);

	for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
		l->counts[i] = 0;

	l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

	raw_spin_lock_init(&l->lock);
}

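/* Initialize an LRU in either the per-cpu or the common (shared)
 * flavor.  @hash_offset is the offset, relative to the embedded
 * struct bpf_lru_node, at which the element's hash is stored.
 * @del_from_htab is called (with @del_arg) to detach an element from
 * the map before its node may be recycled; it returns true on
 * success.
 */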
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
		 del_from_htab_func del_from_htab, void *del_arg)
{
	int cpu;

	if (percpu) {
		lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
		if (!lru->percpu_lru)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_list *l;

			l = per_cpu_ptr(lru->percpu_lru, cpu);
			bpf_lru_list_init(l);
		}
		lru->nr_scans = PERCPU_NR_SCANS;
	} else {
		struct bpf_common_lru *clru = &lru->common_lru;

		clru->local_list = alloc_percpu(struct bpf_lru_locallist);
		if (!clru->local_list)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_locallist *loc_l;

			loc_l = per_cpu_ptr(clru->local_list, cpu);
			bpf_lru_locallist_init(loc_l, cpu);
		}

		bpf_lru_list_init(&clru->lru_list);
		lru->nr_scans = LOCAL_NR_SCANS;
	}

	lru->percpu = percpu;
	lru->del_from_htab = del_from_htab;
	lru->del_arg = del_arg;
	lru->hash_offset = hash_offset;

	return 0;
}

void bpf_lru_destroy(struct bpf_lru *lru)
{
	if (lru->percpu)
		free_percpu(lru->percpu_lru);
	else
		free_percpu(lru->common_lru.local_list);
}