xref: /openbmc/linux/kernel/module/dups.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * kmod dups - the kernel module autoloader duplicate suppressor
 *
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 */
78660484eSLuis Chamberlain 
88660484eSLuis Chamberlain #define pr_fmt(fmt)     "module: " fmt
98660484eSLuis Chamberlain 
108660484eSLuis Chamberlain #include <linux/module.h>
118660484eSLuis Chamberlain #include <linux/sched.h>
128660484eSLuis Chamberlain #include <linux/sched/task.h>
138660484eSLuis Chamberlain #include <linux/binfmts.h>
148660484eSLuis Chamberlain #include <linux/syscalls.h>
158660484eSLuis Chamberlain #include <linux/unistd.h>
168660484eSLuis Chamberlain #include <linux/kmod.h>
178660484eSLuis Chamberlain #include <linux/slab.h>
188660484eSLuis Chamberlain #include <linux/completion.h>
198660484eSLuis Chamberlain #include <linux/cred.h>
208660484eSLuis Chamberlain #include <linux/file.h>
218660484eSLuis Chamberlain #include <linux/fdtable.h>
228660484eSLuis Chamberlain #include <linux/workqueue.h>
238660484eSLuis Chamberlain #include <linux/security.h>
248660484eSLuis Chamberlain #include <linux/mount.h>
258660484eSLuis Chamberlain #include <linux/kernel.h>
268660484eSLuis Chamberlain #include <linux/init.h>
278660484eSLuis Chamberlain #include <linux/resource.h>
288660484eSLuis Chamberlain #include <linux/notifier.h>
298660484eSLuis Chamberlain #include <linux/suspend.h>
308660484eSLuis Chamberlain #include <linux/rwsem.h>
318660484eSLuis Chamberlain #include <linux/ptrace.h>
328660484eSLuis Chamberlain #include <linux/async.h>
338660484eSLuis Chamberlain #include <linux/uaccess.h>
348660484eSLuis Chamberlain 
35*0b891c83SArnd Bergmann #include "internal.h"
36*0b891c83SArnd Bergmann 
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
/*
 * When enabled, a duplicate auto-load request triggers a WARN() (with
 * backtrace) instead of a plain pr_warn(). Default comes from
 * CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS_TRACE; can only be flipped on at
 * runtime (bool_enable_only), never back off.
 */
static bool enable_dups_trace = IS_ENABLED(CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS_TRACE);
module_param(enable_dups_trace, bool_enable_only, 0644);

/*
 * Protects dup_kmod_reqs list, adds / removals with RCU.
 */
static DEFINE_MUTEX(kmod_dup_mutex);
static LIST_HEAD(dup_kmod_reqs);
478660484eSLuis Chamberlain 
/*
 * One in-flight module auto-load request, used to detect and coalesce
 * duplicate request_module() calls for the same module name.
 */
struct kmod_dup_req {
	struct list_head list;			/* entry on dup_kmod_reqs, RCU-managed */
	char name[MODULE_NAME_LEN];		/* requested module name */
	struct completion first_req_done;	/* completed when the first request finishes */
	struct work_struct complete_work;	/* deferred complete_all() of first_req_done */
	struct delayed_work delete_work;	/* delayed removal of this entry */
	int dup_ret;				/* first request's return value, shared with dups */
};
568660484eSLuis Chamberlain 
kmod_dup_request_lookup(char * module_name)578660484eSLuis Chamberlain static struct kmod_dup_req *kmod_dup_request_lookup(char *module_name)
588660484eSLuis Chamberlain {
598660484eSLuis Chamberlain 	struct kmod_dup_req *kmod_req;
608660484eSLuis Chamberlain 
618660484eSLuis Chamberlain 	list_for_each_entry_rcu(kmod_req, &dup_kmod_reqs, list,
628660484eSLuis Chamberlain 				lockdep_is_held(&kmod_dup_mutex)) {
638660484eSLuis Chamberlain 		if (strlen(kmod_req->name) == strlen(module_name) &&
648660484eSLuis Chamberlain 		    !memcmp(kmod_req->name, module_name, strlen(module_name))) {
658660484eSLuis Chamberlain 			return kmod_req;
668660484eSLuis Chamberlain                 }
678660484eSLuis Chamberlain         }
688660484eSLuis Chamberlain 
698660484eSLuis Chamberlain 	return NULL;
708660484eSLuis Chamberlain }
718660484eSLuis Chamberlain 
kmod_dup_request_delete(struct work_struct * work)728660484eSLuis Chamberlain static void kmod_dup_request_delete(struct work_struct *work)
738660484eSLuis Chamberlain {
748660484eSLuis Chamberlain 	struct kmod_dup_req *kmod_req;
758660484eSLuis Chamberlain 	kmod_req = container_of(to_delayed_work(work), struct kmod_dup_req, delete_work);
768660484eSLuis Chamberlain 
778660484eSLuis Chamberlain 	/*
788660484eSLuis Chamberlain 	 * The typical situation is a module successully loaded. In that
798660484eSLuis Chamberlain 	 * situation the module will be present already in userspace. If
808660484eSLuis Chamberlain 	 * new requests come in after that, userspace will already know the
818660484eSLuis Chamberlain 	 * module is loaded so will just return 0 right away. There is still
828660484eSLuis Chamberlain 	 * a small chance right after we delete this entry new request_module()
838660484eSLuis Chamberlain 	 * calls may happen after that, they can happen. These heuristics
848660484eSLuis Chamberlain 	 * are to protect finit_module() abuse for auto-loading, if modules
858660484eSLuis Chamberlain 	 * are still tryign to auto-load even if a module is already loaded,
868660484eSLuis Chamberlain 	 * that's on them, and those inneficiencies should not be fixed by
878660484eSLuis Chamberlain 	 * kmod. The inneficies there are a call to modprobe and modprobe
888660484eSLuis Chamberlain 	 * just returning 0.
898660484eSLuis Chamberlain 	 */
908660484eSLuis Chamberlain 	mutex_lock(&kmod_dup_mutex);
918660484eSLuis Chamberlain 	list_del_rcu(&kmod_req->list);
928660484eSLuis Chamberlain 	synchronize_rcu();
938660484eSLuis Chamberlain 	mutex_unlock(&kmod_dup_mutex);
948660484eSLuis Chamberlain 	kfree(kmod_req);
958660484eSLuis Chamberlain }
968660484eSLuis Chamberlain 
kmod_dup_request_complete(struct work_struct * work)978660484eSLuis Chamberlain static void kmod_dup_request_complete(struct work_struct *work)
988660484eSLuis Chamberlain {
998660484eSLuis Chamberlain 	struct kmod_dup_req *kmod_req;
1008660484eSLuis Chamberlain 
1018660484eSLuis Chamberlain 	kmod_req = container_of(work, struct kmod_dup_req, complete_work);
1028660484eSLuis Chamberlain 
1038660484eSLuis Chamberlain 	/*
1048660484eSLuis Chamberlain 	 * This will ensure that the kernel will let all the waiters get
1058660484eSLuis Chamberlain 	 * informed its time to check the return value. It's time to
1068660484eSLuis Chamberlain 	 * go home.
1078660484eSLuis Chamberlain 	 */
1088660484eSLuis Chamberlain 	complete_all(&kmod_req->first_req_done);
1098660484eSLuis Chamberlain 
1108660484eSLuis Chamberlain 	/*
1118660484eSLuis Chamberlain 	 * Now that we have allowed prior request_module() calls to go on
1128660484eSLuis Chamberlain 	 * with life, let's schedule deleting this entry. We don't have
1138660484eSLuis Chamberlain 	 * to do it right away, but we *eventually* want to do it so to not
1148660484eSLuis Chamberlain 	 * let this linger forever as this is just a boot optimization for
1158660484eSLuis Chamberlain 	 * possible abuses of vmalloc() incurred by finit_module() thrashing.
1168660484eSLuis Chamberlain 	 */
1178660484eSLuis Chamberlain 	queue_delayed_work(system_wq, &kmod_req->delete_work, 60 * HZ);
1188660484eSLuis Chamberlain }
1198660484eSLuis Chamberlain 
/**
 * kmod_dup_request_exists_wait - de-duplicate a module auto-load request
 * @module_name: name of the module being requested
 * @wait: true for request_module() callers, false for request_module_nowait()
 * @dup_ret: on a true return, the value the caller should return
 *
 * Checks whether an auto-load request for @module_name is already in flight.
 * Waiting duplicates block until the first request completes and then share
 * its return value; non-waiting duplicates get 0 immediately.
 *
 * Return: true if this request is a duplicate (use *@dup_ret as the result),
 * false if the caller must issue the request itself.
 */
bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret)
{
	struct kmod_dup_req *kmod_req, *new_kmod_req;
	int ret;

	/*
	 * Pre-allocate the entry in case we have to use it later
	 * to avoid contention with the mutex.
	 */
	new_kmod_req = kzalloc(sizeof(*new_kmod_req), GFP_KERNEL);
	if (!new_kmod_req)
		return false;

	/*
	 * Bounded, always-NUL-terminated copy. Callers should already have
	 * validated the name length (request_module() enforces
	 * MODULE_NAME_LEN), but don't rely on that here: an unbounded
	 * memcpy() of strlen(module_name) bytes would overflow name[] for
	 * an overlong string.
	 */
	strscpy(new_kmod_req->name, module_name, MODULE_NAME_LEN);
	INIT_WORK(&new_kmod_req->complete_work, kmod_dup_request_complete);
	INIT_DELAYED_WORK(&new_kmod_req->delete_work, kmod_dup_request_delete);
	init_completion(&new_kmod_req->first_req_done);

	mutex_lock(&kmod_dup_mutex);

	kmod_req = kmod_dup_request_lookup(module_name);
	if (!kmod_req) {
		/*
		 * If the first request that came through for a module
		 * was with request_module_nowait() we cannot wait for it
		 * and share its return value with other users which may
		 * have used request_module() and need a proper return value
		 * so just skip using them as an anchor.
		 *
		 * If a prior request to this one came through with
		 * request_module() though, then a request_module_nowait()
		 * would benefit from duplicate detection.
		 */
		if (!wait) {
			kfree(new_kmod_req);
			pr_debug("New request_module_nowait() for %s -- cannot track duplicates for this request\n", module_name);
			mutex_unlock(&kmod_dup_mutex);
			return false;
		}

		/*
		 * There was no duplicate, just add the request so we can
		 * keep tab on duplicates later.
		 */
		pr_debug("New request_module() for %s\n", module_name);
		list_add_rcu(&new_kmod_req->list, &dup_kmod_reqs);
		mutex_unlock(&kmod_dup_mutex);
		return false;
	}
	mutex_unlock(&kmod_dup_mutex);

	/* We are dealing with a duplicate request now */
	kfree(new_kmod_req);

	/*
	 * To fix these try to use try_then_request_module() instead as that
	 * will check if the component you are looking for is present or not.
	 * You could also just queue a single request to load the module once,
	 * instead of having each and everything you need try to request for
	 * the module.
	 *
	 * Duplicate request_module() calls can cause quite a bit of wasted
	 * vmalloc() space when racing with userspace.
	 */
	if (enable_dups_trace)
		WARN(1, "module-autoload: duplicate request for module %s\n", module_name);
	else
		pr_warn("module-autoload: duplicate request for module %s\n", module_name);

	if (!wait) {
		/*
		 * If request_module_nowait() was used then the user just
		 * wanted to issue the request and if another module request
		 * was already on its way with the same name we don't care for
		 * the return value either. Let duplicate request_module_nowait()
		 * calls bail out right away.
		 */
		*dup_ret = 0;
		return true;
	}

	/*
	 * If a duplicate request_module() was used they *may* care for
	 * the return value, so we have no other option but to wait for
	 * the first caller to complete. If the first caller used
	 * request_module_nowait(), subsequent callers will have to live
	 * with the compromise of sharing its result while this
	 * optimization is enabled ...
	 */
	ret = wait_for_completion_state(&kmod_req->first_req_done,
					TASK_UNINTERRUPTIBLE | TASK_KILLABLE);
	if (ret) {
		*dup_ret = ret;
		return true;
	}

	/* Now the duplicate request has the same exact return value as the first request */
	*dup_ret = kmod_req->dup_ret;

	return true;
}
2218660484eSLuis Chamberlain 
kmod_dup_request_announce(char * module_name,int ret)2228660484eSLuis Chamberlain void kmod_dup_request_announce(char *module_name, int ret)
2238660484eSLuis Chamberlain {
2248660484eSLuis Chamberlain 	struct kmod_dup_req *kmod_req;
2258660484eSLuis Chamberlain 
2268660484eSLuis Chamberlain 	mutex_lock(&kmod_dup_mutex);
2278660484eSLuis Chamberlain 
2288660484eSLuis Chamberlain 	kmod_req = kmod_dup_request_lookup(module_name);
2298660484eSLuis Chamberlain 	if (!kmod_req)
2308660484eSLuis Chamberlain 		goto out;
2318660484eSLuis Chamberlain 
2328660484eSLuis Chamberlain 	kmod_req->dup_ret = ret;
2338660484eSLuis Chamberlain 
2348660484eSLuis Chamberlain 	/*
2358660484eSLuis Chamberlain 	 * If we complete() here we may allow duplicate threads
2368660484eSLuis Chamberlain 	 * to continue before the first one that submitted the
2378660484eSLuis Chamberlain 	 * request. We're in no rush also, given that each and
2388660484eSLuis Chamberlain 	 * every bounce back to userspace is slow we avoid that
2398660484eSLuis Chamberlain 	 * with a slight delay here. So queueue up the completion
2408660484eSLuis Chamberlain 	 * and let duplicates suffer, just wait a tad bit longer.
2418660484eSLuis Chamberlain 	 * There is no rush. But we also don't want to hold the
2428660484eSLuis Chamberlain 	 * caller up forever or introduce any boot delays.
2438660484eSLuis Chamberlain 	 */
2448660484eSLuis Chamberlain 	queue_work(system_wq, &kmod_req->complete_work);
2458660484eSLuis Chamberlain 
2468660484eSLuis Chamberlain out:
2478660484eSLuis Chamberlain 	mutex_unlock(&kmod_dup_mutex);
2488660484eSLuis Chamberlain }
249