X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=kernel%2Fkmod.c;h=bb4e29d924e4ff29567a962e7b643e0c1c875896;hb=a22a0fab32e1216df56e4b9a577dc5c922cf7524;hp=20a997c73c3d0c3951c66f8b5c8e6fd61f51abc3;hpb=3e8e7c93d7eb091463839b5212789c4aae09459e;p=powerpc.git

diff --git a/kernel/kmod.c b/kernel/kmod.c
index 20a997c73c..9f923f8ce6 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -18,9 +18,6 @@
 	call_usermodehelper wait flag, and remove exec_usermodehelper.
 	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
 */
-#define __KERNEL_SYSCALLS__
-
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/syscalls.h>
@@ -28,7 +25,7 @@
 #include <linux/kmod.h>
 #include <linux/smp_lock.h>
 #include <linux/slab.h>
-#include <linux/namespace.h>
+#include <linux/mnt_namespace.h>
 #include <linux/completion.h>
 #include <linux/file.h>
 #include <linux/workqueue.h>
@@ -36,8 +33,11 @@
 #include <linux/mount.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/resource.h>
 #include <asm/uaccess.h>
 
+extern int delete_module(const char *name, unsigned int flags);
+
 extern int max_threads;
 
 static struct workqueue_struct *khelper_wq;
@@ -48,6 +48,7 @@ static struct workqueue_struct *khelper_wq;
 	modprobe_path is set via /proc/sys.
 */
 char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
+struct module_kobject kmod_mk;
 
 /**
  * request_module - try to load a kernel module
@@ -77,6 +78,11 @@ int request_module(const char *fmt, ...)
 	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
 #define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
 	static int kmod_loop_msg;
+	char modalias[16 + MODULE_NAME_LEN] = "MODALIAS=";
+	char *uevent_envp[2] = {
+		modalias,
+		NULL
+	};
 
 	va_start(args, fmt);
 	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
@@ -84,6 +90,12 @@ int request_module(const char *fmt, ...)
 	if (ret >= MODULE_NAME_LEN)
 		return -ENAMETOOLONG;
 
+	strcpy(&modalias[strlen("MODALIAS=")], module_name);
+	kobject_uevent_env(&kmod_mk.kobj, KOBJ_CHANGE, uevent_envp);
+
+	if (modprobe_path[0] == '\0')
+		goto out;
+
 	/* If modprobe needs a service that is in a module, we get a recursive
 	 * loop.  Limit the number of running kmod threads to max_threads/2 or
 	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
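The hunks above implement the uevent side of the patch: request_module() copies the requested name into the "MODALIAS=" buffer, emits it as a KOBJ_CHANGE uevent on the new kmod_mk kobject (registered below as "kmod" under module_subsys, i.e. /sys/module/kmod), and only then falls through to the usermode helper; clearing modprobe_path now skips the helper entirely. A minimal sketch of a typical caller, assuming nothing beyond the stock request_module() API; the function name and the "net-pf-%d" alias are illustrative, not part of this patch:

#include <linux/kmod.h>
#include <linux/errno.h>

/* Hypothetical caller: ask for the module backing a socket family.
 * After the change above, the same name also goes out as e.g.
 * "MODALIAS=net-pf-10" in a KOBJ_CHANGE uevent on /sys/module/kmod,
 * so udev can service the request even when modprobe_path has been
 * emptied via /proc/sys/kernel/modprobe. */
static int hypothetical_load_family_module(int family)
{
	int err = request_module("net-pf-%d", family);

	return err ? -ENOSYS : 0;	/* map any failure, as callers often do */
}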
@@ -110,12 +122,119 @@ int request_module(const char *fmt, ...)
 	ret = call_usermodehelper(modprobe_path, argv, envp, 1);
 	atomic_dec(&kmod_concurrent);
+out:
 	return ret;
 }
 EXPORT_SYMBOL(request_module);
+
+static ssize_t store_mod_request(struct module_attribute *mattr,
+				 struct module *mod,
+				 const char *buffer, size_t count)
+{
+	char name[MODULE_NAME_LEN];
+	int ret;
+
+	if (count < 1 || count+1 > MODULE_NAME_LEN)
+		return -EINVAL;
+	memcpy(name, buffer, count);
+	name[count] = '\0';
+	if (name[count-1] == '\n')
+		name[count-1] = '\0';
+
+	ret = request_module(name);
+	if (ret < 0)
+		return ret;
+	return count;
+}
+
+static struct module_attribute mod_request = {
+	.attr = { .name = "mod_request", .mode = S_IWUSR, .owner = THIS_MODULE },
+	.store = store_mod_request,
+};
+
+#ifdef CONFIG_MODULE_UNLOAD
+static ssize_t store_mod_unload(struct module_attribute *mattr,
+				struct module *mod,
+				const char *buffer, size_t count)
+{
+	char name[MODULE_NAME_LEN];
+	int ret;
+
+	if (count < 1 || count+1 > MODULE_NAME_LEN)
+		return -EINVAL;
+	memcpy(name, buffer, count);
+	name[count] = '\0';
+	if (name[count-1] == '\n')
+		name[count-1] = '\0';
+
+	ret = delete_module(name, O_NONBLOCK);
+	if (ret < 0)
+		return ret;
+	return count;
+}
+
+static struct module_attribute mod_unload = {
+	.attr = { .name = "mod_unload", .mode = S_IWUSR, .owner = THIS_MODULE },
+	.store = store_mod_unload,
+};
+#endif
+
+static ssize_t show_mod_request_helper(struct module_attribute *mattr,
+				       struct module *mod,
+				       char *buffer)
+{
+	return sprintf(buffer, "%s\n", modprobe_path);
+}
+
+static ssize_t store_mod_request_helper(struct module_attribute *mattr,
+					struct module *mod,
+					const char *buffer, size_t count)
+{
+	if (count < 1 || count+1 > KMOD_PATH_LEN)
+		return -EINVAL;
+	memcpy(modprobe_path, buffer, count);
+	modprobe_path[count] = '\0';
+	if (modprobe_path[count-1] == '\n')
+		modprobe_path[count-1] = '\0';
+	return count;
+}
+
+static struct module_attribute mod_request_helper = {
+	.attr = {
+		.name = "mod_request_helper",
+		.mode = S_IWUSR | S_IRUGO,
+		.owner = THIS_MODULE
+	},
+	.show = show_mod_request_helper,
+	.store = store_mod_request_helper,
+};
+
+void __init kmod_sysfs_init(void)
+{
+	int ret;
+
+	kmod_mk.mod = THIS_MODULE;
+	kobj_set_kset_s(&kmod_mk, module_subsys);
+	kobject_set_name(&kmod_mk.kobj, "kmod");
+	kobject_init(&kmod_mk.kobj);
+	ret = kobject_add(&kmod_mk.kobj);
+	if (ret < 0)
+		goto out;
+
+	ret = sysfs_create_file(&kmod_mk.kobj, &mod_request_helper.attr);
+	ret = sysfs_create_file(&kmod_mk.kobj, &mod_request.attr);
+#ifdef CONFIG_MODULE_UNLOAD
+	ret = sysfs_create_file(&kmod_mk.kobj, &mod_unload.attr);
+#endif
+
+	kobject_uevent(&kmod_mk.kobj, KOBJ_ADD);
+out:
+	return;
+}
 #endif /* CONFIG_KMOD */
 
 struct subprocess_info {
+	struct work_struct work;
 	struct completion *complete;
 	char *path;
 	char **argv;
@@ -123,6 +242,7 @@ struct subprocess_info {
 	struct key *ring;
 	int wait;
 	int retval;
+	struct file *stdin;
 };
 
 /*
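The two new subprocess_info members serve the two halves of this series: work embeds the work item so a 2.6.20-style workqueue handler, which receives only a struct work_struct *, can recover its enclosing object with container_of() (the old API passed a void *data instead), and stdin carries the pipe end for the new call_usermodehelper_pipe() further down. A self-contained sketch of the embedded-work pattern, with hypothetical names:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct example_req {			/* hypothetical container object */
	struct work_struct work;	/* embedded, not a pointer */
	int payload;
};

static void example_handler(struct work_struct *work)
{
	/* recover the enclosing object from the member's address */
	struct example_req *req = container_of(work, struct example_req, work);

	printk(KERN_DEBUG "payload=%d\n", req->payload);
}

static void example_submit(struct example_req *req)
{
	INIT_WORK(&req->work, example_handler);	/* no data argument anymore */
	schedule_work(&req->work);
}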
@@ -146,12 +266,30 @@ static int ____call_usermodehelper(void *data)
 	key_put(old_session);
 
+	/* Install input pipe when needed */
+	if (sub_info->stdin) {
+		struct files_struct *f = current->files;
+		struct fdtable *fdt;
+		/* no races because files should be private here */
+		sys_close(0);
+		fd_install(0, sub_info->stdin);
+		spin_lock(&f->file_lock);
+		fdt = files_fdtable(f);
+		FD_SET(0, fdt->open_fds);
+		FD_CLR(0, fdt->close_on_exec);
+		spin_unlock(&f->file_lock);
+
+		/* and disallow core files too */
+		current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
+	}
+
 	/* We can run anywhere, unlike our parent keventd().
 	 */
 	set_cpus_allowed(current, CPU_MASK_ALL);
 
 	retval = -EPERM;
 	if (current->fs->root)
-		retval = execve(sub_info->path, sub_info->argv,sub_info->envp);
+		retval = kernel_execve(sub_info->path,
+				sub_info->argv, sub_info->envp);
 
 	/* Exec failed? */
 	sub_info->retval = retval;
@@ -177,6 +315,8 @@ static int wait_for_helper(void *data)
 	if (pid < 0) {
 		sub_info->retval = pid;
 	} else {
+		int ret;
+
 		/*
 		 * Normally it is bogus to call wait4() from in-kernel because
 		 * wait4() wants to write the exit code to a userspace address.
@@ -186,33 +326,49 @@ static int wait_for_helper(void *data)
 		 *
 		 * Thus the __user pointer cast is valid here.
 		 */
-		sys_wait4(pid, (int __user *) &sub_info->retval, 0, NULL);
+		sys_wait4(pid, (int __user *)&ret, 0, NULL);
+
+		/*
+		 * If ret is 0, either ____call_usermodehelper failed and the
+		 * real error code is already in sub_info->retval or
+		 * sub_info->retval is 0 anyway, so don't mess with it then.
+		 */
+		if (ret)
+			sub_info->retval = ret;
 	}
 
-	complete(sub_info->complete);
+	if (sub_info->wait < 0)
+		kfree(sub_info);
+	else
+		complete(sub_info->complete);
 	return 0;
 }
 
 /* This is run by khelper thread  */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
 {
-	struct subprocess_info *sub_info = data;
+	struct subprocess_info *sub_info =
+		container_of(work, struct subprocess_info, work);
 	pid_t pid;
+	int wait = sub_info->wait;
 
 	/* CLONE_VFORK: wait until the usermode helper has execve'd
 	 * successfully We need the data structures to stay around
 	 * until that is done.  */
-	if (sub_info->wait)
+	if (wait)
 		pid = kernel_thread(wait_for_helper, sub_info,
 				    CLONE_FS | CLONE_FILES | SIGCHLD);
 	else
 		pid = kernel_thread(____call_usermodehelper, sub_info,
 				    CLONE_VFORK | SIGCHLD);
 
+	if (wait < 0)
+		return;
+
 	if (pid < 0) {
 		sub_info->retval = pid;
 		complete(sub_info->complete);
-	} else if (!sub_info->wait)
+	} else if (!wait)
 		complete(sub_info->complete);
 }
 
@@ -223,6 +379,9 @@ static void __call_usermodehelper(void *data)
  * @envp: null-terminated environment list
  * @session_keyring: session keyring for process (NULL for an empty keyring)
  * @wait: wait for the application to finish and return status.
+ *        when -1 don't wait at all, but you get no useful error back when
+ *        the program couldn't be exec'ed. This makes it safe to call
+ *        from interrupt context.
  *
  * Runs a user-space application.  The application is started
  * asynchronously if wait is not set, and runs as a child of keventd.
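With the hunks above, wait now has three modes: positive means wait_for_helper() reaps the child and its exit status (or the captured exec error) is returned; zero means return as soon as the CLONE_VFORK child has exec'd or failed; negative means queue and forget, with the helper thread kfree()ing sub_info and no status reported back. A usage sketch, assuming the call_usermodehelper() wrapper from <linux/kmod.h> (it passes a NULL keyring); the helper path is made up:

#include <linux/kmod.h>

static void hypothetical_run_helper(void)
{
	static char *argv[] = { "/sbin/some-helper", NULL };
	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
	int ret;

	/* wait > 0: wait_for_helper() reaps the child; its exit status
	 * (or the exec error held in sub_info->retval) comes back */
	ret = call_usermodehelper(argv[0], argv, envp, 1);

	/* wait == 0: return once the CLONE_VFORK child exec'd or failed */
	ret = call_usermodehelper(argv[0], argv, envp, 0);

	/* wait < 0: queue and return immediately; sub_info is freed by
	 * the helper thread and no status comes back, which is what
	 * makes this variant safe from interrupt context */
	ret = call_usermodehelper(argv[0], argv, envp, -1);
	(void)ret;
}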
@@ -233,18 +392,53 @@ static void __call_usermodehelper(void *data)
  */
 int call_usermodehelper_keys(char *path, char **argv, char **envp,
 			     struct key *session_keyring, int wait)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct subprocess_info *sub_info;
+	int retval;
+
+	if (!khelper_wq)
+		return -EBUSY;
+
+	if (path[0] == '\0')
+		return 0;
+
+	sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC);
+	if (!sub_info)
+		return -ENOMEM;
+
+	INIT_WORK(&sub_info->work, __call_usermodehelper);
+	sub_info->complete = &done;
+	sub_info->path = path;
+	sub_info->argv = argv;
+	sub_info->envp = envp;
+	sub_info->ring = session_keyring;
+	sub_info->wait = wait;
+
+	queue_work(khelper_wq, &sub_info->work);
+	if (wait < 0) /* task has freed sub_info */
+		return 0;
+	wait_for_completion(&done);
+	retval = sub_info->retval;
+	kfree(sub_info);
+	return retval;
+}
+EXPORT_SYMBOL(call_usermodehelper_keys);
+
+int call_usermodehelper_pipe(char *path, char **argv, char **envp,
+			     struct file **filp)
 {
 	DECLARE_COMPLETION(done);
 	struct subprocess_info sub_info = {
+		.work		= __WORK_INITIALIZER(sub_info.work,
+						     __call_usermodehelper),
 		.complete	= &done,
 		.path		= path,
 		.argv		= argv,
 		.envp		= envp,
-		.ring		= session_keyring,
-		.wait		= wait,
 		.retval		= 0,
 	};
-	DECLARE_WORK(work, __call_usermodehelper, &sub_info);
+	struct file *f;
 
 	if (!khelper_wq)
 		return -EBUSY;
@@ -252,11 +446,23 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
 	if (path[0] == '\0')
 		return 0;
 
-	queue_work(khelper_wq, &work);
+	f = create_write_pipe();
+	if (IS_ERR(f))
+		return PTR_ERR(f);
+	*filp = f;
+
+	f = create_read_pipe(f);
+	if (IS_ERR(f)) {
+		free_write_pipe(*filp);
+		return PTR_ERR(f);
+	}
+	sub_info.stdin = f;
+
+	queue_work(khelper_wq, &sub_info.work);
 	wait_for_completion(&done);
 	return sub_info.retval;
 }
-EXPORT_SYMBOL(call_usermodehelper_keys);
+EXPORT_SYMBOL(call_usermodehelper_pipe);
 
 void __init usermodehelper_init(void)
 {
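call_usermodehelper_pipe() reuses the old on-stack subprocess_info and hands the caller the write side of a pipe whose read side becomes the helper's fd 0. A usage sketch in the spirit of the core_pattern "|helper" consumer this API serves; the helper path, the wrapper function, and the set_fs() write idiom are illustrative assumptions, not taken from the patch:

#include <linux/kmod.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <asm/uaccess.h>

/* Hypothetical caller: spawn a helper with a pipe on its stdin and
 * stream a kernel buffer into it. */
static int hypothetical_pipe_to_helper(const char *buf, size_t len)
{
	static char *argv[] = { "/sbin/pipe-helper", NULL };
	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
	struct file *pipe_w;
	mm_segment_t old_fs;
	int ret;

	ret = call_usermodehelper_pipe("/sbin/pipe-helper", argv, envp, &pipe_w);
	if (ret < 0)
		return ret;

	/* era-typical idiom for pushing a kernel buffer through
	 * f_op->write(), which expects a user pointer */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pipe_w->f_op->write(pipe_w, (const char __user *)buf, len,
			    &pipe_w->f_pos);
	set_fs(old_fs);

	filp_close(pipe_w, NULL);	/* EOF on the helper's stdin */
	return 0;
}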