path: root/kernel/livepatch/core.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 18:35:40 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 18:35:40 -0800
commit    1d9c5d79e6e4385aea6f69c23ba543717434ed70 (patch)
tree      d5bbe45e95f85c8a8b5d980b4a619c0ca82a9da6 /kernel/livepatch/core.c
parent    870fd0f5df4e131467612cc46db46fc3b69fd706 (diff)
parent    f638f4dc0880d515c807a67b8210885a4a4f18bb (diff)
download  linux-sh-1d9c5d79e6e4385aea6f69c23ba543717434ed70.tar.gz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching
Pull live patching infrastructure from Jiri Kosina:
 "Let me provide a bit of history first, before describing what is in this pile.

  Originally, there was kSplice as a standalone project that implemented stop_machine()-based patching for the linux kernel. This project was later acquired, and the current owner is providing live patching as a proprietary service, without any intention of having their implementation merged.

  Then, due to rising user/customer demand, both Red Hat and SUSE started working on their own implementations (not knowing about each other), and announced first versions roughly at the same time [1] [2].

  The principal difference between the two solutions is how they make sure that the patching is performed in a consistent way across different execution threads, with respect to the semantic nature of the change being introduced. In a nutshell, kPatch issues stop_machine(), then looks at the stacks of all existing processes, and if it decides that the system is in a state that can be patched safely, it proceeds to insert the code-redirection machinery into the patched functions. kGraft, on the other hand, provides per-thread consistency during one single pass of a process through the kernel, and performs a lazy, continuous migration of threads from the "unpatched" universe to the "patched" one at safe checkpoints.

  If you are interested in a more detailed discussion about the consistency models and their possible combinations, please see the thread that evolved around [3].

  It pretty quickly became obvious to the interested parties that it's absolutely impractical in this case to have several isolated solutions for one task co-exist in the kernel. During a dedicated Live Kernel Patching track at LPC in Düsseldorf, all the interested parties sat together and came up with a joint approach that would work for both distro vendors; Steven Rostedt took notes [4] from this meeting. The foundation for that approach is what's present in this pull request.

  It provides a basic infrastructure for function "live patching" (i.e. code redirection), including an API for kernel modules containing the actual patches, and an API/ABI for userspace to operate on the patches (look up which patches are applied, enable/disable them, etc). It's relatively simple and minimalistic, as it makes use of existing kernel infrastructure (namely ftrace) as much as possible. It's also self-contained, in the sense that it doesn't hook itself into any other kernel subsystem (it doesn't even touch any other code).

  It's now implemented for x86 only as a reference architecture, but support for powerpc, s390 and arm is already in the works (adding arch-specific support basically boils down to teaching ftrace about regs-saving).

  Once this common infrastructure gets merged, both Red Hat and SUSE have agreed to immediately start porting their current solutions on top of it, abandoning their out-of-tree code. The plan basically is that each patch will be marked by flag(s) indicating which consistency model it is willing to use (again, the details have been sketched out already in the thread at [3]).

  Before this happens, the current codebase can be used to patch a large group of security/stability problems whose fixes are not too complex -- in the sense that they don't introduce non-trivial changes to functions' return value semantics, don't change the layout of data structures, etc. This corresponds to the LEAVE_FUNCTION && SWITCH_FUNCTION semantics described at [3].
  This tree has been in linux-next since December.

    [1] https://lkml.org/lkml/2014/4/30/477
    [2] https://lkml.org/lkml/2014/7/14/857
    [3] https://lkml.org/lkml/2014/11/7/354
    [4] http://linuxplumbersconf.org/2014/wp-content/uploads/2014/10/LPC2014_LivePatching.txt

  [ The core code is introduced by the three commits authored by Seth Jennings, which incorporated a lot of changes during the numerous respins and reviews of the initial implementation. All the followup commits materialized only after the public tree had been created, so they were not folded into the initial three commits, to avoid rebasing the public tree ]"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: add missing newline to error message
  livepatch: rename config to CONFIG_LIVEPATCH
  livepatch: fix uninitialized return value
  livepatch: support for repatching a function
  livepatch: enforce patch stacking semantics
  livepatch: change ARCH_HAVE_LIVE_PATCHING to HAVE_LIVE_PATCHING
  livepatch: fix deferred module patching order
  livepatch: handle ancient compilers with more grace
  livepatch: kconfig: use bool instead of boolean
  livepatch: samples: fix usage example comments
  livepatch: MAINTAINERS: add git tree location
  livepatch: use FTRACE_OPS_FL_IPMODIFY
  livepatch: move x86 specific ftrace handler code to arch/x86
  livepatch: samples: add sample live patching module
  livepatch: kernel: add support for live patching
  livepatch: kernel: add TAINT_LIVEPATCH
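
To make the module-facing API described above concrete, here is a minimal patch-module sketch in the spirit of the sample added by "livepatch: samples: add sample live patching module". The choice of cmdline_proc_show as the patched function mirrors that sample; treat this as an illustration of the klp_patch/klp_object/klp_func layout and the register/enable sequence, not a verbatim copy of the sample module.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* Replacement that shadows the original cmdline_proc_show() */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

/* Sentinel-terminated arrays, as walked by klp_init_object()/klp_init_func() */
static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* a NULL name means the patched object is vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}

	return 0;
}

static void livepatch_exit(void)
{
	/* a patch must be disabled before it can be unregistered */
	WARN_ON(klp_disable_patch(&patch));
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");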
Diffstat (limited to 'kernel/livepatch/core.c')
-rw-r--r--  kernel/livepatch/core.c  1015
1 file changed, 1015 insertions(+), 0 deletions(-)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
new file mode 100644
index 000000000000..ff7f47d026ac
--- /dev/null
+++ b/kernel/livepatch/core.c
@@ -0,0 +1,1015 @@
+/*
+ * core.c - Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/ftrace.h>
+#include <linux/list.h>
+#include <linux/kallsyms.h>
+#include <linux/livepatch.h>
+
+/**
+ * struct klp_ops - structure for tracking registered ftrace ops structs
+ *
+ * A single ftrace_ops is shared between all enabled replacement functions
+ * (klp_func structs) which have the same old_addr. This allows the switch
+ * between function versions to happen instantaneously by updating the klp_ops
+ * struct's func_stack list. The winner is the klp_func at the top of the
+ * func_stack (front of the list).
+ *
+ * @node: node for the global klp_ops list
+ * @func_stack: list head for the stack of klp_func's (active func is on top)
+ * @fops: registered ftrace ops struct
+ */
+struct klp_ops {
+ struct list_head node;
+ struct list_head func_stack;
+ struct ftrace_ops fops;
+};
+
+/*
+ * The klp_mutex protects the global lists and state transitions of any
+ * structure reachable from them. References to any structure must be obtained
+ * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
+ * ensure it gets consistent data).
+ */
+static DEFINE_MUTEX(klp_mutex);
+
+static LIST_HEAD(klp_patches);
+static LIST_HEAD(klp_ops);
+
+static struct kobject *klp_root_kobj;
+
+static struct klp_ops *klp_find_ops(unsigned long old_addr)
+{
+ struct klp_ops *ops;
+ struct klp_func *func;
+
+ list_for_each_entry(ops, &klp_ops, node) {
+ func = list_first_entry(&ops->func_stack, struct klp_func,
+ stack_node);
+ if (func->old_addr == old_addr)
+ return ops;
+ }
+
+ return NULL;
+}
+
+static bool klp_is_module(struct klp_object *obj)
+{
+ return obj->name;
+}
+
+static bool klp_is_object_loaded(struct klp_object *obj)
+{
+ return !obj->name || obj->mod;
+}
+
+/* sets obj->mod if object is not vmlinux and module is found */
+static void klp_find_object_module(struct klp_object *obj)
+{
+ if (!klp_is_module(obj))
+ return;
+
+ mutex_lock(&module_mutex);
+ /*
+ * We don't need to take a reference on the module here because we have
+ * the klp_mutex, which is also taken by the module notifier. This
+ * prevents any module from unloading until we release the klp_mutex.
+ */
+ obj->mod = find_module(obj->name);
+ mutex_unlock(&module_mutex);
+}
+
+/* klp_mutex must be held by caller */
+static bool klp_is_patch_registered(struct klp_patch *patch)
+{
+ struct klp_patch *mypatch;
+
+ list_for_each_entry(mypatch, &klp_patches, list)
+ if (mypatch == patch)
+ return true;
+
+ return false;
+}
+
+static bool klp_initialized(void)
+{
+ return klp_root_kobj;
+}
+
+struct klp_find_arg {
+ const char *objname;
+ const char *name;
+ unsigned long addr;
+ /*
+ * If count == 0, the symbol was not found. If count == 1, a unique
+ * match was found and addr is set. If count > 1, there is
+ * unresolvable ambiguity among "count" number of symbols with the same
+ * name in the same object.
+ */
+ unsigned long count;
+};
+
+static int klp_find_callback(void *data, const char *name,
+ struct module *mod, unsigned long addr)
+{
+ struct klp_find_arg *args = data;
+
+ if ((mod && !args->objname) || (!mod && args->objname))
+ return 0;
+
+ if (strcmp(args->name, name))
+ return 0;
+
+ if (args->objname && strcmp(args->objname, mod->name))
+ return 0;
+
+ /*
+ * args->addr might be overwritten if another match is found
+ * but klp_find_object_symbol() handles this and only returns the
+ * addr if count == 1.
+ */
+ args->addr = addr;
+ args->count++;
+
+ return 0;
+}
+
+static int klp_find_object_symbol(const char *objname, const char *name,
+ unsigned long *addr)
+{
+ struct klp_find_arg args = {
+ .objname = objname,
+ .name = name,
+ .addr = 0,
+ .count = 0
+ };
+
+ kallsyms_on_each_symbol(klp_find_callback, &args);
+
+ if (args.count == 0)
+ pr_err("symbol '%s' not found in symbol table\n", name);
+ else if (args.count > 1)
+ pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
+ args.count, name, objname);
+ else {
+ *addr = args.addr;
+ return 0;
+ }
+
+ *addr = 0;
+ return -EINVAL;
+}
+
+struct klp_verify_args {
+ const char *name;
+ const unsigned long addr;
+};
+
+static int klp_verify_callback(void *data, const char *name,
+ struct module *mod, unsigned long addr)
+{
+ struct klp_verify_args *args = data;
+
+ if (!mod &&
+ !strcmp(args->name, name) &&
+ args->addr == addr)
+ return 1;
+
+ return 0;
+}
+
+static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
+{
+ struct klp_verify_args args = {
+ .name = name,
+ .addr = addr,
+ };
+
+ if (kallsyms_on_each_symbol(klp_verify_callback, &args))
+ return 0;
+
+ pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
+ name, addr);
+ return -EINVAL;
+}
+
+static int klp_find_verify_func_addr(struct klp_object *obj,
+ struct klp_func *func)
+{
+ int ret;
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+ /* KASLR is enabled, disregard old_addr from user */
+ func->old_addr = 0;
+#endif
+
+ if (!func->old_addr || klp_is_module(obj))
+ ret = klp_find_object_symbol(obj->name, func->old_name,
+ &func->old_addr);
+ else
+ ret = klp_verify_vmlinux_symbol(func->old_name,
+ func->old_addr);
+
+ return ret;
+}
+
+/*
+ * external symbols are located outside the parent object (where the parent
+ * object is either vmlinux or the kmod being patched).
+ */
+static int klp_find_external_symbol(struct module *pmod, const char *name,
+ unsigned long *addr)
+{
+ const struct kernel_symbol *sym;
+
+ /* first, check if it's an exported symbol */
+ preempt_disable();
+ sym = find_symbol(name, NULL, NULL, true, true);
+ preempt_enable();
+ if (sym) {
+ *addr = sym->value;
+ return 0;
+ }
+
+ /* otherwise check if it's in another .o within the patch module */
+ return klp_find_object_symbol(pmod->name, name, addr);
+}
+
+static int klp_write_object_relocations(struct module *pmod,
+ struct klp_object *obj)
+{
+ int ret;
+ struct klp_reloc *reloc;
+
+ if (WARN_ON(!klp_is_object_loaded(obj)))
+ return -EINVAL;
+
+ if (WARN_ON(!obj->relocs))
+ return -EINVAL;
+
+ for (reloc = obj->relocs; reloc->name; reloc++) {
+ if (!klp_is_module(obj)) {
+ ret = klp_verify_vmlinux_symbol(reloc->name,
+ reloc->val);
+ if (ret)
+ return ret;
+ } else {
+ /* module, reloc->val needs to be discovered */
+ if (reloc->external)
+ ret = klp_find_external_symbol(pmod,
+ reloc->name,
+ &reloc->val);
+ else
+ ret = klp_find_object_symbol(obj->mod->name,
+ reloc->name,
+ &reloc->val);
+ if (ret)
+ return ret;
+ }
+ ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
+ reloc->val + reloc->addend);
+ if (ret) {
+ pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
+ reloc->name, reloc->val, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void notrace klp_ftrace_handler(unsigned long ip,
+ unsigned long parent_ip,
+ struct ftrace_ops *fops,
+ struct pt_regs *regs)
+{
+ struct klp_ops *ops;
+ struct klp_func *func;
+
+ ops = container_of(fops, struct klp_ops, fops);
+
+ rcu_read_lock();
+ func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
+ stack_node);
+ rcu_read_unlock();
+
+ if (WARN_ON_ONCE(!func))
+ return;
+
+ klp_arch_set_pc(regs, (unsigned long)func->new_func);
+}
+
+static int klp_disable_func(struct klp_func *func)
+{
+ struct klp_ops *ops;
+ int ret;
+
+ if (WARN_ON(func->state != KLP_ENABLED))
+ return -EINVAL;
+
+ if (WARN_ON(!func->old_addr))
+ return -EINVAL;
+
+ ops = klp_find_ops(func->old_addr);
+ if (WARN_ON(!ops))
+ return -EINVAL;
+
+ if (list_is_singular(&ops->func_stack)) {
+ ret = unregister_ftrace_function(&ops->fops);
+ if (ret) {
+ pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
+ func->old_name, ret);
+ return ret;
+ }
+
+ ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+ if (ret)
+ pr_warn("function unregister succeeded but failed to clear the filter\n");
+
+ list_del_rcu(&func->stack_node);
+ list_del(&ops->node);
+ kfree(ops);
+ } else {
+ list_del_rcu(&func->stack_node);
+ }
+
+ func->state = KLP_DISABLED;
+
+ return 0;
+}
+
+static int klp_enable_func(struct klp_func *func)
+{
+ struct klp_ops *ops;
+ int ret;
+
+ if (WARN_ON(!func->old_addr))
+ return -EINVAL;
+
+ if (WARN_ON(func->state != KLP_DISABLED))
+ return -EINVAL;
+
+ ops = klp_find_ops(func->old_addr);
+ if (!ops) {
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ ops->fops.func = klp_ftrace_handler;
+ ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
+ FTRACE_OPS_FL_DYNAMIC |
+ FTRACE_OPS_FL_IPMODIFY;
+
+ list_add(&ops->node, &klp_ops);
+
+ INIT_LIST_HEAD(&ops->func_stack);
+ list_add_rcu(&func->stack_node, &ops->func_stack);
+
+ ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+ if (ret) {
+ pr_err("failed to set ftrace filter for function '%s' (%d)\n",
+ func->old_name, ret);
+ goto err;
+ }
+
+ ret = register_ftrace_function(&ops->fops);
+ if (ret) {
+ pr_err("failed to register ftrace handler for function '%s' (%d)\n",
+ func->old_name, ret);
+ ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+ goto err;
+ }
+
+
+ } else {
+ list_add_rcu(&func->stack_node, &ops->func_stack);
+ }
+
+ func->state = KLP_ENABLED;
+
+ return 0;
+
+err:
+ list_del_rcu(&func->stack_node);
+ list_del(&ops->node);
+ kfree(ops);
+ return ret;
+}
+
+static int klp_disable_object(struct klp_object *obj)
+{
+ struct klp_func *func;
+ int ret;
+
+ for (func = obj->funcs; func->old_name; func++) {
+ if (func->state != KLP_ENABLED)
+ continue;
+
+ ret = klp_disable_func(func);
+ if (ret)
+ return ret;
+ }
+
+ obj->state = KLP_DISABLED;
+
+ return 0;
+}
+
+static int klp_enable_object(struct klp_object *obj)
+{
+ struct klp_func *func;
+ int ret;
+
+ if (WARN_ON(obj->state != KLP_DISABLED))
+ return -EINVAL;
+
+ if (WARN_ON(!klp_is_object_loaded(obj)))
+ return -EINVAL;
+
+ for (func = obj->funcs; func->old_name; func++) {
+ ret = klp_enable_func(func);
+ if (ret)
+ goto unregister;
+ }
+ obj->state = KLP_ENABLED;
+
+ return 0;
+
+unregister:
+ WARN_ON(klp_disable_object(obj));
+ return ret;
+}
+
+static int __klp_disable_patch(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ int ret;
+
+ /* enforce stacking: only the last enabled patch can be disabled */
+ if (!list_is_last(&patch->list, &klp_patches) &&
+ list_next_entry(patch, list)->state == KLP_ENABLED)
+ return -EBUSY;
+
+ pr_notice("disabling patch '%s'\n", patch->mod->name);
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ if (obj->state != KLP_ENABLED)
+ continue;
+
+ ret = klp_disable_object(obj);
+ if (ret)
+ return ret;
+ }
+
+ patch->state = KLP_DISABLED;
+
+ return 0;
+}
+
+/**
+ * klp_disable_patch() - disables a registered patch
+ * @patch: The registered, enabled patch to be disabled
+ *
+ * Unregisters the patched functions from ftrace.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_disable_patch(struct klp_patch *patch)
+{
+ int ret;
+
+ mutex_lock(&klp_mutex);
+
+ if (!klp_is_patch_registered(patch)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (patch->state == KLP_DISABLED) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = __klp_disable_patch(patch);
+
+err:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_disable_patch);
+
+static int __klp_enable_patch(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ int ret;
+
+ if (WARN_ON(patch->state != KLP_DISABLED))
+ return -EINVAL;
+
+ /* enforce stacking: only the first disabled patch can be enabled */
+ if (patch->list.prev != &klp_patches &&
+ list_prev_entry(patch, list)->state == KLP_DISABLED)
+ return -EBUSY;
+
+ pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
+ add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
+
+ pr_notice("enabling patch '%s'\n", patch->mod->name);
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ klp_find_object_module(obj);
+
+ if (!klp_is_object_loaded(obj))
+ continue;
+
+ ret = klp_enable_object(obj);
+ if (ret)
+ goto unregister;
+ }
+
+ patch->state = KLP_ENABLED;
+
+ return 0;
+
+unregister:
+ WARN_ON(__klp_disable_patch(patch));
+ return ret;
+}
+
+/**
+ * klp_enable_patch() - enables a registered patch
+ * @patch: The registered, disabled patch to be enabled
+ *
+ * Performs the needed symbol lookups and code relocations,
+ * then registers the patched functions with ftrace.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_enable_patch(struct klp_patch *patch)
+{
+ int ret;
+
+ mutex_lock(&klp_mutex);
+
+ if (!klp_is_patch_registered(patch)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = __klp_enable_patch(patch);
+
+err:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_enable_patch);
+
+/*
+ * Sysfs Interface
+ *
+ * /sys/kernel/livepatch
+ * /sys/kernel/livepatch/<patch>
+ * /sys/kernel/livepatch/<patch>/enabled
+ * /sys/kernel/livepatch/<patch>/<object>
+ * /sys/kernel/livepatch/<patch>/<object>/<func>
+ */
+
+static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct klp_patch *patch;
+ int ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return -EINVAL;
+
+ if (val != KLP_DISABLED && val != KLP_ENABLED)
+ return -EINVAL;
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+
+ mutex_lock(&klp_mutex);
+
+ if (val == patch->state) {
+ /* already in requested state */
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (val == KLP_ENABLED) {
+ ret = __klp_enable_patch(patch);
+ if (ret)
+ goto err;
+ } else {
+ ret = __klp_disable_patch(patch);
+ if (ret)
+ goto err;
+ }
+
+ mutex_unlock(&klp_mutex);
+
+ return count;
+
+err:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+
+static ssize_t enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct klp_patch *patch;
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+ return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
+}
+
+static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
+static struct attribute *klp_patch_attrs[] = {
+ &enabled_kobj_attr.attr,
+ NULL
+};
+
+static void klp_kobj_release_patch(struct kobject *kobj)
+{
+ /*
+ * Once we have a consistency model we'll need to module_put() the
+ * patch module here. See klp_register_patch() for more details.
+ */
+}
+
+static struct kobj_type klp_ktype_patch = {
+ .release = klp_kobj_release_patch,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = klp_patch_attrs,
+};
+
+static void klp_kobj_release_func(struct kobject *kobj)
+{
+}
+
+static struct kobj_type klp_ktype_func = {
+ .release = klp_kobj_release_func,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/*
+ * Free all functions' kobjects in the array up to some limit. When limit is
+ * NULL, all kobjects are freed.
+ */
+static void klp_free_funcs_limited(struct klp_object *obj,
+ struct klp_func *limit)
+{
+ struct klp_func *func;
+
+ for (func = obj->funcs; func->old_name && func != limit; func++)
+ kobject_put(&func->kobj);
+}
+
+/* Clean up when a patched object is unloaded */
+static void klp_free_object_loaded(struct klp_object *obj)
+{
+ struct klp_func *func;
+
+ obj->mod = NULL;
+
+ for (func = obj->funcs; func->old_name; func++)
+ func->old_addr = 0;
+}
+
+/*
+ * Free all objects' kobjects in the array up to some limit. When limit is
+ * NULL, all kobjects are freed.
+ */
+static void klp_free_objects_limited(struct klp_patch *patch,
+ struct klp_object *limit)
+{
+ struct klp_object *obj;
+
+ for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
+ klp_free_funcs_limited(obj, NULL);
+ kobject_put(obj->kobj);
+ }
+}
+
+static void klp_free_patch(struct klp_patch *patch)
+{
+ klp_free_objects_limited(patch, NULL);
+ if (!list_empty(&patch->list))
+ list_del(&patch->list);
+ kobject_put(&patch->kobj);
+}
+
+static int klp_init_func(struct klp_object *obj, struct klp_func *func)
+{
+ INIT_LIST_HEAD(&func->stack_node);
+ func->state = KLP_DISABLED;
+
+ return kobject_init_and_add(&func->kobj, &klp_ktype_func,
+ obj->kobj, func->old_name);
+}
+
+/* parts of the initialization that are done only when the object is loaded */
+static int klp_init_object_loaded(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ struct klp_func *func;
+ int ret;
+
+ if (obj->relocs) {
+ ret = klp_write_object_relocations(patch->mod, obj);
+ if (ret)
+ return ret;
+ }
+
+ for (func = obj->funcs; func->old_name; func++) {
+ ret = klp_find_verify_func_addr(obj, func);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
+{
+ struct klp_func *func;
+ int ret;
+ const char *name;
+
+ if (!obj->funcs)
+ return -EINVAL;
+
+ obj->state = KLP_DISABLED;
+
+ klp_find_object_module(obj);
+
+ name = klp_is_module(obj) ? obj->name : "vmlinux";
+ obj->kobj = kobject_create_and_add(name, &patch->kobj);
+ if (!obj->kobj)
+ return -ENOMEM;
+
+ for (func = obj->funcs; func->old_name; func++) {
+ ret = klp_init_func(obj, func);
+ if (ret)
+ goto free;
+ }
+
+ if (klp_is_object_loaded(obj)) {
+ ret = klp_init_object_loaded(patch, obj);
+ if (ret)
+ goto free;
+ }
+
+ return 0;
+
+free:
+ klp_free_funcs_limited(obj, func);
+ kobject_put(obj->kobj);
+ return ret;
+}
+
+static int klp_init_patch(struct klp_patch *patch)
+{
+ struct klp_object *obj;
+ int ret;
+
+ if (!patch->objs)
+ return -EINVAL;
+
+ mutex_lock(&klp_mutex);
+
+ patch->state = KLP_DISABLED;
+
+ ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
+ klp_root_kobj, patch->mod->name);
+ if (ret)
+ goto unlock;
+
+ for (obj = patch->objs; obj->funcs; obj++) {
+ ret = klp_init_object(patch, obj);
+ if (ret)
+ goto free;
+ }
+
+ list_add_tail(&patch->list, &klp_patches);
+
+ mutex_unlock(&klp_mutex);
+
+ return 0;
+
+free:
+ klp_free_objects_limited(patch, obj);
+ kobject_put(&patch->kobj);
+unlock:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+
+/**
+ * klp_unregister_patch() - unregisters a patch
+ * @patch: Disabled patch to be unregistered
+ *
+ * Frees the data structures and removes the sysfs interface.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_unregister_patch(struct klp_patch *patch)
+{
+ int ret = 0;
+
+ mutex_lock(&klp_mutex);
+
+ if (!klp_is_patch_registered(patch)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (patch->state == KLP_ENABLED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ klp_free_patch(patch);
+
+out:
+ mutex_unlock(&klp_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_unregister_patch);
+
+/**
+ * klp_register_patch() - registers a patch
+ * @patch: Patch to be registered
+ *
+ * Initializes the data structure associated with the patch and
+ * creates the sysfs interface.
+ *
+ * Return: 0 on success, otherwise error
+ */
+int klp_register_patch(struct klp_patch *patch)
+{
+ int ret;
+
+ if (!klp_initialized())
+ return -ENODEV;
+
+ if (!patch || !patch->mod)
+ return -EINVAL;
+
+ /*
+ * A reference is taken on the patch module to prevent it from being
+ * unloaded. Right now, we don't allow patch modules to unload since
+ * there is currently no method to determine if a thread is still
+ * running in the patched code contained in the patch module once
+ * the ftrace registration is successful.
+ */
+ if (!try_module_get(patch->mod))
+ return -ENODEV;
+
+ ret = klp_init_patch(patch);
+ if (ret)
+ module_put(patch->mod);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(klp_register_patch);
+
+static void klp_module_notify_coming(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ struct module *pmod = patch->mod;
+ struct module *mod = obj->mod;
+ int ret;
+
+ ret = klp_init_object_loaded(patch, obj);
+ if (ret)
+ goto err;
+
+ if (patch->state == KLP_DISABLED)
+ return;
+
+ pr_notice("applying patch '%s' to loading module '%s'\n",
+ pmod->name, mod->name);
+
+ ret = klp_enable_object(obj);
+ if (!ret)
+ return;
+
+err:
+ pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
+ pmod->name, mod->name, ret);
+}
+
+static void klp_module_notify_going(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ struct module *pmod = patch->mod;
+ struct module *mod = obj->mod;
+ int ret;
+
+ if (patch->state == KLP_DISABLED)
+ goto disabled;
+
+ pr_notice("reverting patch '%s' on unloading module '%s'\n",
+ pmod->name, mod->name);
+
+ ret = klp_disable_object(obj);
+ if (ret)
+ pr_warn("failed to revert patch '%s' on module '%s' (%d)\n",
+ pmod->name, mod->name, ret);
+
+disabled:
+ klp_free_object_loaded(obj);
+}
+
+static int klp_module_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct module *mod = data;
+ struct klp_patch *patch;
+ struct klp_object *obj;
+
+ if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
+ return 0;
+
+ mutex_lock(&klp_mutex);
+
+ list_for_each_entry(patch, &klp_patches, list) {
+ for (obj = patch->objs; obj->funcs; obj++) {
+ if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
+ continue;
+
+ if (action == MODULE_STATE_COMING) {
+ obj->mod = mod;
+ klp_module_notify_coming(patch, obj);
+ } else /* MODULE_STATE_GOING */
+ klp_module_notify_going(patch, obj);
+
+ break;
+ }
+ }
+
+ mutex_unlock(&klp_mutex);
+
+ return 0;
+}
+
+static struct notifier_block klp_module_nb = {
+ .notifier_call = klp_module_notify,
+ .priority = INT_MIN+1, /* called late but before ftrace notifier */
+};
+
+static int klp_init(void)
+{
+ int ret;
+
+ ret = klp_check_compiler_support();
+ if (ret) {
+ pr_info("Your compiler is too old; turning off.\n");
+ return -EINVAL;
+ }
+
+ ret = register_module_notifier(&klp_module_nb);
+ if (ret)
+ return ret;
+
+ klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
+ if (!klp_root_kobj) {
+ ret = -ENOMEM;
+ goto unregister;
+ }
+
+ return 0;
+
+unregister:
+ unregister_module_notifier(&klp_module_nb);
+ return ret;
+}
+
+module_init(klp_init);
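
As a usage note for the sysfs interface created above: writing 0 or 1 to /sys/kernel/livepatch/<patch>/enabled routes through enabled_store() into __klp_disable_patch()/__klp_enable_patch(). A hypothetical userspace sketch follows; the patch name "livepatch_sample" is assumed for illustration only, so substitute the name of an actually loaded patch module.

#include <stdio.h>

/* Write "0" (disable) or "1" (enable) to a patch's sysfs knob. */
static int set_patch_enabled(const char *patch, int enable)
{
	char path[128];
	FILE *f;
	int ret;

	snprintf(path, sizeof(path),
		 "/sys/kernel/livepatch/%s/enabled", patch);

	f = fopen(path, "w");
	if (!f)
		return -1;

	ret = fprintf(f, "%d\n", enable) < 0 ? -1 : 0;
	if (fclose(f) != 0)
		ret = -1;
	return ret;
}

int main(void)
{
	/* Disable and then re-enable the (assumed) sample patch. */
	if (set_patch_enabled("livepatch_sample", 0) != 0)
		perror("disable livepatch_sample");
	if (set_patch_enabled("livepatch_sample", 1) != 0)
		perror("enable livepatch_sample");
	return 0;
}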