livepatch: Change unsigned long old_addr -> void *old_func in struct klp_func
The addresses of the to-be-patched function and of the new function are stored
in struct klp_func as:
void *new_func;
unsigned long old_addr;
The different naming scheme and type are derived from the way
the addresses are set. @old_addr is assigned at runtime using
kallsyms-based search. @new_func is statically initialized,
for example:
static struct klp_func funcs[] = {
{
.old_name = "cmdline_proc_show",
.new_func = livepatch_cmdline_proc_show,
}, { }
};
This patch changes unsigned long old_addr -> void *old_func. It removes
some confusion when these addresses are later used in the code. It is
motivated by a followup patch that adds a special NOP struct klp_func,
where we want to assign func->new_func = func->old_addr, or respectively
func->new_func = func->old_func.
This patch does not modify the existing behavior.
Suggested-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Joe Lawrence <joe.lawrence@redhat.com>
Acked-by: Alice Ferrazzi <alice.ferrazzi@gmail.com>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
This commit is contained in:
@@ -40,7 +40,7 @@
|
||||
* @new_func: pointer to the patched function code
|
||||
* @old_sympos: a hint indicating which symbol position the old function
|
||||
* can be found (optional)
|
||||
* @old_addr: the address of the function being patched
|
||||
* @old_func: pointer to the function being patched
|
||||
* @kobj: kobject for sysfs resources
|
||||
* @stack_node: list node for klp_ops func_stack list
|
||||
* @old_size: size of the old function
|
||||
@@ -77,7 +77,7 @@ struct klp_func {
|
||||
unsigned long old_sympos;
|
||||
|
||||
/* internal */
|
||||
unsigned long old_addr;
|
||||
void *old_func;
|
||||
struct kobject kobj;
|
||||
struct list_head stack_node;
|
||||
unsigned long old_size, new_size;
|
||||
|
||||
@@ -648,7 +648,7 @@ static void klp_free_object_loaded(struct klp_object *obj)
|
||||
obj->mod = NULL;
|
||||
|
||||
klp_for_each_func(obj, func)
|
||||
func->old_addr = 0;
|
||||
func->old_func = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -721,11 +721,11 @@ static int klp_init_object_loaded(struct klp_patch *patch,
|
||||
klp_for_each_func(obj, func) {
|
||||
ret = klp_find_object_symbol(obj->name, func->old_name,
|
||||
func->old_sympos,
|
||||
&func->old_addr);
|
||||
(unsigned long *)&func->old_func);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = kallsyms_lookup_size_offset(func->old_addr,
|
||||
ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
|
||||
&func->old_size, NULL);
|
||||
if (!ret) {
|
||||
pr_err("kallsyms size lookup failed for '%s'\n",
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
|
||||
static LIST_HEAD(klp_ops);
|
||||
|
||||
struct klp_ops *klp_find_ops(unsigned long old_addr)
|
||||
struct klp_ops *klp_find_ops(void *old_func)
|
||||
{
|
||||
struct klp_ops *ops;
|
||||
struct klp_func *func;
|
||||
@@ -42,7 +42,7 @@ struct klp_ops *klp_find_ops(unsigned long old_addr)
|
||||
list_for_each_entry(ops, &klp_ops, node) {
|
||||
func = list_first_entry(&ops->func_stack, struct klp_func,
|
||||
stack_node);
|
||||
if (func->old_addr == old_addr)
|
||||
if (func->old_func == old_func)
|
||||
return ops;
|
||||
}
|
||||
|
||||
@@ -142,17 +142,18 @@ static void klp_unpatch_func(struct klp_func *func)
|
||||
|
||||
if (WARN_ON(!func->patched))
|
||||
return;
|
||||
if (WARN_ON(!func->old_addr))
|
||||
if (WARN_ON(!func->old_func))
|
||||
return;
|
||||
|
||||
ops = klp_find_ops(func->old_addr);
|
||||
ops = klp_find_ops(func->old_func);
|
||||
if (WARN_ON(!ops))
|
||||
return;
|
||||
|
||||
if (list_is_singular(&ops->func_stack)) {
|
||||
unsigned long ftrace_loc;
|
||||
|
||||
ftrace_loc = klp_get_ftrace_location(func->old_addr);
|
||||
ftrace_loc =
|
||||
klp_get_ftrace_location((unsigned long)func->old_func);
|
||||
if (WARN_ON(!ftrace_loc))
|
||||
return;
|
||||
|
||||
@@ -174,17 +175,18 @@ static int klp_patch_func(struct klp_func *func)
|
||||
struct klp_ops *ops;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(!func->old_addr))
|
||||
if (WARN_ON(!func->old_func))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ON(func->patched))
|
||||
return -EINVAL;
|
||||
|
||||
ops = klp_find_ops(func->old_addr);
|
||||
ops = klp_find_ops(func->old_func);
|
||||
if (!ops) {
|
||||
unsigned long ftrace_loc;
|
||||
|
||||
ftrace_loc = klp_get_ftrace_location(func->old_addr);
|
||||
ftrace_loc =
|
||||
klp_get_ftrace_location((unsigned long)func->old_func);
|
||||
if (!ftrace_loc) {
|
||||
pr_err("failed to find location for function '%s'\n",
|
||||
func->old_name);
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
* struct klp_ops - structure for tracking registered ftrace ops structs
|
||||
*
|
||||
* A single ftrace_ops is shared between all enabled replacement functions
|
||||
* (klp_func structs) which have the same old_addr. This allows the switch
|
||||
* (klp_func structs) which have the same old_func. This allows the switch
|
||||
* between function versions to happen instantaneously by updating the klp_ops
|
||||
* struct's func_stack list. The winner is the klp_func at the top of the
|
||||
* func_stack (front of the list).
|
||||
@@ -25,7 +25,7 @@ struct klp_ops {
|
||||
struct ftrace_ops fops;
|
||||
};
|
||||
|
||||
struct klp_ops *klp_find_ops(unsigned long old_addr);
|
||||
struct klp_ops *klp_find_ops(void *old_func);
|
||||
|
||||
int klp_patch_object(struct klp_object *obj);
|
||||
void klp_unpatch_object(struct klp_object *obj);
|
||||
|
||||
@@ -224,11 +224,11 @@ static int klp_check_stack_func(struct klp_func *func,
|
||||
* Check for the to-be-patched function
|
||||
* (the previous func).
|
||||
*/
|
||||
ops = klp_find_ops(func->old_addr);
|
||||
ops = klp_find_ops(func->old_func);
|
||||
|
||||
if (list_is_singular(&ops->func_stack)) {
|
||||
/* original function */
|
||||
func_addr = func->old_addr;
|
||||
func_addr = (unsigned long)func->old_func;
|
||||
func_size = func->old_size;
|
||||
} else {
|
||||
/* previously patched function */
|
||||
|
||||
Reference in New Issue
Block a user