/*
 * ftrace_hook.c - hooking kernel functions using the ftrace framework
 */
#define pr_fmt(fmt) "cn_exec_connect: " fmt
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <uapi/linux/in.h>
#include "ftrace_hook.h"
/*
 * There are two ways of preventing vicious recursive loops when hooking:
 * - detect recursion using the function return address (USE_FENTRY_OFFSET = 0)
 * - avoid recursion by jumping over the ftrace call (USE_FENTRY_OFFSET = 1)
 */
#define USE_FENTRY_OFFSET 0
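
/*
 * Without either guard, calling the original function from inside the hook
 * would hit the ftrace entry point again and the thunk would redirect
 * execution back into the hook, looping forever:
 *
 *	hook -> hooked function entry -> ftrace -> thunk -> hook -> ...
 */
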
static int fh_resolve_hook_address(struct ftrace_hook *hook)
{
	hook->address = kallsyms_lookup_name(hook->name);
	if (!hook->address) {
		pr_debug("unresolved symbol: %s\n", hook->name);
		return -ENOENT;
	}

#if USE_FENTRY_OFFSET
	*((unsigned long *)hook->original) = hook->address + MCOUNT_INSN_SIZE;
#else
	*((unsigned long *)hook->original) = hook->address;
#endif

	return 0;
}

/*
 * The ftrace callback: redirects execution to the hook function by rewriting
 * the saved instruction pointer. With return-address detection
 * (USE_FENTRY_OFFSET = 0), calls originating from this module are left
 * untouched so the original function can run and recursion is avoided.
 */
static void notrace fh_ftrace_thunk(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *ops,
				    struct pt_regs *regs)
{
	struct ftrace_hook *hook = container_of(ops, struct ftrace_hook, ops);

#if USE_FENTRY_OFFSET
	regs->ip = (unsigned long)hook->function;
#else
	if (!within_module_core(parent_ip, THIS_MODULE))
		regs->ip = (unsigned long)hook->function;
#endif
}
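
/*
 * Illustrative sketch (not part of this file): the function that regs->ip is
 * redirected to typically has the same prototype as the hooked function and
 * calls the original through the pointer filled in by
 * fh_resolve_hook_address(). The names fh_sys_kill and real_sys_kill are
 * hypothetical and would be defined by the module that uses this helper:
 *
 *	static asmlinkage long (*real_sys_kill)(struct pt_regs *regs);
 *
 *	static asmlinkage long fh_sys_kill(struct pt_regs *regs)
 *	{
 *		return real_sys_kill(regs);
 *	}
 */
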
/**
* fh_install_hook() - register and enable a single hook
* @hook: a hook to install
*
* Returns: zero on success, negative error code otherwise.
*/
int fh_install_hook(struct ftrace_hook *hook)
{
	int err;

	err = fh_resolve_hook_address(hook);
	if (err)
		return err;

	/*
	 * We're going to modify the %rip register, so we need the IPMODIFY
	 * flag and SAVE_REGS as its prerequisite. ftrace's own anti-recursion
	 * guard is useless once we change %rip, so disable it with
	 * RECURSION_SAFE; we perform our own reentry check in the thunk.
	 */
	hook->ops.func = fh_ftrace_thunk;
	hook->ops.flags = FTRACE_OPS_FL_SAVE_REGS
			| FTRACE_OPS_FL_RECURSION_SAFE
			| FTRACE_OPS_FL_IPMODIFY;

	err = ftrace_set_filter_ip(&hook->ops, hook->address, 0, 0);
	if (err) {
		pr_debug("ftrace_set_filter_ip() failed: %d\n", err);
		return err;
	}

	err = register_ftrace_function(&hook->ops);
	if (err) {
		pr_debug("register_ftrace_function() failed: %d\n", err);
		ftrace_set_filter_ip(&hook->ops, hook->address, 1, 0);
		return err;
	}

	return 0;
}
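
/*
 * Example usage (a minimal sketch, assuming the struct ftrace_hook fields
 * accessed in this file and the hypothetical fh_sys_kill/real_sys_kill
 * wrappers sketched above; "__x64_sys_kill" is the x86_64 pt_regs-based
 * syscall stub on kernels >= 4.17):
 *
 *	static struct ftrace_hook kill_hook = {
 *		.name     = "__x64_sys_kill",
 *		.function = fh_sys_kill,
 *		.original = &real_sys_kill,
 *	};
 *
 *	err = fh_install_hook(&kill_hook);
 *	if (err)
 *		return err;
 */
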
/**
 * fh_install_hooks() - register and enable multiple hooks
 * @hooks: array of hooks to install
 * @num: number of hooks in the array
 *
 * If any hook fails to install, the hooks installed so far are removed.
 *
 * Returns: zero on success, negative error code otherwise.
 */
int fh_install_hooks(struct ftrace_hook *hooks, int num)
{
	int err = 0;
	int i;

	if (!hooks || num < 0)
		return -EINVAL;

	for (i = 0; i < num; i++) {
		err = fh_install_hook(&hooks[i]);
		if (err) {
			if (i > 0)
				fh_remove_hooks(hooks, i);
			break;
		}
	}

	return err;
}

/**
* fh_remove_hook() - disable and unregister a single hook
* @hook: a hook to remove
*/
void fh_remove_hook(struct ftrace_hook *hook)
{
	int err;

	err = unregister_ftrace_function(&hook->ops);
	if (err)
		pr_debug("unregister_ftrace_function() failed: %d\n", err);

	err = ftrace_set_filter_ip(&hook->ops, hook->address, 1, 0);
	if (err)
		pr_debug("ftrace_set_filter_ip() failed: %d\n", err);
}

/**
 * fh_remove_hooks() - disable and unregister multiple hooks
 * @hooks: array of hooks to remove
 * @num: number of hooks in the array
 */
void fh_remove_hooks(struct ftrace_hook *hooks, int num)
{
	int i;

	if (!hooks || num < 0)
		return;

	for (i = 0; i < num; i++)
		fh_remove_hook(&hooks[i]);
}
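
/*
 * Typical module wiring (a sketch only; the hook table and wrapper names are
 * hypothetical, as in the examples above):
 *
 *	static struct ftrace_hook hooks[] = {
 *		{
 *			.name     = "__x64_sys_kill",
 *			.function = fh_sys_kill,
 *			.original = &real_sys_kill,
 *		},
 *	};
 *
 *	static int __init fh_init(void)
 *	{
 *		return fh_install_hooks(hooks, ARRAY_SIZE(hooks));
 *	}
 *
 *	static void __exit fh_exit(void)
 *	{
 *		fh_remove_hooks(hooks, ARRAY_SIZE(hooks));
 *	}
 */
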
#ifndef CONFIG_X86_64
#error Currently only x86_64 architecture is supported
#endif
#if defined(CONFIG_X86_64) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0))
#define PTREGS_SYSCALL_STUBS 1
#endif
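
/*
 * With PTREGS_SYSCALL_STUBS (x86_64, kernels >= 4.17) syscall handlers take a
 * single struct pt_regs * and the stubs are named "__x64_sys_*"; on older
 * kernels the handler receives its arguments directly and is named "sys_*".
 * A hook wrapper therefore usually comes in two flavours (hypothetical names,
 * a sketch only):
 *
 *	#ifdef PTREGS_SYSCALL_STUBS
 *	static asmlinkage long (*real_sys_kill)(struct pt_regs *regs);
 *	static asmlinkage long fh_sys_kill(struct pt_regs *regs)
 *	{
 *		return real_sys_kill(regs);
 *	}
 *	#else
 *	static asmlinkage long (*real_sys_kill)(pid_t pid, int sig);
 *	static asmlinkage long fh_sys_kill(pid_t pid, int sig)
 *	{
 *		return real_sys_kill(pid, sig);
 *	}
 *	#endif
 */
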
/*
* Tail call optimization can interfere with recursion detection based on
* return address on the stack. Disable it to avoid machine hangups.
*/
#if !USE_FENTRY_OFFSET
#pragma GCC optimize("-fno-optimize-sibling-calls")
#endif