// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * shadow.c - Shadow Variables
 *
 * Copyright (C) 2014 Josh Poimboeuf <jpoimboe@redhat.com>
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
 */

/**
 * DOC: Shadow variable API concurrency notes:
 *
 * The shadow variable API provides a simple relationship between an
 * <obj, id> pair and a pointer value. It is the responsibility of the
 * caller to provide any mutual exclusion required of the shadow data.
 *
 * Once a shadow variable is attached to its parent object via the
 * klp_shadow_*alloc() API calls, it is considered live: any subsequent
 * call to klp_shadow_get() may then return the shadow variable's data
 * pointer. Callers of klp_shadow_*alloc() should prepare shadow data
 * accordingly.
 *
 * The klp_shadow_*alloc() API calls may allocate memory for new shadow
 * variable structures. Their implementation does not call kmalloc
 * inside any spinlocks, but API callers should pass GFP flags according
 * to their specific needs.
 *
 * The klp_shadow_hash is an RCU-enabled hashtable and is safe against
 * concurrent klp_shadow_free() and klp_shadow_get() operations.
 */
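
/*
 * Minimal lifecycle sketch (an illustration only; the parent pointer
 * "obj", the SV_EXAMPLE id and the enclosing livepatch module are
 * hypothetical and not part of this file). It shows the basic
 * attach/lookup/detach flow described above:
 *
 *	#define SV_EXAMPLE	1
 *
 *	int *count;
 *
 *	count = klp_shadow_alloc(obj, SV_EXAMPLE, sizeof(*count),
 *				 GFP_KERNEL, NULL, NULL);
 *	if (count)
 *		(*count)++;
 *
 *	count = klp_shadow_get(obj, SV_EXAMPLE);
 *
 *	klp_shadow_free(obj, SV_EXAMPLE, NULL);
 */
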
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/livepatch.h>

static DEFINE_HASHTABLE(klp_shadow_hash, 12);

/*
 * klp_shadow_lock provides exclusive access to the klp_shadow_hash and
 * the shadow variables it references.
 */
static DEFINE_SPINLOCK(klp_shadow_lock);

/**
 * struct klp_shadow - shadow variable structure
 * @node: klp_shadow_hash hash table node
 * @rcu_head: RCU is used to safely free this structure
 * @obj: pointer to parent object
 * @id: data identifier
 * @data: data area
 */
struct klp_shadow {
	struct hlist_node node;
	struct rcu_head rcu_head;
	void *obj;
	unsigned long id;
	char data[];
};

/**
 * klp_shadow_match() - verify a shadow variable matches given <obj, id>
 * @shadow: shadow variable to match
 * @obj: pointer to parent object
 * @id: data identifier
 *
 * Return: true if the shadow variable matches.
 */
static inline bool klp_shadow_match(struct klp_shadow *shadow, void *obj,
				    unsigned long id)
{
	return shadow->obj == obj && shadow->id == id;
}

/**
 * klp_shadow_get() - retrieve a shadow variable data pointer
 * @obj: pointer to parent object
 * @id: data identifier
 *
 * Return: the shadow variable data element, NULL on failure.
 */
void *klp_shadow_get(void *obj, unsigned long id)
{
	struct klp_shadow *shadow;

	rcu_read_lock();

	hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,
				   (unsigned long)obj) {

		if (klp_shadow_match(shadow, obj, id)) {
			rcu_read_unlock();
			return shadow->data;
		}
	}

	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(klp_shadow_get);
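
/*
 * Sketch of a klp_shadow_get() caller (illustrative; "struct sta_info",
 * SV_LEAK and the patched function are hypothetical). The lookup itself
 * is RCU-safe, but serializing access to the returned data is up to the
 * caller, per the DOC block above:
 *
 *	static void livepatch_example_disconnect(struct sta_info *sta)
 *	{
 *		int **shadow_leak;
 *
 *		shadow_leak = klp_shadow_get(sta, SV_LEAK);
 *		if (shadow_leak)
 *			kfree(*shadow_leak);
 *	}
 */
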
static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id,
				       size_t size, gfp_t gfp_flags,
				       klp_shadow_ctor_t ctor, void *ctor_data,
				       bool warn_on_exist)
{
	struct klp_shadow *new_shadow;
	void *shadow_data;
	unsigned long flags;

	/* Check if the shadow variable already exists */
	shadow_data = klp_shadow_get(obj, id);
	if (shadow_data)
		goto exists;

	/*
	 * Allocate a new shadow variable. Fill it with zeroes by default.
	 * More complex setting can be done by @ctor function. But it is
	 * called only when the buffer is really used (under klp_shadow_lock).
	 */
	new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags);
	if (!new_shadow)
		return NULL;

	/* Look for <obj, id> again under the lock */
	spin_lock_irqsave(&klp_shadow_lock, flags);
	shadow_data = klp_shadow_get(obj, id);
	if (unlikely(shadow_data)) {
		/*
		 * Shadow variable was found, throw away speculative
		 * allocation.
		 */
		spin_unlock_irqrestore(&klp_shadow_lock, flags);
		kfree(new_shadow);
		goto exists;
	}

	new_shadow->obj = obj;
	new_shadow->id = id;

	if (ctor) {
		int err;

		err = ctor(obj, new_shadow->data, ctor_data);
		if (err) {
			spin_unlock_irqrestore(&klp_shadow_lock, flags);
			kfree(new_shadow);
			pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n",
			       obj, id, err);
			return NULL;
		}
	}

	/* No <obj, id> found, so attach the newly allocated one */
	hash_add_rcu(klp_shadow_hash, &new_shadow->node,
		     (unsigned long)new_shadow->obj);
	spin_unlock_irqrestore(&klp_shadow_lock, flags);

	return new_shadow->data;

exists:
	if (warn_on_exist) {
		WARN(1, "Duplicate shadow variable <%p, %lx>\n", obj, id);
		return NULL;
	}

	return shadow_data;
}

/**
 * klp_shadow_alloc() - allocate and add a new shadow variable
 * @obj: pointer to parent object
 * @id: data identifier
 * @size: size of attached data
 * @gfp_flags: GFP mask for allocation
 * @ctor: custom constructor to initialize the shadow data (optional)
 * @ctor_data: pointer to any data needed by @ctor (optional)
 *
 * Allocates @size bytes for new shadow variable data using @gfp_flags.
 * The data are zeroed by default. They are further initialized by @ctor
 * function if it is not NULL. The new shadow variable is then added
 * to the global hashtable.
 *
 * If an existing <obj, id> shadow variable can be found, this routine will
 * issue a WARN, exit early and return NULL.
 *
 * This function guarantees that the constructor function is called only when
 * the variable did not exist before. The cost is that @ctor is called
 * in atomic context under a spin lock.
 *
 * Return: the shadow variable data element, NULL on duplicate or
 * failure.
 */
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data)
{
	return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
					 ctor, ctor_data, true);
}
EXPORT_SYMBOL_GPL(klp_shadow_alloc);
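
/*
 * Sketch of a klp_shadow_alloc() call with a constructor (illustrative;
 * "leak", SV_LEAK and shadow_leak_ctor() are hypothetical, loosely in the
 * style of the livepatch sample modules). Because @ctor runs in atomic
 * context under klp_shadow_lock, it must not sleep:
 *
 *	static int shadow_leak_ctor(void *obj, void *shadow_data,
 *				    void *ctor_data)
 *	{
 *		int **shadow_leak = shadow_data;
 *
 *		*shadow_leak = ctor_data;
 *		return 0;
 *	}
 *
 *	leak = kzalloc(sizeof(*leak), GFP_KERNEL);
 *	if (leak) {
 *		shadow_leak = klp_shadow_alloc(obj, SV_LEAK, sizeof(leak),
 *					       GFP_KERNEL, shadow_leak_ctor,
 *					       leak);
 *		if (!shadow_leak)
 *			pr_err("%s: failed to allocate shadow variable\n",
 *			       __func__);
 *	}
 */
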
/**
 * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable
 * @obj: pointer to parent object
 * @id: data identifier
 * @size: size of attached data
 * @gfp_flags: GFP mask for allocation
 * @ctor: custom constructor to initialize the shadow data (optional)
 * @ctor_data: pointer to any data needed by @ctor (optional)
 *
 * Returns a pointer to existing shadow data if an <obj, id> shadow
 * variable is already present. Otherwise, it creates a new shadow
 * variable like klp_shadow_alloc().
 *
 * This function guarantees that only one shadow variable exists with the given
 * @id for the given @obj. It also guarantees that the constructor function
 * will be called only when the variable did not exist before. The cost is
 * that @ctor is called in atomic context under a spin lock.
 *
 * Return: the shadow variable data element, NULL on failure.
 */
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data)
{
	return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
					 ctor, ctor_data, false);
}
EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc);
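
/*
 * Sketch of klp_shadow_get_or_alloc() use (illustrative; "struct sta_info"
 * and SV_COUNT are hypothetical). This variant suits patched code paths
 * that may encounter parent objects created before the shadow variable was
 * ever attached, so the <obj, id> pair may or may not exist yet:
 *
 *	static void livepatch_example_update(struct sta_info *sta)
 *	{
 *		int *count;
 *
 *		count = klp_shadow_get_or_alloc(sta, SV_COUNT, sizeof(*count),
 *						GFP_ATOMIC, NULL, NULL);
 *		if (count)
 *			(*count)++;
 *	}
 */
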
static void klp_shadow_free_struct(struct klp_shadow *shadow,
				   klp_shadow_dtor_t dtor)
{
	hash_del_rcu(&shadow->node);
	if (dtor)
		dtor(shadow->obj, shadow->data);
	kfree_rcu(shadow, rcu_head);
}

/**
 * klp_shadow_free() - detach and free a <obj, id> shadow variable
 * @obj: pointer to parent object
 * @id: data identifier
 * @dtor: custom callback that can be used to unregister the variable
 *	  and/or free data that the shadow variable points to (optional)
 *
 * This function releases the memory for this <obj, id> shadow variable
 * instance, callers should stop referencing it accordingly.
 */
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
{
	struct klp_shadow *shadow;
	unsigned long flags;

	spin_lock_irqsave(&klp_shadow_lock, flags);

	/* Delete <obj, id> from hash */
	hash_for_each_possible(klp_shadow_hash, shadow, node,
			       (unsigned long)obj) {

		if (klp_shadow_match(shadow, obj, id)) {
			klp_shadow_free_struct(shadow, dtor);
			break;
		}
	}

	spin_unlock_irqrestore(&klp_shadow_lock, flags);
}
EXPORT_SYMBOL_GPL(klp_shadow_free);
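
/*
 * Sketch of a klp_shadow_free() call with a destructor (illustrative;
 * "sta", SV_LEAK and livepatch_example_dtor() are hypothetical). The
 * dtor is invoked under klp_shadow_lock, before the shadow structure is
 * handed to kfree_rcu():
 *
 *	static void livepatch_example_dtor(void *obj, void *shadow_data)
 *	{
 *		int **shadow_leak = shadow_data;
 *
 *		kfree(*shadow_leak);
 *	}
 *
 *	klp_shadow_free(sta, SV_LEAK, livepatch_example_dtor);
 */
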
/**
 * klp_shadow_free_all() - detach and free all <*, id> shadow variables
 * @id: data identifier
 * @dtor: custom callback that can be used to unregister the variable
 *	  and/or free data that the shadow variable points to (optional)
 *
 * This function releases the memory for all <*, id> shadow variable
 * instances, callers should stop referencing them accordingly.
 */
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
{
	struct klp_shadow *shadow;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&klp_shadow_lock, flags);

	/* Delete all <*, id> from hash */
	hash_for_each(klp_shadow_hash, i, shadow, node) {
		if (klp_shadow_match(shadow, shadow->obj, id))
			klp_shadow_free_struct(shadow, dtor);
	}

	spin_unlock_irqrestore(&klp_shadow_lock, flags);
}
EXPORT_SYMBOL_GPL(klp_shadow_free_all);
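
/*
 * Sketch of a klp_shadow_free_all() call (illustrative; SV_LEAK and
 * livepatch_example_dtor() are the hypothetical names used above). A
 * livepatch module might drop every <*, SV_LEAK> instance from a single
 * cleanup point, e.g. its module exit function, reusing the same dtor
 * as klp_shadow_free():
 *
 *	static void livepatch_example_exit(void)
 *	{
 *		klp_shadow_free_all(SV_LEAK, livepatch_example_dtor);
 *	}
 */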