// This file is a part of Julia. License is MIT: https://julialang.org/license

#include "gc.h"

#ifndef _OS_WINDOWS_
#  include <sys/resource.h>
#endif
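
// Cap the total number of stack mappings the process may hold live at once.
// The Windows limits are far lower, presumably because malloc_stack there
// commits the whole buffer up front (MEM_COMMIT) instead of relying on lazy
// paging.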
#ifdef _P64
# ifdef _OS_WINDOWS_
#  define MAX_STACK_MAPPINGS 500
# else
#  define MAX_STACK_MAPPINGS 30000
# endif
#else
# ifdef _OS_WINDOWS_
#  define MAX_STACK_MAPPINGS 250
# else
#  define MAX_STACK_MAPPINGS 500
# endif
#endif

// number of stacks to always keep available per pool
#define MIN_STACK_MAPPINGS_PER_POOL 5
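
// bytes at the low end of each stack buffer left protected (PROT_NONE on
// POSIX, PAGE_GUARD on Windows) so that running off the end of the stack
// faults instead of silently corrupting adjacent memory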
const size_t jl_guard_size = (4096 * 8);

// process-wide count of live stack mappings, maintained by malloc_stack/free_stack
static volatile uint32_t num_stack_mappings = 0;

#ifdef _OS_WINDOWS_
#define MAP_FAILED NULL

static void *malloc_stack(size_t bufsz)
{
    void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    if (stk == NULL)
        return MAP_FAILED;
    // set up a guard region so that overrunning the stack faults (mirrors the
    // mprotect guard in the POSIX version below)
    DWORD dwOldProtect;
    if (!VirtualProtect(stk, jl_guard_size, PAGE_READWRITE | PAGE_GUARD, &dwOldProtect)) {
        VirtualFree(stk, 0, MEM_RELEASE);
        return MAP_FAILED;
    }
    jl_atomic_fetch_add(&num_stack_mappings, 1);
    return stk;
}

static void free_stack(void *stkbuf, size_t bufsz)
{
    VirtualFree(stkbuf, 0, MEM_RELEASE);
    jl_atomic_fetch_add(&num_stack_mappings, -1);
}

#else

static void *malloc_stack(size_t bufsz)
{
    void *stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (stk == MAP_FAILED)
        return MAP_FAILED;
#if !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK)
    // set up a guard page to detect stack overflow
    if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
        munmap(stk, bufsz);
        return MAP_FAILED;
    }
#endif
    jl_atomic_fetch_add(&num_stack_mappings, 1);
    return stk;
}

static void free_stack(void *stkbuf, size_t bufsz)
{
    munmap(stkbuf, bufsz);
    jl_atomic_fetch_add(&num_stack_mappings, -1);
}
#endif
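
// Both allocators above bump num_stack_mappings, which lets jl_malloc_stack
// (below) enforce MAX_STACK_MAPPINGS process-wide.

// Stack size classes. They grow roughly geometrically (alternating ~1.5x and
// ~1.33x steps), so above the smallest class, rounding a request up to the
// next class wastes at most about a third of the returned buffer.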
const unsigned pool_sizes[] = {
    128 * 1024,
    192 * 1024,
    256 * 1024,
    384 * 1024,
    512 * 1024,
    768 * 1024,
    1024 * 1024,
    1537 * 1024,
    2048 * 1024,
    3 * 1024 * 1024,
    4 * 1024 * 1024,
    6 * 1024 * 1024,
    8 * 1024 * 1024,
    12 * 1024 * 1024,
    16 * 1024 * 1024,
    24 * 1024 * 1024,
};
static_assert(sizeof(pool_sizes) == JL_N_STACK_POOLS * sizeof(pool_sizes[0]), "JL_N_STACK_POOLS size mismatch");

static unsigned select_pool(size_t nb)
{
    unsigned pool_id = 0;
    while (pool_sizes[pool_id] < nb)
        pool_id++;
    return pool_id;
}
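
// For example, select_pool(200 * 1024) scans past the 128k and 192k classes
// and returns the index of the 256k class. Callers check the request against
// pool_sizes[JL_N_STACK_POOLS - 1] first, since the scan assumes some class
// is large enough.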

static void _jl_free_stack(jl_ptls_t ptls, void *stkbuf, size_t bufsz)
{
    if (bufsz <= pool_sizes[JL_N_STACK_POOLS - 1]) {
        // exact pool-sized buffers go back on this thread's free list for reuse
        unsigned pool_id = select_pool(bufsz);
        if (pool_sizes[pool_id] == bufsz) {
            arraylist_push(&ptls->heap.free_stacks[pool_id], stkbuf);
            return;
        }
    }
    // everything else is returned to the OS immediately
    free_stack(stkbuf, bufsz);
}

JL_DLLEXPORT void jl_free_stack(void *stkbuf, size_t bufsz)
{
    _jl_free_stack(jl_get_ptls_states(), stkbuf, bufsz);
}
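
// Return a task's stack buffer to the free-stack pool. Note that, unlike
// _jl_free_stack, this never unmaps: a buffer that does not match a pool size
// exactly simply stays attached to the task until the task itself is swept.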
void jl_release_task_stack(jl_ptls_t ptls, jl_task_t *task)
{
    void *stkbuf = task->stkbuf;
    size_t bufsz = task->bufsz;
    if (bufsz <= pool_sizes[JL_N_STACK_POOLS - 1]) {
        unsigned pool_id = select_pool(bufsz);
        if (pool_sizes[pool_id] == bufsz) {
            task->stkbuf = NULL;
            arraylist_push(&ptls->heap.free_stacks[pool_id], stkbuf);
        }
    }
}

JL_DLLEXPORT void *jl_malloc_stack(size_t *bufsz, jl_task_t *owner)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    size_t ssize = *bufsz;
    void *stk = NULL;
    if (ssize <= pool_sizes[JL_N_STACK_POOLS - 1]) {
        // round the request up to the nearest size class and try the
        // per-thread free list before mapping fresh memory
        unsigned pool_id = select_pool(ssize);
        ssize = pool_sizes[pool_id];
        arraylist_t *pool = &ptls->heap.free_stacks[pool_id];
        if (pool->len > 0) {
            stk = arraylist_pop(pool);
        }
    }
    else {
        // oversized requests bypass the pools and get a page-aligned mapping
        ssize = LLT_ALIGN(ssize, jl_page_size);
    }
    if (stk == NULL) {
        if (num_stack_mappings >= MAX_STACK_MAPPINGS)
            return NULL;
        // TODO: allocate blocks of stacks? but need to mprotect individually anyways
        stk = malloc_stack(ssize);
        if (stk == MAP_FAILED)
            return NULL;
    }
    *bufsz = ssize;
    if (owner) {
        arraylist_t *live_tasks = &ptls->heap.live_tasks;
        arraylist_push(live_tasks, owner);
    }
    return stk;
}
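
// A minimal caller sketch (hypothetical; the real callers live elsewhere in
// the runtime):
//     size_t bufsz = 1 << 20;                    // ask for 1 MiB
//     void *stk = jl_malloc_stack(&bufsz, task); // bufsz is rounded up to the
//                                                // size class actually used
//     if (stk == NULL) {
//         // mapping cap hit or the OS refused memory; the caller must cope
//     }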

void sweep_stack_pools(void)
{
    // Stack sweeping algorithm:
    //    // deallocate stacks if we have too many sitting around unused
    //    for (stk in halfof(free_stacks))
    //        free_stack(stk, pool_sz);
    //    // then sweep the task stacks
    //    for (t in live_tasks)
    //        if (!gc-marked(t))
    //            stkbuf = t->stkbuf
    //            bufsz = t->bufsz
    //            if (stkbuf)
    //                push(free_stacks[sz], stkbuf)
    for (int i = 0; i < jl_n_threads; i++) {
        jl_ptls_t ptls2 = jl_all_tls_states[i];

        // free half of the stacks that remained unused since the last sweep,
        // but keep at least MIN_STACK_MAPPINGS_PER_POOL in each pool
        for (int p = 0; p < JL_N_STACK_POOLS; p++) {
            arraylist_t *al = &ptls2->heap.free_stacks[p];
            size_t n_to_free;
            if (al->len > MIN_STACK_MAPPINGS_PER_POOL) {
                n_to_free = al->len / 2;
                if (n_to_free > (al->len - MIN_STACK_MAPPINGS_PER_POOL))
                    n_to_free = al->len - MIN_STACK_MAPPINGS_PER_POOL;
            }
            else {
                n_to_free = 0;
            }
            for (int n = 0; n < n_to_free; n++) {
                void *stk = arraylist_pop(al);
                free_stack(stk, pool_sizes[p]);
            }
        }

        // sweep this thread's live-task list: reclaim the stacks of unmarked
        // (dead) tasks and compact the survivors to the front of the list
        arraylist_t *live_tasks = &ptls2->heap.live_tasks;
        size_t n = 0;
        size_t ndel = 0;
        size_t l = live_tasks->len;
        void **lst = live_tasks->items;
        if (l == 0)
            continue;
        while (1) {
            jl_task_t *t = (jl_task_t*)lst[n];
            if (gc_marked(jl_astaggedvalue(t)->bits.gc)) {
                // task survived this collection; keep it
                n++;
            }
            else {
                // task is dead; recycle or unmap its stack buffer
                ndel++;
                void *stkbuf = t->stkbuf;
                size_t bufsz = t->bufsz;
                if (stkbuf) {
                    t->stkbuf = NULL;
                    _jl_free_stack(ptls2, stkbuf, bufsz);
                }
            }
            if (n >= l - ndel)
                break;
            // swap the next unexamined entry (at n + ndel) into slot n
            void *tmp = lst[n];
            lst[n] = lst[n + ndel];
            lst[n + ndel] = tmp;
        }
        live_tasks->len -= ndel;
    }
}
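
// (sweep_stack_pools runs as part of the GC's sweep phase; dead tasks' stacks
// flow back through _jl_free_stack above, so pool-sized buffers are recycled
// and odd-sized ones are unmapped.)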