/* This file is part of the CivetWeb web server.
* See https://github.com/civetweb/civetweb/
* (C) 2014-2021 by the CivetWeb authors, MIT license.
*/
#if !defined(MAX_TIMERS)
#define MAX_TIMERS MAX_WORKER_THREADS
#endif
#if !defined(TIMER_RESOLUTION)
/* Timer resolution in ms */
#define TIMER_RESOLUTION (10)
#endif
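/* Both limits above are compile-time configuration. An illustrative
* (hypothetical) build override, assuming the toolchain accepts macro
* definitions via -D, would be:
*
*   cc -DMAX_TIMERS=64 -DTIMER_RESOLUTION=5 ...
*
* Smaller TIMER_RESOLUTION values trade CPU load for timer accuracy
* (see the comment in timer_thread_run below). */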
typedef int (*taction)(void *arg);
typedef void (*tcancelaction)(void *arg);
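/* Illustrative sketch only (hypothetical names, not part of this file):
* a minimal taction/tcancelaction pair. An action returning a value > 0
* asks to be rescheduled with its period (provided the timer was given a
* period > 0); returning 0 stops the timer, after which the cancel
* callback (if any) gets a chance to release arg.
*
*   static int
*   example_action(void *arg)
*   {
*       int *remaining = (int *)arg;
*       return (--(*remaining) > 0); // 1: run again, 0: stop
*   }
*
*   static void
*   example_cancel(void *arg)
*   {
*       mg_free(arg); // release the argument passed to timer_add
*   }
*/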
struct ttimer {
double time;
double period;
taction action;
void *arg;
tcancelaction cancel;
};
struct ttimers {
pthread_t threadid; /* Timer thread ID */
pthread_mutex_t mutex; /* Protects timer lists */
struct ttimer *timers; /* List of timers */
unsigned timer_count; /* Current size of timer list */
unsigned timer_capacity; /* Capacity of timer list */
#if defined(_WIN32)
DWORD last_tick;
uint64_t now_tick64;
#endif
};

TIMER_API double
timer_getcurrenttime(struct mg_context *ctx)
{
#if defined(_WIN32)
uint64_t now_tick64 = 0;
#if defined(_WIN64)
now_tick64 = GetTickCount64();
#else
/* GetTickCount returns milliseconds since system start as an
* unsigned 32 bit value. It wraps around every 49.7 days.
* We therefore keep a 64 bit counter (which will not wrap for
* roughly 500 million years) by adding the 32 bit difference
* since the last call. This only works if this function is
* called at least once every 7 weeks. */
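/* Worked example (illustrative): with last_tick = 0xFFFFFF00 and a
* wrapped now_tick = 0x00000100, the unsigned 32 bit subtraction below
* yields 0x200 = 512 ms, so the elapsed time stays correct across the
* wrap. */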
DWORD now_tick = GetTickCount();
if (ctx->timers) {
pthread_mutex_lock(&ctx->timers->mutex);
ctx->timers->now_tick64 += now_tick - ctx->timers->last_tick;
now_tick64 = ctx->timers->now_tick64;
ctx->timers->last_tick = now_tick;
pthread_mutex_unlock(&ctx->timers->mutex);
}
#endif
return (double)now_tick64 * 1.0E-3;
#else
struct timespec now_ts;
(void)ctx;
clock_gettime(CLOCK_MONOTONIC, &now_ts);
return (double)now_ts.tv_sec + (double)now_ts.tv_nsec * 1.0E-9;
#endif
}

TIMER_API int
timer_add(struct mg_context *ctx,
double next_time,
double period,
int is_relative,
taction action,
void *arg,
tcancelaction cancel)
{
int error = 0;
double now;
if (!ctx->timers) {
return 1;
}
now = timer_getcurrenttime(ctx);
/* HCP24: if is_relative = 0 and next_time < now,
* the action would be called as fast as possible;
* if additionally period > 0,
* the action would be called as fast as possible
* n times until (next_time + (n * period)) > now,
* and only then would the period take effect.
* Solution:
* if next_time < now, set next_time = now.
* The first callback runs as soon as possible (now),
* but the following callbacks respect the period.
*/
if (is_relative) {
next_time += now;
}
/* Timers cannot be set in the past */
if (next_time < now) {
next_time = now;
}
pthread_mutex_lock(&ctx->timers->mutex);
if (ctx->timers->timer_count == MAX_TIMERS) {
error = 1;
} else if (ctx->timers->timer_count == ctx->timers->timer_capacity) {
unsigned capacity = (ctx->timers->timer_capacity * 2) + 1;
struct ttimer *timers =
(struct ttimer *)mg_realloc_ctx(ctx->timers->timers,
capacity * sizeof(struct ttimer),
ctx);
if (timers) {
ctx->timers->timers = timers;
ctx->timers->timer_capacity = capacity;
} else {
error = 1;
}
}
if (!error) {
/* Insert the new timer into a sorted list. */
/* A linear list is still the most efficient structure for a small
* number of timers; with many timers, a different data structure
* would perform better. */
unsigned u = ctx->timers->timer_count;
for (; (u > 0) && (ctx->timers->timers[u - 1].time > next_time); u--) {
ctx->timers->timers[u] = ctx->timers->timers[u - 1];
}
ctx->timers->timers[u].time = next_time;
ctx->timers->timers[u].period = period;
ctx->timers->timers[u].action = action;
ctx->timers->timers[u].arg = arg;
ctx->timers->timers[u].cancel = cancel;
ctx->timers->timer_count++;
}
pthread_mutex_unlock(&ctx->timers->mutex);
return error;
}
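/* Illustrative usage sketch (hypothetical caller, not part of this file):
* run example_action one second from now and then every five seconds,
* relative to the current time:
*
*   if (timer_add(ctx, 1.0, 5.0, 1, example_action, arg, example_cancel)) {
*       // non-zero return: no timer list, table full, or out of memory
*   }
*/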
static void
timer_thread_run(void *thread_func_param)
{
struct mg_context *ctx = (struct mg_context *)thread_func_param;
double d;
unsigned u;
int action_res;
struct ttimer t;
mg_set_thread_name("timer");
if (ctx->callbacks.init_thread) {
/* Timer thread */
ctx->callbacks.init_thread(ctx, 2);
}
/* Timer main loop */
d = timer_getcurrenttime(ctx);
while (STOP_FLAG_IS_ZERO(&ctx->stop_flag)) {
pthread_mutex_lock(&ctx->timers->mutex);
if ((ctx->timers->timer_count > 0)
&& (d >= ctx->timers->timers[0].time)) {
/* Timer list is sorted. First action should run now. */
/* Store active timer in "t" */
t = ctx->timers->timers[0];
/* Shift all other timers */
for (u = 1; u < ctx->timers->timer_count; u++) {
ctx->timers->timers[u - 1] = ctx->timers->timers[u];
}
ctx->timers->timer_count--;
pthread_mutex_unlock(&ctx->timers->mutex);
/* Call timer action */
action_res = t.action(t.arg);
/* action_res > 0: reschedule (if the timer has a period) */
/* action_res == 0: do not reschedule; the cancel callback may free arg */
if ((action_res > 0) && (t.period > 0)) {
/* Should schedule timer again */
timer_add(ctx,
t.time + t.period,
t.period,
0,
t.action,
t.arg,
t.cancel);
} else {
/* Allow user to free timer argument */
if (t.cancel != NULL) {
t.cancel(t.arg);
}
}
continue;
} else {
pthread_mutex_unlock(&ctx->timers->mutex);
}
/* TIMER_RESOLUTION = 10 ms seems reasonable.
* A faster loop (smaller sleep value) increases CPU load,
* a slower loop (higher sleep value) decreases timer accuracy.
*/
mg_sleep(TIMER_RESOLUTION);
d = timer_getcurrenttime(ctx);
}
/* Remove remaining timers */
for (u = 0; u < ctx->timers->timer_count; u++) {
t = ctx->timers->timers[u];
if (t.cancel != NULL) {
t.cancel(t.arg);
}
}
}

#if defined(_WIN32)
static unsigned __stdcall timer_thread(void *thread_func_param)
{
timer_thread_run(thread_func_param);
return 0;
}
#else
static void *
timer_thread(void *thread_func_param)
{
struct sigaction sa;
/* Ignore SIGPIPE */
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_IGN;
sigaction(SIGPIPE, &sa, NULL);
timer_thread_run(thread_func_param);
return NULL;
}
#endif /* _WIN32 */

TIMER_API int
timers_init(struct mg_context *ctx)
{
/* Initialize timers data structure */
ctx->timers =
(struct ttimers *)mg_calloc_ctx(sizeof(struct ttimers), 1, ctx);
if (!ctx->timers) {
return -1;
}
ctx->timers->timers = NULL;
/* Initialize mutex */
if (0 != pthread_mutex_init(&ctx->timers->mutex, NULL)) {
mg_free(ctx->timers);
ctx->timers = NULL;
return -1;
}
/* On some systems, timer_getcurrenttime performs initialization
* during its first call. Call it once now and ignore the result. */
(void)timer_getcurrenttime(ctx);
/* Start timer thread */
if (mg_start_thread_with_id(timer_thread, ctx, &ctx->timers->threadid)
!= 0) {
(void)pthread_mutex_destroy(&ctx->timers->mutex);
mg_free(ctx->timers);
ctx->timers = NULL;
return -1;
}
return 0;
}

TIMER_API void
timers_exit(struct mg_context *ctx)
{
if (ctx->timers) {
mg_join_thread(ctx->timers->threadid);
(void)pthread_mutex_destroy(&ctx->timers->mutex);
mg_free(ctx->timers->timers);
mg_free(ctx->timers);
ctx->timers = NULL;
}
}
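/* Illustrative lifecycle sketch (hypothetical, not part of this file):
*
*   if (timers_init(ctx) != 0) {
*       // no timer thread available, handle the error
*   }
*   ... timer_add() calls while the server is running ...
*   timers_exit(ctx); // joins the timer thread and frees the timer list
*
* Note that timer_thread_run only leaves its loop once ctx->stop_flag is
* set, and it hands any remaining timers to their cancel callbacks on the
* way out, so timers_exit is expected to run as part of context shutdown. */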
/* End of timer.inl */