forked from zephyrproject-rtos/zephyr
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtimer.c
228 lines (176 loc) · 4.77 KB
/
timer.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
/*
* Copyright (c) 1997-2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <debug/object_tracing_common.h>
#include <init.h>
#include <wait_q.h>
extern struct k_timer _k_timer_list_start[];
extern struct k_timer _k_timer_list_end[];
#ifdef CONFIG_OBJECT_TRACING

struct k_timer *_trace_list_k_timer;

/*
 * Complete initialization of statically defined timers by hooking each
 * one into the kernel's object-tracing list.
 */
static int init_timer_module(struct device *dev)
{
	struct k_timer *timer = _k_timer_list_start;

	ARG_UNUSED(dev);

	/* Register every timer placed in the static timer section. */
	for (; timer < _k_timer_list_end; timer++) {
		SYS_TRACING_OBJ_INIT(k_timer, timer);
	}

	return 0;
}

SYS_INIT(init_timer_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */
/**
 * @brief Handle expiration of a kernel timer object.
 *
 * Invoked via the timer's _timeout when it fires. Re-arms periodic
 * timers, increments the timer's expiration count, invokes the user's
 * expiry callback (if set), and wakes at most one thread pended on the
 * timer's wait queue (a k_timer_status_sync() caller).
 *
 * @param t Timeout used by the timer.
 *
 * @return N/A
 */
void _timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	unsigned int key;

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (timer->period > 0) {
		key = irq_lock();
		_add_timeout(NULL, &timer->timeout, &timer->wait_q,
			     timer->period);
		irq_unlock(key);
	}

	/* update timer's status: one more expiration has occurred */
	timer->status += 1;

	/* invoke timer expiry function */
	if (timer->expiry_fn) {
		timer->expiry_fn(timer);
	}

	/* at most one thread can be pended here (see comment below) */
	thread = (struct k_thread *)sys_dlist_peek_head(&timer->wait_q);

	if (!thread) {
		return;
	}

	/*
	 * Interrupts _DO NOT_ have to be locked in this specific instance of
	 * calling _unpend_thread() because a) this is the only place a thread
	 * can be taken off this pend queue, and b) the only place a thread
	 * can be put on the pend queue is at thread level, which of course
	 * cannot interrupt the current context.
	 */
	_unpend_thread(thread);

	key = irq_lock();
	_ready_thread(thread);
	irq_unlock(key);

	/* the awakened k_timer_status_sync() caller sees _Swap() return 0 */
	_set_thread_return_value(thread, 0);
}
/**
 * @brief Initialize a timer object.
 *
 * Records the user callbacks and puts the timer into a stopped,
 * never-expired state, ready for k_timer_start().
 *
 * @param timer     Timer to initialize.
 * @param expiry_fn Callback invoked on each expiration (may be NULL).
 * @param stop_fn   Callback invoked when the timer is stopped while
 *                  running (may be NULL).
 */
void k_timer_init(struct k_timer *timer,
		  void (*expiry_fn)(struct k_timer *),
		  void (*stop_fn)(struct k_timer *))
{
	timer->expiry_fn = expiry_fn;
	timer->stop_fn = stop_fn;

	/* no expirations recorded yet; no user data attached */
	timer->status = 0;
	timer->user_data = NULL;

	/* prepare the wait queue and the timeout used to drive the timer */
	sys_dlist_init(&timer->wait_q);
	_init_timeout(&timer->timeout, _timer_expiration_handler);

	SYS_TRACING_OBJ_INIT(k_timer, timer);
}
/**
 * @brief Start (or restart) a timer.
 *
 * A timer that is already running is first cancelled, then re-armed;
 * any accumulated expiration count is discarded.
 *
 * @param timer    Timer to start.
 * @param duration Initial delay, in milliseconds, before first expiry.
 * @param period   Repeat interval in milliseconds; 0 for one-shot.
 *                 Both zero is invalid (asserted).
 */
void k_timer_start(struct k_timer *timer, s32_t duration, s32_t period)
{
	__ASSERT(duration >= 0 && period >= 0 &&
		 (duration != 0 || period != 0), "invalid parameters\n");

	/*
	 * NOTE(review): 'volatile' on plain automatics is unusual --
	 * presumably a toolchain workaround; confirm before removing.
	 */
	volatile s32_t period_in_ticks, duration_in_ticks;

	period_in_ticks = _ms_to_ticks(period);
	duration_in_ticks = _ms_to_ticks(duration);

	unsigned int key = irq_lock();

	/* restarting an active timer: cancel its pending timeout first */
	if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
		_abort_timeout(&timer->timeout);
	}

	timer->period = period_in_ticks;

	/* expirations from any previous run are discarded */
	timer->status = 0;
	_add_timeout(NULL, &timer->timeout, &timer->wait_q, duration_in_ticks);
	irq_unlock(key);
}
/**
 * @brief Stop a running timer.
 *
 * Cancels the pending timeout, invokes the user's stop callback, and
 * releases any thread pended in k_timer_status_sync(). Stopping a
 * timer that is not running is a no-op. Callable from ISR or thread
 * context.
 *
 * @param timer Timer to stop.
 */
void k_timer_stop(struct k_timer *timer)
{
	int key = irq_lock();
	int inactive = (_abort_timeout(&timer->timeout) == _INACTIVE);

	irq_unlock(key);

	/* timer wasn't running: nothing else to do */
	if (inactive) {
		return;
	}

	/* stop_fn runs with interrupts unlocked */
	if (timer->stop_fn) {
		timer->stop_fn(timer);
	}

	key = irq_lock();
	struct k_thread *pending_thread = _unpend_first_thread(&timer->wait_q);

	/* wake a k_timer_status_sync() waiter, if there is one */
	if (pending_thread) {
		_ready_thread(pending_thread);
	}

	/* a reschedule is only legal from thread context, not from an ISR */
	if (_is_in_isr()) {
		irq_unlock(key);
	} else {
		_reschedule_threads(key);
	}
}
/**
 * @brief Read and reset a timer's expiration count.
 *
 * @param timer Timer to query.
 *
 * @return Number of expirations since the status was last read.
 */
u32_t k_timer_status_get(struct k_timer *timer)
{
	unsigned int key = irq_lock();

	/* atomically read-and-clear the expiration count */
	u32_t expirations = timer->status;

	timer->status = 0;
	irq_unlock(key);

	return expirations;
}
/**
 * @brief Block until the timer expires or is stopped, then read status.
 *
 * Returns immediately if the timer has already expired at least once
 * since the status was last read, or if the timer is stopped.
 * Otherwise the caller pends on the timer's wait queue until it is
 * woken by _timer_expiration_handler() or k_timer_stop(). The
 * expiration count is cleared on return. Must not be called from an
 * ISR (asserted).
 *
 * @param timer Timer to synchronize on.
 *
 * @return Number of expirations since the status was last read.
 */
u32_t k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!_is_in_isr(), "");

	unsigned int key = irq_lock();
	u32_t result = timer->status;

	if (result == 0) {
		if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
			/* wait for timer to expire or stop */
			_pend_current_thread(&timer->wait_q, K_FOREVER);
			_Swap(key);

			/* get updated timer status */
			key = irq_lock();
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0;
	irq_unlock(key);

	return result;
}
/**
 * @brief Compute the time remaining before a timeout expires.
 *
 * @param timeout Timeout to query; may be inactive.
 *
 * @return Remaining time in milliseconds (0 if the timeout is inactive).
 */
s32_t _timeout_remaining_get(struct _timeout *timeout)
{
	s32_t ticks_left = 0;
	unsigned int key = irq_lock();

	if (timeout->delta_ticks_from_prev != _INACTIVE) {
		/*
		 * Each queue entry stores only its tick delta from the
		 * previous entry, so accumulate deltas from the head of
		 * the timeout queue up to and including this timeout.
		 */
		struct _timeout *cur =
			(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

		ticks_left = cur->delta_ticks_from_prev;
		while (cur != timeout) {
			cur = (struct _timeout *)sys_dlist_peek_next(
				&_timeout_q, &cur->node);
			ticks_left += cur->delta_ticks_from_prev;
		}
	}

	irq_unlock(key);

	return __ticks_to_ms(ticks_left);
}