/*
* Copyright (c) 2021 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file event objects library
*
 * Event objects are used to signal one or more threads that a custom set of
 * events has occurred. Threads wait on event objects until another thread or
 * ISR posts the desired set of events to the event object. Each time events
 * are posted to an event object, all threads waiting on that event object are
 * processed to determine if there is a match. All threads whose wait
 * conditions match the current set of events belonging to the event object
 * are awakened.
 *
 * A thread waiting on an event object may choose to wake once either any or
 * all of its desired events have been posted to the event object.
*
* @brief Kernel event object
*/
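/*
 * A minimal usage sketch of the API implemented below, assuming the public
 * <zephyr/kernel.h> interface; the event bit names and the my_event object
 * are hypothetical, not part of this file:
 *
 *	#define EVENT_RX_DONE BIT(0)
 *	#define EVENT_TX_DONE BIT(1)
 *
 *	static struct k_event my_event;
 *
 *	void consumer(void)
 *	{
 *		k_event_init(&my_event);
 *
 *		// Block until either event has been posted. With
 *		// reset == false, events posted before this call also count.
 *		uint32_t events = k_event_wait(&my_event,
 *					       EVENT_RX_DONE | EVENT_TX_DONE,
 *					       false, K_FOREVER);
 *
 *		if ((events & EVENT_RX_DONE) != 0) {
 *			// handle RX completion
 *		}
 *	}
 *
 *	void producer(void)
 *	{
 *		// May be called from a thread or an ISR.
 *		k_event_post(&my_event, EVENT_RX_DONE);
 *	}
 */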
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>
/* private kernel APIs */
#include <wait_q.h>
#include <ksched.h>
#define K_EVENT_WAIT_ANY 0x00 /* Wait for any events */
#define K_EVENT_WAIT_ALL 0x01 /* Wait for all events */
#define K_EVENT_WAIT_MASK 0x01
#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */
struct event_walk_data {
struct k_thread *head;
uint32_t events;
};
#ifdef CONFIG_OBJ_CORE_EVENT
static struct k_obj_type obj_type_event;
#endif /* CONFIG_OBJ_CORE_EVENT */
void z_impl_k_event_init(struct k_event *event)
{
event->events = 0;
event->lock = (struct k_spinlock) {};
SYS_PORT_TRACING_OBJ_INIT(k_event, event);
z_waitq_init(&event->wait_q);
k_object_init(event);
#ifdef CONFIG_OBJ_CORE_EVENT
k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
#endif /* CONFIG_OBJ_CORE_EVENT */
}
#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_init(struct k_event *event)
{
K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
z_impl_k_event_init(event);
}
#include <zephyr/syscalls/k_event_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
/**
 * @brief Determine if the desired set of events has been satisfied
 *
 * This routine determines if the current set of events satisfies the desired
 * set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then all of the
 * desired events must be present to satisfy the request. If @a wait_condition
 * is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY. In the
 * K_EVENT_WAIT_ANY case, the request is satisfied when at least one of the
 * desired events is present in the current set of events.
 */
static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
unsigned int wait_condition)
{
uint32_t match = current & desired;
if (wait_condition == K_EVENT_WAIT_ALL) {
return match == desired;
}
/* wait_condition assumed to be K_EVENT_WAIT_ANY */
return match != 0;
}
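/*
 * Worked example of the matching logic above (values are illustrative):
 * with desired = 0x05 and current = 0x04, match = 0x04. K_EVENT_WAIT_ALL
 * fails (0x04 != 0x05) because bit 0 is still missing, while
 * K_EVENT_WAIT_ANY succeeds because at least one desired bit (bit 2) is
 * present.
 */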
static int event_walk_op(struct k_thread *thread, void *data)
{
unsigned int wait_condition;
struct event_walk_data *event_data = data;
wait_condition = thread->event_options & K_EVENT_WAIT_MASK;
if (are_wait_conditions_met(thread->events, event_data->events,
wait_condition)) {
/*
* Events create a list of threads to wake up. We do
* not want z_thread_timeout to wake these threads; they
* will be woken up by k_event_post_internal once they
* have been processed.
*/
thread->no_wake_on_timeout = true;
/*
* The wait conditions have been satisfied. Add this
* thread to the list of threads to unpend.
*/
thread->next_event_link = event_data->head;
event_data->head = thread;
z_abort_timeout(&thread->base.timeout);
}
return 0;
}
static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
uint32_t events_mask)
{
k_spinlock_key_t key;
struct k_thread *thread;
struct event_walk_data data;
uint32_t previous_events;
data.head = NULL;
key = k_spin_lock(&event->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
events_mask);
previous_events = event->events & events_mask;
events = (event->events & ~events_mask) |
(events & events_mask);
event->events = events;
data.events = events;
/*
* Posting an event has the potential to wake multiple pended threads.
* It is desirable to unpend all affected threads simultaneously. This
* is done in three steps:
*
* 1. Walk the waitq and create a linked list of threads to unpend.
 * 2. Unpend each of the threads in the linked list.
 * 3. Ready each of the threads in the linked list.
*/
z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);
if (data.head != NULL) {
thread = data.head;
struct k_thread *next;
do {
arch_thread_return_value_set(thread, 0);
thread->events = events;
next = thread->next_event_link;
z_sched_wake_thread(thread, false);
thread = next;
} while (thread != NULL);
}
z_reschedule(&event->lock, key);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
events_mask);
return previous_events;
}
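/*
 * Worked example of the masked update above (values are illustrative):
 * with event->events = 0x0C, events = 0x03 and events_mask = 0x05, the
 * bits outside the mask are kept (0x0C & ~0x05 = 0x08), the bits inside
 * the mask are taken from the new value (0x03 & 0x05 = 0x01), and the
 * event object ends up holding 0x09. The returned previous_events is the
 * old value restricted to the mask: 0x0C & 0x05 = 0x04.
 */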
uint32_t z_impl_k_event_post(struct k_event *event, uint32_t events)
{
return k_event_post_internal(event, events, events);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
return z_impl_k_event_post(event, events);
}
#include <zephyr/syscalls/k_event_post_mrsh.c>
#endif /* CONFIG_USERSPACE */
uint32_t z_impl_k_event_set(struct k_event *event, uint32_t events)
{
return k_event_post_internal(event, events, ~0);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
return z_impl_k_event_set(event, events);
}
#include <zephyr/syscalls/k_event_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
uint32_t z_impl_k_event_set_masked(struct k_event *event, uint32_t events,
uint32_t events_mask)
{
return k_event_post_internal(event, events, events_mask);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_set_masked(struct k_event *event, uint32_t events,
uint32_t events_mask)
{
K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
return z_impl_k_event_set_masked(event, events, events_mask);
}
#include <zephyr/syscalls/k_event_set_masked_mrsh.c>
#endif /* CONFIG_USERSPACE */
uint32_t z_impl_k_event_clear(struct k_event *event, uint32_t events)
{
return k_event_post_internal(event, 0, events);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_clear(struct k_event *event, uint32_t events)
{
K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
return z_impl_k_event_clear(event, events);
}
#include <zephyr/syscalls/k_event_clear_mrsh.c>
#endif /* CONFIG_USERSPACE */
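/*
 * Summary of how the public setters map onto k_event_post_internal(),
 * derived from the wrappers above:
 *
 *	k_event_post(e, ev)          -> post_internal(e, ev, ev)  OR bits in
 *	k_event_set(e, ev)           -> post_internal(e, ev, ~0)  replace all
 *	k_event_set_masked(e, ev, m) -> post_internal(e, ev, m)   replace masked
 *	k_event_clear(e, ev)         -> post_internal(e, 0, ev)   clear bits
 */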
static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
unsigned int options, k_timeout_t timeout)
{
uint32_t rv = 0;
unsigned int wait_condition;
struct k_thread *thread;
__ASSERT(((arch_is_in_isr() == false) ||
K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
options, timeout);
if (events == 0) {
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
return 0;
}
wait_condition = options & K_EVENT_WAIT_MASK;
thread = k_sched_current_thread_query();
k_spinlock_key_t key = k_spin_lock(&event->lock);
if (options & K_EVENT_WAIT_RESET) {
event->events = 0;
}
/* Test if the wait conditions have already been met. */
if (are_wait_conditions_met(events, event->events, wait_condition)) {
rv = event->events;
k_spin_unlock(&event->lock, key);
goto out;
}
/* Match conditions have not been met. */
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&event->lock, key);
goto out;
}
/*
* The caller must pend to wait for the match. Save the desired
* set of events in the k_thread structure.
*/
thread->events = events;
thread->event_options = options;
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
options, timeout);
if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
/* Retrieve the set of events that woke the thread */
rv = thread->events;
}
out:
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
events, rv & events);
return rv & events;
}
/**
* Wait for any of the specified events
*/
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout)
{
uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;
return k_event_wait_internal(event, events, options, timeout);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout)
{
K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */
/**
* Wait for all of the specified events
*/
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout)
{
uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
: K_EVENT_WAIT_ALL;
return k_event_wait_internal(event, events, options, timeout);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout)
{
K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_all_mrsh.c>
#endif /* CONFIG_USERSPACE */
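/*
 * A minimal sketch contrasting the two wait flavors implemented above; the
 * event bit usage and the my_event object are hypothetical:
 *
 *	// Wakes as soon as EITHER bit 0 or bit 1 has been posted.
 *	uint32_t any = k_event_wait(&my_event, BIT(0) | BIT(1),
 *				    false, K_MSEC(100));
 *
 *	// Wakes only once BOTH bit 0 and bit 1 have been posted. With
 *	// reset == true, the event object is zeroed before waiting, so
 *	// only events posted after this call can satisfy it.
 *	uint32_t all = k_event_wait_all(&my_event, BIT(0) | BIT(1),
 *					true, K_MSEC(100));
 *
 * Both return 0 on timeout, otherwise the matching subset of the requested
 * events (rv & events).
 */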
#ifdef CONFIG_OBJ_CORE_EVENT
static int init_event_obj_core_list(void)
{
/* Initialize event object type */
z_obj_type_init(&obj_type_event, K_OBJ_TYPE_EVENT_ID,
offsetof(struct k_event, obj_core));
/* Initialize and link statically defined events */
STRUCT_SECTION_FOREACH(k_event, event) {
k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
}
return 0;
}
SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_EVENT */