/*
* Copyright (c) 2016 Intel Corporation
* Copyright (c) 2016 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/**
 * @file
 *
 * Workqueue support functions
 */

#include <kernel_structs.h>
#include <wait_q.h>
#include <errno.h>

static void work_q_main(void *work_q_ptr, void *p2, void *p3)
{
	struct k_work_q *work_q = work_q_ptr;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		struct k_work *work;
		k_work_handler_t handler;

		/* Block until an item is queued */
		work = k_fifo_get(&work_q->fifo, K_FOREVER);

		handler = work->handler;

		/* Reset pending state so the handler can resubmit the item */
		if (atomic_test_and_clear_bit(work->flags,
					      K_WORK_STATE_PENDING)) {
			handler(work);
		}

		/* Make sure we don't hog the CPU if the FIFO never (or
		 * very rarely) gets empty.
		 */
		k_yield();
	}
}

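/*
 * Illustrative sketch (not part of this file): because the pending bit is
 * cleared before the handler runs, a handler may resubmit its own work
 * item from within itself, as the comment above relies on. The queue and
 * helper named below are assumptions made up for this example.
 *
 *	extern struct k_work_q my_work_q;
 *
 *	static void repeating_handler(struct k_work *work)
 *	{
 *		process_one_item();	// hypothetical helper
 *		k_work_submit_to_queue(&my_work_q, work);
 *	}
 */
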
void k_work_q_start(struct k_work_q *work_q, char *stack,
		    size_t stack_size, int prio)
{
	k_fifo_init(&work_q->fifo);

	/* Spawn the dedicated workqueue thread that drains the FIFO */
	k_thread_spawn(stack, stack_size,
		       work_q_main, work_q, 0, 0,
		       prio, 0, 0);
}

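/*
 * Usage sketch (illustrative only, not part of this file): starting a
 * workqueue thread and submitting a work item to it. The stack size,
 * priority, names and handler body are assumptions for the example; real
 * code may also need the kernel's stack-definition attribute.
 *
 *	static char my_wq_stack[512];
 *	static struct k_work_q my_work_q;
 *	static struct k_work my_work;
 *
 *	static void my_handler(struct k_work *work)
 *	{
 *		printk("work item ran\n");
 *	}
 *
 *	void app_start(void)
 *	{
 *		k_work_q_start(&my_work_q, my_wq_stack,
 *			       sizeof(my_wq_stack), 5);
 *		k_work_init(&my_work, my_handler);
 *		k_work_submit_to_queue(&my_work_q, &my_work);
 *	}
 */
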
#ifdef CONFIG_SYS_CLOCK_EXISTS
static void work_timeout(struct _timeout *t)
{
	struct k_delayed_work *w = CONTAINER_OF(t, struct k_delayed_work,
						timeout);

	/* Submit work to workqueue */
	k_work_submit_to_queue(w->work_q, &w->work);

	/* Detach from workqueue so a later cancel returns the right status */
	w->work_q = NULL;
}

void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
	k_work_init(&work->work, handler);
	_init_timeout(&work->timeout, work_timeout);
	work->work_q = NULL;
}

int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   int32_t delay)
{
	int key = irq_lock();
	int err;

	/* Work cannot be active in multiple queues */
	if (work->work_q && work->work_q != work_q) {
		err = -EADDRINUSE;
		goto done;
	}

	/* Cancel if work has been submitted */
	if (work->work_q == work_q) {
		err = k_delayed_work_cancel(work);
		if (err < 0) {
			goto done;
		}
	}

	/* Attach workqueue so the timeout callback can submit it */
	work->work_q = work_q;

	if (!delay) {
		/* Submit work directly if the delay is zero */
		k_work_submit_to_queue(work_q, &work->work);
	} else {
		/* Add timeout with no associated thread or wait queue */
		_add_timeout(NULL, &work->timeout, NULL,
			     _TICK_ALIGN + _ms_to_ticks(delay));
	}

	err = 0;

done:
	irq_unlock(key);

	return err;
}

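/*
 * Usage sketch (illustrative only, not part of this file): the delay is
 * given in milliseconds and converted to ticks above via _ms_to_ticks().
 * The 100 ms value and the names below are assumptions for the example.
 *
 *	static struct k_delayed_work my_dwork;
 *
 *	void schedule_later(struct k_work_q *q)
 *	{
 *		k_delayed_work_init(&my_dwork, my_handler);
 *
 *		// my_handler runs on q's thread roughly 100 ms from now
 *		k_delayed_work_submit_to_queue(q, &my_dwork, 100);
 *	}
 */
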
int k_delayed_work_cancel(struct k_delayed_work *work)
{
	int key = irq_lock();

	if (k_work_pending(&work->work)) {
		irq_unlock(key);
		return -EINPROGRESS;
	}

	if (!work->work_q) {
		irq_unlock(key);
		return -EINVAL;
	}

	/* Abort the timeout; if it has already expired this does nothing */
	_abort_timeout(&work->timeout);

	/* Detach from workqueue */
	work->work_q = NULL;

	irq_unlock(key);

	return 0;
}

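/*
 * Cancellation sketch (illustrative only, not part of this file), showing
 * the status codes returned above: -EINPROGRESS once the timeout has fired
 * and the item is pending on the queue, -EINVAL if the item was never
 * submitted (or already ran and detached).
 *
 *	int err = k_delayed_work_cancel(&my_dwork);
 *	if (err == -EINPROGRESS) {
 *		// Too late: the handler is already queued and will run
 *	}
 */
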
#endif /* CONFIG_SYS_CLOCK_EXISTS */