/*
* Copyright (c) 2016, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* @brief Public kernel APIs.
*/
#ifndef _kernel__h_
#define _kernel__h_
#if !defined(_ASMLANGUAGE)
#include <stddef.h>
#include <zephyr/types.h>
#include <limits.h>
#include <toolchain.h>
#include <sections.h>
#include <atomic.h>
#include <errno.h>
#include <misc/__assert.h>
#include <misc/dlist.h>
#include <misc/slist.h>
#include <misc/util.h>
#include <kernel_version.h>
#include <drivers/rand32.h>
#include <kernel_arch_thread.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Kernel APIs
* @defgroup kernel_apis Kernel APIs
* @{
* @}
*/
#ifdef CONFIG_KERNEL_DEBUG
#include <misc/printk.h>
#define K_DEBUG(fmt, ...) printk("[%s] " fmt, __func__, ##__VA_ARGS__)
#else
#define K_DEBUG(fmt, ...)
#endif
#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES)
#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
#elif defined(CONFIG_COOP_ENABLED)
#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES + 1)
#define _NUM_PREEMPT_PRIO (0)
#elif defined(CONFIG_PREEMPT_ENABLED)
#define _NUM_COOP_PRIO (0)
#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
#else
#error "invalid configuration"
#endif
#define K_PRIO_COOP(x) (-(_NUM_COOP_PRIO - (x)))
#define K_PRIO_PREEMPT(x) (x)
#define K_ANY NULL
#define K_END NULL
#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#elif defined(CONFIG_COOP_ENABLED)
#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES - 1)
#elif defined(CONFIG_PREEMPT_ENABLED)
#define K_HIGHEST_THREAD_PRIO 0
#else
#error "invalid configuration"
#endif
#ifdef CONFIG_PREEMPT_ENABLED
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#else
#define K_LOWEST_THREAD_PRIO -1
#endif
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
typedef sys_dlist_t _wait_q_t;
#ifdef CONFIG_OBJECT_TRACING
#define _OBJECT_TRACING_NEXT_PTR(type) struct type *__next
#define _OBJECT_TRACING_INIT .__next = NULL,
#else
#define _OBJECT_TRACING_INIT
#define _OBJECT_TRACING_NEXT_PTR(type)
#endif
#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT \
	.poll_event = NULL,
#define _POLL_EVENT struct k_poll_event *poll_event
#else
#define _POLL_EVENT_OBJ_INIT
#define _POLL_EVENT
#endif
struct k_thread;
struct k_mutex;
struct k_sem;
struct k_alert;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_mem_pool;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
/* timeouts */
struct _timeout;
typedef void (*_timeout_func_t)(struct _timeout *t);
struct _timeout {
	sys_dnode_t node;
	struct k_thread *thread;
	sys_dlist_t *wait_q;
	s32_t delta_ticks_from_prev;
	_timeout_func_t func;
};
extern s32_t _timeout_remaining_get(struct _timeout *timeout);
/* Threads */
typedef void (*_thread_entry_t)(void *, void *, void *);
#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry {
	_thread_entry_t pEntry;
	void *parameter1;
	void *parameter2;
	void *parameter3;
};
#endif
/* can be used for creating 'dummy' threads, e.g. for pending on objects */
struct _thread_base {
	/* this thread's entry in a ready/wait queue */
	sys_dnode_t k_q_node;
	/* user facing 'thread options'; values defined in include/kernel.h */
	u8_t user_options;
	/* thread state */
	u8_t thread_state;
	/*
	 * scheduler lock count and thread priority
	 *
	 * These two fields control the preemptibility of a thread.
	 *
	 * When the scheduler is locked, sched_locked is decremented, which
	 * means that the scheduler is locked for values from 0xff to 0x01. A
	 * thread is cooperative if its prio is negative, thus 0x80 to 0xff
	 * when the value is viewed as unsigned.
	 *
	 * By putting them end-to-end, this means that a thread is
	 * non-preemptible if the bundled value is greater than or equal to
	 * 0x0080.
	 */
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			u8_t sched_locked;
			s8_t prio;
#else /* LITTLE and PDP */
			s8_t prio;
			u8_t sched_locked;
#endif
		};
		u16_t preempt;
	};
	/* data returned by APIs */
	void *swap_data;
#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* this thread's entry in a timeout queue */
	struct _timeout timeout;
#endif
};
typedef struct _thread_base _thread_base_t;
#if defined(CONFIG_THREAD_STACK_INFO)
/* Contains the stack information of a thread */
struct _thread_stack_info {
	/* Stack Start */
	u32_t start;
	/* Stack Size */
	u32_t size;
};
typedef struct _thread_stack_info _thread_stack_info_t;
#endif /* CONFIG_THREAD_STACK_INFO */
struct k_thread {
	struct _thread_base base;
	/* defined by the architecture, but all archs need these */
	struct _caller_saved caller_saved;
	struct _callee_saved callee_saved;
	/* static thread init data */
	void *init_data;
	/* abort function */
	void (*fn_abort)(void);
#if defined(CONFIG_THREAD_MONITOR)
	/* thread entry and parameters description */
	struct __thread_entry *entry;
	/* next item in list of all threads */
	struct k_thread *next_thread;
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* crude thread-local storage */
	void *custom_data;
#endif
#ifdef CONFIG_ERRNO
	/* per-thread errno variable */
	int errno_var;
#endif
#if defined(CONFIG_THREAD_STACK_INFO)
	/* Stack Info */
	struct _thread_stack_info stack_info;
#endif /* CONFIG_THREAD_STACK_INFO */
	/* arch-specifics: must always be at the end */
	struct _thread_arch arch;
};
typedef struct k_thread _thread_t;
typedef struct k_thread *k_tid_t;
#define tcs k_thread
enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};
/**
* @defgroup profiling_apis Profiling APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Analyze the main, idle, interrupt and system workqueue call stacks
*
* This routine calls @ref stack_analyze on the 4 call stacks declared and
* maintained by the kernel. The sizes of those 4 call stacks are defined by:
*
* CONFIG_MAIN_STACK_SIZE
* CONFIG_IDLE_STACK_SIZE
* CONFIG_ISR_STACK_SIZE
* CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE
*
* @note CONFIG_INIT_STACKS and CONFIG_PRINTK must be set for this function to
* produce output.
*
* @return N/A
*/
extern void k_call_stacks_analyze(void);
/**
* @} end defgroup profiling_apis
*/
/**
* @defgroup thread_apis Thread APIs
* @ingroup kernel_apis
* @{
*/
/**
* @typedef k_thread_entry_t
* @brief Thread entry point function type.
*
* A thread's entry point function is invoked when the thread starts executing.
* Up to 3 argument values can be passed to the function.
*
* The thread terminates execution permanently if the entry point function
* returns. The thread is responsible for releasing any shared resources
* it may own (such as mutexes and dynamically allocated memory), prior to
* returning.
*
* @param p1 First argument.
* @param p2 Second argument.
* @param p3 Third argument.
*
* @return N/A
*/
typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);
#endif /* !_ASMLANGUAGE */
/*
* Thread user options. May be needed by assembly code. Common part uses low
* bits, arch-specific use high bits.
*/
/* system thread that must not abort */
#define K_ESSENTIAL (1 << 0)
#if defined(CONFIG_FP_SHARING)
/* thread uses floating point registers */
#define K_FP_REGS (1 << 1)
#endif
#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
/* thread uses SSEx (and also FP) registers */
#define K_SSE_REGS (1 << 7)
#endif
#endif
/* end - thread options */
#if !defined(_ASMLANGUAGE)
/**
* @brief Spawn a thread.
*
* This routine initializes a thread, then schedules it for execution.
*
* The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start, the kernel
* scheduler may preempt the current thread to allow the new thread to
* execute.
*
* Kernel data structures for bookkeeping and context storage for this thread
* will be placed at the beginning of the thread's stack memory region and may
* become corrupted if too much of the stack is used. This function has been
* deprecated in favor of k_thread_create() to give the user more control on
* where these data structures reside.
*
* Thread options are architecture-specific, and can include K_ESSENTIAL,
* K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
* them using "|" (the logical OR operator).
*
* @param stack Pointer to the stack space.
* @param stack_size Stack size in bytes.
* @param entry Thread entry function.
* @param p1 1st entry point parameter.
* @param p2 2nd entry point parameter.
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
* @param delay Scheduling delay (in milliseconds), or K_NO_WAIT (for no delay).
*
* @return ID of new thread.
*/
extern __deprecated k_tid_t k_thread_spawn(char *stack, size_t stack_size,
					   k_thread_entry_t entry,
					   void *p1, void *p2, void *p3,
					   int prio, u32_t options, s32_t delay);
/**
* @brief Create a thread.
*
* This routine initializes a thread, then schedules it for execution.
*
* The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start, the kernel
* scheduler may preempt the current thread to allow the new thread to
* execute.
*
* Thread options are architecture-specific, and can include K_ESSENTIAL,
* K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
* them using "|" (the logical OR operator).
*
 * Historically, users would often place the struct k_thread data at the
 * beginning of the stack memory region. However, the data becomes corrupted
 * if the stack overflows into this region, and stack protection features
 * may not detect this situation.
*
* @param new_thread Pointer to uninitialized struct k_thread
* @param stack Pointer to the stack space.
* @param stack_size Stack size in bytes.
* @param entry Thread entry function.
* @param p1 1st entry point parameter.
* @param p2 2nd entry point parameter.
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
* @param delay Scheduling delay (in milliseconds), or K_NO_WAIT (for no delay).
*
* @return ID of new thread.
*/
extern k_tid_t k_thread_create(struct k_thread *new_thread, char *stack,
			       size_t stack_size,
			       void (*entry)(void *, void *, void *),
			       void *p1, void *p2, void *p3,
			       int prio, u32_t options, s32_t delay);
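/*
 * Illustrative sketch (not part of the original header): creating a worker
 * thread with k_thread_create(). The names my_stack, my_thread and my_entry,
 * and the chosen stack size and priority, are hypothetical.
 *
 * @code
 * #define MY_STACK_SIZE 512
 *
 * static char __noinit __stack my_stack[MY_STACK_SIZE];
 * static struct k_thread my_thread;
 *
 * static void my_entry(void *p1, void *p2, void *p3)
 * {
 *         // thread body
 * }
 *
 * k_tid_t tid = k_thread_create(&my_thread, my_stack, MY_STACK_SIZE,
 *                               my_entry, NULL, NULL, NULL,
 *                               K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 * @endcode
 */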
/**
* @brief Put the current thread to sleep.
*
* This routine puts the current thread to sleep for @a duration
* milliseconds.
*
* @param duration Number of milliseconds to sleep.
*
* @return N/A
*/
extern void k_sleep(s32_t duration);
/**
* @brief Cause the current thread to busy wait.
*
* This routine causes the current thread to execute a "do nothing" loop for
* @a usec_to_wait microseconds.
*
* @return N/A
*/
extern void k_busy_wait(u32_t usec_to_wait);
/**
* @brief Yield the current thread.
*
* This routine causes the current thread to yield execution to another
* thread of the same or higher priority. If there are no other ready threads
* of the same or higher priority, the routine returns immediately.
*
* @return N/A
*/
extern void k_yield(void);
/**
* @brief Wake up a sleeping thread.
*
* This routine prematurely wakes up @a thread from sleeping.
*
* If @a thread is not currently sleeping, the routine has no effect.
*
* @param thread ID of thread to wake.
*
* @return N/A
*/
extern void k_wakeup(k_tid_t thread);
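/*
 * Illustrative sketch (not part of the original header): a thread that
 * sleeps and is woken early by another thread. sleeper_tid is a
 * hypothetical thread ID obtained at creation time.
 *
 * @code
 * // In the sleeping thread: returns early if another thread wakes it.
 * k_sleep(K_SECONDS(10));
 *
 * // In some other thread:
 * k_wakeup(sleeper_tid);
 * @endcode
 */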
/**
* @brief Get thread ID of the current thread.
*
* @return ID of current thread.
*/
extern k_tid_t k_current_get(void);
/**
* @brief Cancel thread performing a delayed start.
*
* This routine prevents @a thread from executing if it has not yet started
* execution. The thread must be re-spawned before it will execute.
*
* @param thread ID of thread to cancel.
*
* @retval 0 Thread spawning canceled.
* @retval -EINVAL Thread has already started executing.
*/
extern int k_thread_cancel(k_tid_t thread);
/**
* @brief Abort a thread.
*
* This routine permanently stops execution of @a thread. The thread is taken
* off all kernel queues it is part of (i.e. the ready queue, the timeout
* queue, or a kernel object wait queue). However, any kernel resources the
* thread might currently own (such as mutexes or memory blocks) are not
* released. It is the responsibility of the caller of this routine to ensure
* all necessary cleanup is performed.
*
* @param thread ID of thread to abort.
*
* @return N/A
*/
extern void k_thread_abort(k_tid_t thread);
/**
* @cond INTERNAL_HIDDEN
*/
/* timeout has timed out and is not on _timeout_q anymore */
#define _EXPIRED (-2)
/* timeout is not in use */
#define _INACTIVE (-1)
struct _static_thread_data {
	struct k_thread *init_thread;
	char *init_stack;
	unsigned int init_stack_size;
	void (*init_entry)(void *, void *, void *);
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	u32_t init_options;
	s32_t init_delay;
	void (*init_abort)(void);
	u32_t init_groups;
};
#define _THREAD_INITIALIZER(thread, stack, stack_size,           \
			    entry, p1, p2, p3,                   \
			    prio, options, delay, abort, groups) \
	{                                                        \
	.init_thread = (thread),                                 \
	.init_stack = (stack),                                   \
	.init_stack_size = (stack_size),                         \
	.init_entry = (void (*)(void *, void *, void *))entry,   \
	.init_p1 = (void *)p1,                                   \
	.init_p2 = (void *)p2,                                   \
	.init_p3 = (void *)p3,                                   \
	.init_prio = (prio),                                     \
	.init_options = (options),                               \
	.init_delay = (delay),                                   \
	.init_abort = (abort),                                   \
	.init_groups = (groups),                                 \
	}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Statically define and initialize a thread.
*
* The thread may be scheduled for immediate execution or a delayed start.
*
* Thread options are architecture-specific, and can include K_ESSENTIAL,
* K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
* them using "|" (the logical OR operator).
*
* The ID of the thread can be accessed using:
*
* @code extern const k_tid_t <name>; @endcode
*
* @param name Name of the thread.
* @param stack_size Stack size in bytes.
* @param entry Thread entry function.
* @param p1 1st entry point parameter.
* @param p2 2nd entry point parameter.
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
* @param delay Scheduling delay (in milliseconds), or K_NO_WAIT (for no delay).
*
* @internal It has been observed that the x86 compiler by default aligns
* these _static_thread_data structures to 32-byte boundaries, thereby
* wasting space. To work around this, force a 4-byte alignment.
*/
#define K_THREAD_DEFINE(name, stack_size,                                \
			entry, p1, p2, p3,                               \
			prio, options, delay)                            \
	char __noinit __stack _k_thread_stack_##name[stack_size];        \
	struct k_thread _k_thread_obj_##name;                            \
	struct _static_thread_data _k_thread_data_##name __aligned(4)    \
		__in_section(_static_thread_data, static, name) =        \
		_THREAD_INITIALIZER(&_k_thread_obj_##name,               \
				    _k_thread_stack_##name, stack_size,  \
				    entry, p1, p2, p3, prio, options, delay, \
				    NULL, 0);                            \
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
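/*
 * Illustrative sketch (not part of the original header): statically defining
 * a thread that starts 500 ms after boot. The names blink and blink_entry,
 * and the stack size and priority, are hypothetical.
 *
 * @code
 * static void blink_entry(void *p1, void *p2, void *p3)
 * {
 *         // thread body
 * }
 *
 * K_THREAD_DEFINE(blink, 512, blink_entry, NULL, NULL, NULL,
 *                 K_PRIO_PREEMPT(7), 0, 500);
 *
 * // In another source file, the thread ID is accessible as:
 * extern const k_tid_t blink;
 * @endcode
 */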
/**
* @brief Get a thread's priority.
*
* This routine gets the priority of @a thread.
*
* @param thread ID of thread whose priority is needed.
*
* @return Priority of @a thread.
*/
extern int k_thread_priority_get(k_tid_t thread);
/**
* @brief Set a thread's priority.
*
* This routine immediately changes the priority of @a thread.
*
* Rescheduling can occur immediately depending on the priority @a thread is
* set to:
*
* - If its priority is raised above the priority of the caller of this
* function, and the caller is preemptible, @a thread will be scheduled in.
*
* - If the caller operates on itself, it lowers its priority below that of
* other threads in the system, and the caller is preemptible, the thread of
* highest priority will be scheduled in.
*
* Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
* CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
* highest priority.
*
* @param thread ID of thread whose priority is to be set.
* @param prio New priority.
*
* @warning Changing the priority of a thread currently involved in mutex
* priority inheritance may result in undefined behavior.
*
* @return N/A
*/
extern void k_thread_priority_set(k_tid_t thread, int prio);
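/*
 * Illustrative sketch (not part of the original header): lowering the
 * current thread's own priority by one level (in the preemptible range,
 * larger numeric values mean lower priority).
 *
 * @code
 * int prio = k_thread_priority_get(k_current_get());
 *
 * k_thread_priority_set(k_current_get(), prio + 1);
 * @endcode
 */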
/**
* @brief Suspend a thread.
*
* This routine prevents the kernel scheduler from making @a thread the
* current thread. All other internal operations on @a thread are still
* performed; for example, any timeout it is waiting on keeps ticking,
* kernel objects it is waiting on are still handed to it, etc.
*
* If @a thread is already suspended, the routine has no effect.
*
* @param thread ID of thread to suspend.
*
* @return N/A
*/
extern void k_thread_suspend(k_tid_t thread);
/**
* @brief Resume a suspended thread.
*
* This routine allows the kernel scheduler to make @a thread the current
* thread, when it is next eligible for that role.
*
* If @a thread is not currently suspended, the routine has no effect.
*
* @param thread ID of thread to resume.
*
* @return N/A
*/
extern void k_thread_resume(k_tid_t thread);
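/*
 * Illustrative sketch (not part of the original header): temporarily taking
 * a worker thread out of the scheduler's consideration. worker_tid is
 * hypothetical.
 *
 * @code
 * k_thread_suspend(worker_tid);
 * // ... worker_tid is not scheduled, but its timeouts keep ticking ...
 * k_thread_resume(worker_tid);
 * @endcode
 */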
/**
* @brief Set time-slicing period and scope.
*
* This routine specifies how the scheduler will perform time slicing of
* preemptible threads.
*
* To enable time slicing, @a slice must be non-zero. The scheduler
* ensures that no thread runs for more than the specified time limit
* before other threads of that priority are given a chance to execute.
* Any thread whose priority is higher than @a prio is exempted, and may
* execute as long as desired without being preempted due to time slicing.
*
* Time slicing only limits the maximum amount of time a thread may continuously
* execute. Once the scheduler selects a thread for execution, there is no
* minimum guaranteed time the thread will execute before threads of greater or
* equal priority are scheduled.
*
* When the current thread is the only one of that priority eligible
* for execution, this routine has no effect; the thread is immediately
* rescheduled after the slice period expires.
*
 * To disable time slicing, set both @a slice and @a prio to zero.
*
* @param slice Maximum time slice length (in milliseconds).
* @param prio Highest thread priority level eligible for time slicing.
*
* @return N/A
*/
extern void k_sched_time_slice_set(s32_t slice, int prio);
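/*
 * Illustrative sketch (not part of the original header): enabling 10 ms
 * round-robin slicing for all preemptible threads of priority 0 or lower,
 * then disabling it again. The values are hypothetical.
 *
 * @code
 * k_sched_time_slice_set(10, 0);  // slice preemptible threads at prio >= 0
 * // ...
 * k_sched_time_slice_set(0, 0);   // disable time slicing
 * @endcode
 */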
/**
* @} end defgroup thread_apis
*/
/**
* @addtogroup isr_apis
* @{
*/
/**
* @brief Determine if code is running at interrupt level.
*
* This routine allows the caller to customize its actions, depending on
* whether it is a thread or an ISR.
*
* @note Can be called by ISRs.
*
* @return 0 if invoked by a thread.
* @return Non-zero if invoked by an ISR.
*/
extern int k_is_in_isr(void);
/**
* @brief Determine if code is running in a preemptible thread.
*
* This routine allows the caller to customize its actions, depending on
* whether it can be preempted by another thread. The routine returns a 'true'
* value if all of the following conditions are met:
*
 * - The code is running in a thread, not in an ISR.
* - The thread's priority is in the preemptible range.
* - The thread has not locked the scheduler.
*
* @note Can be called by ISRs.
*
* @return 0 if invoked by an ISR or by a cooperative thread.
* @return Non-zero if invoked by a preemptible thread.
*/
extern int k_is_preempt_thread(void);
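/*
 * Illustrative sketch (not part of the original header): checking the
 * execution context before calling an API that may block.
 *
 * @code
 * if (k_is_in_isr() || !k_is_preempt_thread()) {
 *         // running in an ISR or a non-preemptible thread: do not block
 * } else {
 *         // safe to call a blocking API here
 * }
 * @endcode
 */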
/**
* @} end addtogroup isr_apis
*/
/**
* @addtogroup thread_apis
* @{
*/
/**
* @brief Lock the scheduler.
*
* This routine prevents the current thread from being preempted by another
* thread by instructing the scheduler to treat it as a cooperative thread.
* If the thread subsequently performs an operation that makes it unready,
* it will be context switched out in the normal manner. When the thread
* again becomes the current thread, its non-preemptible status is maintained.
*
* This routine can be called recursively.
*
* @note k_sched_lock() and k_sched_unlock() should normally be used
* when the operation being performed can be safely interrupted by ISRs.
* However, if the amount of processing involved is very small, better
* performance may be obtained by using irq_lock() and irq_unlock().
*
* @return N/A
*/
extern void k_sched_lock(void);
/**
* @brief Unlock the scheduler.
*
* This routine reverses the effect of a previous call to k_sched_lock().
* A thread must call the routine once for each time it called k_sched_lock()
* before the thread becomes preemptible.
*
* @return N/A
*/
extern void k_sched_unlock(void);
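/*
 * Illustrative sketch (not part of the original header): using the scheduler
 * lock to guard a short critical section against preemption by other
 * threads; ISRs are not blocked.
 *
 * @code
 * k_sched_lock();
 * // update state shared with other threads
 * k_sched_unlock();
 * @endcode
 */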
/**
* @brief Set current thread's custom data.
*
 * This routine sets the custom data for the current thread to @a value.
*
* Custom data is not used by the kernel itself, and is freely available
* for a thread to use as it sees fit. It can be used as a framework
* upon which to build thread-local storage.
*
* @param value New custom data value.
*
* @return N/A
*/
extern void k_thread_custom_data_set(void *value);
/**
* @brief Get current thread's custom data.
*
* This routine returns the custom data for the current thread.
*
* @return Current custom data value.
*/
extern void *k_thread_custom_data_get(void);
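/*
 * Illustrative sketch (not part of the original header): using custom data
 * as crude thread-local storage, assuming CONFIG_THREAD_CUSTOM_DATA is
 * enabled. struct my_ctx is hypothetical.
 *
 * @code
 * struct my_ctx { int state; };
 * static struct my_ctx ctx;
 *
 * k_thread_custom_data_set(&ctx);
 * // ... later, in the same thread:
 * struct my_ctx *p = k_thread_custom_data_get();
 * @endcode
 */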
/**
* @} end addtogroup thread_apis
*/
#include <sys_clock.h>
/**
* @addtogroup clock_apis
* @{
*/
/**
* @brief Generate null timeout delay.
*
 * This macro generates a timeout delay that instructs a kernel API
* not to wait if the requested operation cannot be performed immediately.
*
* @return Timeout delay value.
*/
#define K_NO_WAIT 0
/**
* @brief Generate timeout delay from milliseconds.
*
 * This macro generates a timeout delay that instructs a kernel API
* to wait up to @a ms milliseconds to perform the requested operation.
*
* @param ms Duration in milliseconds.
*
* @return Timeout delay value.
*/
#define K_MSEC(ms) (ms)
/**
* @brief Generate timeout delay from seconds.
*
 * This macro generates a timeout delay that instructs a kernel API
* to wait up to @a s seconds to perform the requested operation.
*
* @param s Duration in seconds.
*
* @return Timeout delay value.
*/
#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
/**
* @brief Generate timeout delay from minutes.
*
 * This macro generates a timeout delay that instructs a kernel API
* to wait up to @a m minutes to perform the requested operation.
*
* @param m Duration in minutes.
*
* @return Timeout delay value.
*/
#define K_MINUTES(m) K_SECONDS((m) * 60)
/**
* @brief Generate timeout delay from hours.
*
 * This macro generates a timeout delay that instructs a kernel API
* to wait up to @a h hours to perform the requested operation.
*
* @param h Duration in hours.
*
* @return Timeout delay value.
*/
#define K_HOURS(h) K_MINUTES((h) * 60)
/**
* @brief Generate infinite timeout delay.
*
 * This macro generates a timeout delay that instructs a kernel API
* to wait as long as necessary to perform the requested operation.
*
* @return Timeout delay value.
*/
#define K_FOREVER (-1)
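/*
 * Illustrative sketch (not part of the original header): in this kernel
 * version every delay macro reduces to a millisecond count, so any of them
 * can be passed to an API that takes a timeout argument.
 *
 * @code
 * k_sleep(K_MSEC(250));    // sleep for 250 ms
 * k_sleep(K_SECONDS(2));   // sleep for 2 s, i.e. K_MSEC(2000)
 * k_sleep(K_MINUTES(1));   // sleep for 1 min
 * @endcode
 */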
/**
* @} end addtogroup clock_apis
*/
/**
* @cond INTERNAL_HIDDEN
*/
/* kernel clocks */
#if (sys_clock_ticks_per_sec == 1000) || \
	(sys_clock_ticks_per_sec == 500) || \
	(sys_clock_ticks_per_sec == 250) || \
	(sys_clock_ticks_per_sec == 125) || \
	(sys_clock_ticks_per_sec == 100) || \
	(sys_clock_ticks_per_sec == 50) || \
	(sys_clock_ticks_per_sec == 25) || \
	(sys_clock_ticks_per_sec == 20) || \
	(sys_clock_ticks_per_sec == 10) || \
	(sys_clock_ticks_per_sec == 1)
#define _ms_per_tick (MSEC_PER_SEC / sys_clock_ticks_per_sec)
#else
/* yields horrible 64-bit math on many architectures: try to avoid */
#define _NON_OPTIMIZED_TICKS_PER_SEC
#endif
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
extern s32_t _ms_to_ticks(s32_t ms);
#else
static ALWAYS_INLINE s32_t _ms_to_ticks(s32_t ms)
{
	return (s32_t)ceiling_fraction((u32_t)ms, _ms_per_tick);
}
#endif
/* added tick needed to account for tick in progress */
#ifdef CONFIG_TICKLESS_KERNEL
#define _TICK_ALIGN 0
#else
#define _TICK_ALIGN 1
#endif
static inline s64_t __ticks_to_ms(s64_t ticks)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
	return (MSEC_PER_SEC * (u64_t)ticks) / sys_clock_ticks_per_sec;
#else
	return (u64_t)ticks * _ms_per_tick;
#endif
#else
	__ASSERT(ticks == 0, "");
	return 0;
#endif
}
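/*
 * Worked example (illustrative, assuming sys_clock_ticks_per_sec == 100):
 * _ms_per_tick is MSEC_PER_SEC / 100 == 10, so _ms_to_ticks(25) rounds up
 * to ceiling_fraction(25, 10) == 3 ticks, and __ticks_to_ms(3) == 30 ms.
 */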
struct k_timer {
	/*
	 * _timeout structure must be first here if we want to use
	 * dynamic timer allocation. timeout.node is used in the double-linked
	 * list of free timers
	 */
	struct _timeout timeout;
	/* wait queue for the (single) thread waiting on this timer */
	_wait_q_t wait_q;
	/* runs in ISR context */
	void (*expiry_fn)(struct k_timer *);
	/* runs in the context of the thread that calls k_timer_stop() */
	void (*stop_fn)(struct k_timer *);
	/* timer period */
	s32_t period;
	/* timer status */
	u32_t status;
	/* user-specific data, also used to support legacy features */
	void *user_data;
	_OBJECT_TRACING_NEXT_PTR(k_timer);
};
#define K_TIMER_INITIALIZER(obj, expiry, stop) \
	{ \
	.timeout.delta_ticks_from_prev = _INACTIVE, \
	.timeout.wait_q = NULL, \
	.timeout.thread = NULL, \
	.timeout.func = _timer_expiration_handler, \
	.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
	.expiry_fn = expiry, \
	.stop_fn = stop, \
	.status = 0, \
	.user_data = 0, \
	_OBJECT_TRACING_INIT \
	}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup timer_apis Timer APIs
* @ingroup kernel_apis
* @{
*/
/**
* @typedef k_timer_expiry_t
* @brief Timer expiry function type.
*
* A timer's expiry function is executed by the system clock interrupt handler
* each time the timer expires. The expiry function is optional, and is only
* invoked if the timer has been initialized with one.
*
* @param timer Address of timer.
*
* @return N/A
*/