/*
* Copyright (c) 2016, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* @brief Public kernel APIs.
*/
#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_
#if !defined(_ASMLANGUAGE)
#include <kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <toolchain.h>
#include <tracing/tracing_macros.h>
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
#include <timing/timing.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Kernel APIs
* @defgroup kernel_apis Kernel APIs
* @{
* @}
*/
#define K_ANY NULL
#define K_END NULL
#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
#error Zero available thread priorities defined!
#endif
#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)
#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT(obj) \
.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define _POLL_EVENT sys_dlist_t poll_events
#else
#define _POLL_EVENT_OBJ_INIT(obj)
#define _POLL_EVENT
#endif
struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_mem_pool;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
enum execution_context_types {
K_ISR = 0,
K_COOP_THREAD,
K_PREEMPT_THREAD,
};
/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
/**
* @addtogroup thread_apis
* @{
*/
typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
void *user_data);
/**
* @brief Iterate over all the threads in the system.
*
* This routine iterates over all the threads in the system and
* calls the user_cb function for each thread.
*
* @param user_cb Pointer to the user callback function.
* @param user_data Pointer to user data.
*
* @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
* to be effective.
* @note This API uses @ref k_spin_lock to protect the _kernel.threads
* list which means creation of new threads and terminations of existing
* threads are blocked until this API returns.
*
* @return N/A
*/
extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
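/*
 * Example: a minimal usage sketch (not part of the upstream header) that
 * counts all threads in the system via k_thread_foreach(). Assumes
 * @kconfig{CONFIG_THREAD_MONITOR} is enabled; count_cb and count_threads
 * are hypothetical names.
 *
 * @code
 * static void count_cb(const struct k_thread *thread, void *user_data)
 * {
 *	unsigned int *count = user_data;
 *
 *	ARG_UNUSED(thread);
 *	(*count)++;
 * }
 *
 * unsigned int count_threads(void)
 * {
 *	unsigned int count = 0;
 *
 *	k_thread_foreach(count_cb, &count);
 *	return count;
 * }
 * @endcode
 */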
/**
* @brief Iterate over all the threads in the system without locking.
*
 * This routine works exactly the same as @ref k_thread_foreach
 * but unlocks interrupts while user_cb is executed.
*
* @param user_cb Pointer to the user callback function.
* @param user_data Pointer to user data.
*
* @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
* to be effective.
* @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
* queue elements. It unlocks it during user callback function processing.
 * If a new thread is created while this @c foreach function is in progress,
 * the new thread will not be included in the enumeration.
 * If a thread is aborted during this enumeration, there is a race and the
 * aborted thread may still be included in the enumeration.
 * @note If a thread is aborted and the memory occupied by its @c k_thread
 * structure is reused while @c k_thread_foreach_unlocked is in progress,
 * the system may become unstable.
 * This function may then never return, as it could follow a stale @c next
 * pointer, treating memory that no longer holds a @c k_thread structure as
 * if it did.
 * Do not reuse the memory occupied by the @c k_thread structure of an
 * aborted thread if it was aborted after this function was called, in any
 * context.
*/
extern void k_thread_foreach_unlocked(
k_thread_user_cb_t user_cb, void *user_data);
/** @} */
/**
* @defgroup thread_apis Thread APIs
* @ingroup kernel_apis
* @{
*/
#endif /* !_ASMLANGUAGE */
/*
* Thread user options. May be needed by assembly code. Common part uses low
* bits, arch-specific use high bits.
*/
/**
* @brief system thread that must not abort
* */
#define K_ESSENTIAL (BIT(0))
#if defined(CONFIG_FPU_SHARING)
/**
* @brief FPU registers are managed by context switch
*
* @details
* This option indicates that the thread uses the CPU's floating point
* registers. This instructs the kernel to take additional steps to save
* and restore the contents of these registers when scheduling the thread.
* No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
*/
#define K_FP_REGS (BIT(1))
#endif
/**
* @brief user mode thread
*
* This thread has dropped from supervisor mode to user mode and consequently
* has additional restrictions
*/
#define K_USER (BIT(2))
/**
* @brief Inherit Permissions
*
* @details
* Indicates that the thread being created should inherit all kernel object
* permissions from the thread that created it. No effect if
* @kconfig{CONFIG_USERSPACE} is not enabled.
*/
#define K_INHERIT_PERMS (BIT(3))
/**
* @brief Callback item state
*
* @details
* This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track operations invoked
 * from within a user-provided callback they have been invoked to handle.
* Effectively it serves as a tiny bit of zero-overhead TLS data.
*/
#define K_CALLBACK_STATE (BIT(4))
#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */
#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
/* thread uses SSEx (and also FP) registers */
#define K_SSE_REGS (BIT(7))
#endif
#endif
/* end - thread options */
#if !defined(_ASMLANGUAGE)
/**
* @brief Create a thread.
*
* This routine initializes a thread, then schedules it for execution.
*
* The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start, the kernel
* scheduler may preempt the current thread to allow the new thread to
* execute.
*
* Thread options are architecture-specific, and can include K_ESSENTIAL,
* K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
* them using "|" (the logical OR operator).
*
* Stack objects passed to this function must be originally defined with
* either of these macros in order to be portable:
*
* - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
* supervisor threads.
* - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
* threads only. These stacks use less memory if CONFIG_USERSPACE is
* enabled.
*
* The stack_size parameter has constraints. It must either be:
*
* - The original size value passed to K_THREAD_STACK_DEFINE() or
* K_KERNEL_STACK_DEFINE()
* - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
* defined with K_THREAD_STACK_DEFINE()
* - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
* defined with K_KERNEL_STACK_DEFINE().
*
 * Using other values, or sizeof(stack), may produce undefined behavior.
*
* @param new_thread Pointer to uninitialized struct k_thread
* @param stack Pointer to the stack space.
* @param stack_size Stack size in bytes.
* @param entry Thread entry function.
* @param p1 1st entry point parameter.
* @param p2 2nd entry point parameter.
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
* @param delay Scheduling delay, or K_NO_WAIT (for no delay).
*
* @return ID of new thread.
*
*/
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, k_timeout_t delay);
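/*
 * Example: an illustrative sketch (not part of the upstream header) of
 * creating a thread at preemptible priority 5 with a statically defined
 * stack. The names my_stack, my_thread, my_entry, and start_worker are
 * hypothetical.
 *
 * @code
 * #define MY_STACK_SIZE 1024
 * K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
 * struct k_thread my_thread;
 *
 * static void my_entry(void *p1, void *p2, void *p3)
 * {
 *	// thread body
 * }
 *
 * void start_worker(void)
 * {
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 *	ARG_UNUSED(tid);
 * }
 * @endcode
 */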
/**
* @brief Drop a thread's privileges permanently to user mode
*
* This allows a supervisor thread to be re-used as a user thread.
* This function does not return, but control will transfer to the provided
* entry point as if this was a new user thread.
*
* The implementation ensures that the stack buffer contents are erased.
* Any thread-local storage will be reverted to a pristine state.
*
* Memory domain membership, resource pool assignment, kernel object
* permissions, priority, and thread options are preserved.
*
* A common use of this function is to re-use the main thread as a user thread
* once all supervisor mode-only tasks have been completed.
*
* @param entry Function to start executing from
* @param p1 1st entry point parameter
* @param p2 2nd entry point parameter
* @param p3 3rd entry point parameter
*/
extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
void *p1, void *p2,
void *p3);
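/*
 * Example: an illustrative sketch (assuming @kconfig{CONFIG_USERSPACE} is
 * enabled) of the common pattern of re-using the main thread as a user
 * thread; user_main is a hypothetical name.
 *
 * @code
 * static void user_main(void *p1, void *p2, void *p3)
 * {
 *	// runs in user mode; control never returns to main()
 * }
 *
 * void main(void)
 * {
 *	// ... supervisor-mode-only setup ...
 *	k_thread_user_mode_enter(user_main, NULL, NULL, NULL);
 *	// not reached
 * }
 * @endcode
 */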
/**
* @brief Grant a thread access to a set of kernel objects
*
* This is a convenience function. For the provided thread, grant access to
* the remaining arguments, which must be pointers to kernel objects.
*
* The thread object must be initialized (i.e. running). The objects don't
* need to be.
* Note that NULL shouldn't be passed as an argument.
*
* @param thread Thread to grant access to objects
* @param ... list of kernel object pointers
*/
#define k_thread_access_grant(thread, ...) \
FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)
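/*
 * Example fragment (hypothetical objects, @kconfig{CONFIG_USERSPACE}
 * assumed): grant a not-yet-started user-mode thread access to a semaphore
 * and a message queue before starting it.
 *
 * @code
 * k_thread_access_grant(tid, &my_sem, &my_msgq);
 * k_thread_start(tid);
 * @endcode
 */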
/**
* @brief Assign a resource memory pool to a thread
*
* By default, threads have no resource pool assigned unless their parent
* thread has a resource pool, in which case it is inherited. Multiple
* threads may be assigned to the same memory pool.
*
* Changing a thread's resource pool will not migrate allocations from the
* previous pool.
*
* @param thread Target thread to assign a memory pool for resource requests.
* @param heap Heap object to use for resources,
* or NULL if the thread should no longer have a memory pool.
*/
static inline void k_thread_heap_assign(struct k_thread *thread,
struct k_heap *heap)
{
thread->resource_pool = heap;
}
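/*
 * Example sketch (my_heap and my_thread are hypothetical): give a thread a
 * dedicated pool for kernel-side allocations made on its behalf.
 *
 * @code
 * K_HEAP_DEFINE(my_heap, 2048);
 *
 * k_thread_heap_assign(&my_thread, &my_heap);
 * @endcode
 */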
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
* @brief Obtain stack usage information for the specified thread
*
* User threads will need to have permission on the target thread object.
*
* Some hardware may prevent inspection of a stack buffer currently in use.
* If this API is called from supervisor mode, on the currently running thread,
* on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
* error will be generated.
*
* @param thread Thread to inspect stack information
* @param unused_ptr Output parameter, filled in with the unused stack space
* of the target thread in bytes.
* @return 0 on success
* @return -EBADF Bad thread object (user mode only)
* @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
* @return -EINVAL Thread is uninitialized or exited (user mode only)
* @return -EFAULT Bad memory address for unused_ptr (user mode only)
*/
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
size_t *unused_ptr);
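/*
 * Example sketch: log the current thread's unused stack space. Assumes the
 * Kconfig options guarding this API are enabled.
 *
 * @code
 * size_t unused;
 *
 * if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
 *	printk("unused stack: %u bytes\n", (unsigned int)unused);
 * }
 * @endcode
 */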
#endif
#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/**
* @brief Assign the system heap as a thread's resource pool
*
 * Similar to k_thread_heap_assign(), but the thread will use
* the kernel heap to draw memory.
*
* Use with caution, as a malicious thread could perform DoS attacks on the
* kernel heap.
*
* @param thread Target thread to assign the system heap for resource requests
*
*/
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */
/**
* @brief Sleep until a thread exits
*
* The caller will be put to sleep until the target thread exits, either due
* to being aborted, self-exiting, or taking a fatal error. This API returns
* immediately if the thread isn't running.
*
* This API may only be called from ISRs with a K_NO_WAIT timeout,
* where it can be useful as a predicate to detect when a thread has
* aborted.
*
* @param thread Thread to wait to exit
* @param timeout upper bound time to wait for the thread to exit.
* @retval 0 success, target thread has exited or wasn't running
* @retval -EBUSY returned without waiting
* @retval -EAGAIN waiting period timed out
* @retval -EDEADLK target thread is joining on the caller, or target thread
* is the caller
*/
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
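/*
 * Example sketch (my_thread hypothetical): wait up to 100 ms for a worker
 * thread to exit, aborting it if the wait times out.
 *
 * @code
 * if (k_thread_join(&my_thread, K_MSEC(100)) == -EAGAIN) {
 *	k_thread_abort(&my_thread);
 * }
 * @endcode
 */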
/**
* @brief Put the current thread to sleep.
*
 * This routine puts the current thread to sleep for the duration specified
 * by @a timeout, given as a k_timeout_t object.
 *
 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed, or the number of
 * milliseconds left to sleep if the thread was woken up by a \ref k_wakeup
 * call.
*/
__syscall int32_t k_sleep(k_timeout_t timeout);
/**
* @brief Put the current thread to sleep.
*
 * This routine puts the current thread to sleep for @a ms milliseconds.
*
* @param ms Number of milliseconds to sleep.
*
 * @return Zero if the requested time has elapsed, or the number of
 * milliseconds left to sleep if the thread was woken up by a \ref k_wakeup
 * call.
*/
static inline int32_t k_msleep(int32_t ms)
{
return k_sleep(Z_TIMEOUT_MS(ms));
}
/**
* @brief Put the current thread to sleep with microsecond resolution.
*
* This function is unlikely to work as expected without kernel tuning.
* In particular, because the lower bound on the duration of a sleep is
* the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
* adjusted to achieve the resolution desired. The implications of doing
* this must be understood before attempting to use k_usleep(). Use with
* caution.
*
* @param us Number of microseconds to sleep.
*
 * @return Zero if the requested time has elapsed, or the number of
 * microseconds left to sleep if the thread was woken up by a \ref k_wakeup
 * call.
*/
__syscall int32_t k_usleep(int32_t us);
/**
* @brief Cause the current thread to busy wait.
*
* This routine causes the current thread to execute a "do nothing" loop for
* @a usec_to_wait microseconds.
*
* @note The clock used for the microsecond-resolution delay here may
* be skewed relative to the clock used for system timeouts like
* k_sleep(). For example k_busy_wait(1000) may take slightly more or
* less time than k_sleep(K_MSEC(1)), with the offset dependent on
* clock tolerances.
*
* @return N/A
*/
__syscall void k_busy_wait(uint32_t usec_to_wait);
/**
* @brief Yield the current thread.
*
* This routine causes the current thread to yield execution to another
* thread of the same or higher priority. If there are no other ready threads
* of the same or higher priority, the routine returns immediately.
*
* @return N/A
*/
__syscall void k_yield(void);
/**
* @brief Wake up a sleeping thread.
*
* This routine prematurely wakes up @a thread from sleeping.
*
* If @a thread is not currently sleeping, the routine has no effect.
*
* @param thread ID of thread to wake.
*
* @return N/A
*/
__syscall void k_wakeup(k_tid_t thread);
/**
* @brief Get thread ID of the current thread.
*
* This unconditionally queries the kernel via a system call.
*
* @return ID of current thread.
*/
__attribute_const__
__syscall k_tid_t z_current_get(void);
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Thread-local cache of current thread ID, set in z_thread_entry() */
extern __thread k_tid_t z_tls_current;
#endif
/**
* @brief Get thread ID of the current thread.
*
* @return ID of current thread.
*
*/
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
return z_tls_current;
#else
return z_current_get();
#endif
}
/**
* @brief Abort a thread.
*
* This routine permanently stops execution of @a thread. The thread is taken
* off all kernel queues it is part of (i.e. the ready queue, the timeout
* queue, or a kernel object wait queue). However, any kernel resources the
* thread might currently own (such as mutexes or memory blocks) are not
* released. It is the responsibility of the caller of this routine to ensure
* all necessary cleanup is performed.
*
* After k_thread_abort() returns, the thread is guaranteed not to be
* running or to become runnable anywhere on the system. Normally
* this is done via blocking the caller (in the same manner as
* k_thread_join()), but in interrupt context on SMP systems the
* implementation is required to spin for threads that are running on
* other CPUs. Note that as specified, this means that on SMP
* platforms it is possible for application code to create a deadlock
* condition by simultaneously aborting a cycle of threads using at
* least one termination from interrupt context. Zephyr cannot detect
* all such conditions.
*
* @param thread ID of thread to abort.
*
* @return N/A
*/
__syscall void k_thread_abort(k_tid_t thread);
/**
* @brief Start an inactive thread
*
* If a thread was created with K_FOREVER in the delay parameter, it will
* not be added to the scheduling queue until this function is called
* on it.
*
* @param thread thread to start
*/
__syscall void k_thread_start(k_tid_t thread);
extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
#ifdef CONFIG_SYS_CLOCK_EXISTS
/**
* @brief Get time when a thread wakes up, in system ticks
*
* This routine computes the system uptime when a waiting thread next
* executes, in units of system ticks. If the thread is not waiting,
 * it returns the current system time.
*/
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);
static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
const struct k_thread *t)
{
return z_timeout_expires(&t->base.timeout);
}
/**
* @brief Get time remaining before a thread wakes up, in system ticks
*
* This routine computes the time remaining before a waiting thread
* next executes, in units of system ticks. If the thread is not
* waiting, it returns zero.
*/
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);
static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
const struct k_thread *t)
{
return z_timeout_remaining(&t->base.timeout);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */
/**
* @cond INTERNAL_HIDDEN
*/
/* timeout has timed out and is not on _timeout_q anymore */
#define _EXPIRED (-2)
struct _static_thread_data {
struct k_thread *init_thread;
k_thread_stack_t *init_stack;
unsigned int init_stack_size;
k_thread_entry_t init_entry;
void *init_p1;
void *init_p2;
void *init_p3;
int init_prio;
uint32_t init_options;
int32_t init_delay;
void (*init_abort)(void);
const char *init_name;
};
#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
entry, p1, p2, p3, \
prio, options, delay, abort, tname) \
{ \
.init_thread = (thread), \
.init_stack = (stack), \
.init_stack_size = (stack_size), \
.init_entry = (k_thread_entry_t)entry, \
.init_p1 = (void *)p1, \
.init_p2 = (void *)p2, \
.init_p3 = (void *)p3, \
.init_prio = (prio), \
.init_options = (options), \
.init_delay = (delay), \
.init_abort = (abort), \
.init_name = STRINGIFY(tname), \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Statically define and initialize a thread.
*
* The thread may be scheduled for immediate execution or a delayed start.
*
* Thread options are architecture-specific, and can include K_ESSENTIAL,
* K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
* them using "|" (the logical OR operator).
*
* The ID of the thread can be accessed using:
*
* @code extern const k_tid_t <name>; @endcode
*
* @param name Name of the thread.
* @param stack_size Stack size in bytes.
* @param entry Thread entry function.
* @param p1 1st entry point parameter.
* @param p2 2nd entry point parameter.
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
* @param delay Scheduling delay (in milliseconds), zero for no delay.
*
*
* @internal It has been observed that the x86 compiler by default aligns
* these _static_thread_data structures to 32-byte boundaries, thereby
* wasting space. To work around this, force a 4-byte alignment.
*
*/
#define K_THREAD_DEFINE(name, stack_size, \
entry, p1, p2, p3, \
prio, options, delay) \
K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
struct k_thread _k_thread_obj_##name; \
STRUCT_SECTION_ITERABLE(_static_thread_data, _k_thread_data_##name) = \
Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
_k_thread_stack_##name, stack_size, \
entry, p1, p2, p3, prio, options, delay, \
NULL, name); \
const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
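/*
 * Example sketch: statically define a thread that starts 500 ms after boot.
 * blink and blink_entry are hypothetical names.
 *
 * @code
 * void blink_entry(void *p1, void *p2, void *p3)
 * {
 *	// thread body
 * }
 *
 * K_THREAD_DEFINE(blink, 1024, blink_entry, NULL, NULL, NULL,
 *		   K_PRIO_PREEMPT(7), 0, 500);
 * @endcode
 */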
/**
* @brief Get a thread's priority.
*
* This routine gets the priority of @a thread.
*
* @param thread ID of thread whose priority is needed.
*
* @return Priority of @a thread.
*/
__syscall int k_thread_priority_get(k_tid_t thread);
/**
* @brief Set a thread's priority.
*
* This routine immediately changes the priority of @a thread.
*
* Rescheduling can occur immediately depending on the priority @a thread is
* set to:
*
* - If its priority is raised above the priority of the caller of this
* function, and the caller is preemptible, @a thread will be scheduled in.
*
* - If the caller operates on itself, it lowers its priority below that of
* other threads in the system, and the caller is preemptible, the thread of
* highest priority will be scheduled in.
*
* Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
* CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
* highest priority.
*
* @param thread ID of thread whose priority is to be set.
* @param prio New priority.
*
* @warning Changing the priority of a thread currently involved in mutex
* priority inheritance may result in undefined behavior.
*
* @return N/A
*/
__syscall void k_thread_priority_set(k_tid_t thread, int prio);
#ifdef CONFIG_SCHED_DEADLINE
/**
* @brief Set deadline expiration time for scheduler
*
* This sets the "deadline" expiration as a time delta from the
* current time, in the same units used by k_cycle_get_32(). The
* scheduler (when deadline scheduling is enabled) will choose the
* next expiring thread when selecting between threads at the same
* static priority. Threads at different priorities will be scheduled
* according to their static priority.
*
* @note Deadlines are stored internally using 32 bit unsigned
* integers. The number of cycles between the "first" deadline in the
* scheduler queue and the "last" deadline must be less than 2^31 (i.e
* a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
*
 * @note Despite the API naming, the scheduler makes no guarantee that
 * the thread WILL be scheduled within that deadline, nor does it take
* extra metadata (like e.g. the "runtime" and "period" parameters in
* Linux sched_setattr()) that allows the kernel to validate the
* scheduling for achievability. Such features could be implemented
* above this call, which is simply input to the priority selection
* logic.
*
* @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
* configuration.
*
* @param thread A thread on which to set the deadline
* @param deadline A time delta, in cycle units
*
*/
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
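/*
 * Example fragment (assumes @kconfig{CONFIG_SCHED_DEADLINE}): ask the
 * scheduler to prefer the current thread over same-priority peers whose
 * deadlines are further out. The k_ms_to_cyc_ceil32() conversion helper is
 * used here to express the deadline in cycle units.
 *
 * @code
 * k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(10));
 * @endcode
 */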
#endif
#ifdef CONFIG_SCHED_CPU_MASK
/**
* @brief Sets all CPU enable masks to zero
*
* After this returns, the thread will no longer be schedulable on any
* CPUs. The thread must not be currently runnable.
*
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
* configuration.
*
* @param thread Thread to operate upon
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_mask_clear(k_tid_t thread);
/**
* @brief Sets all CPU enable masks to one
*
* After this returns, the thread will be schedulable on any CPU. The
* thread must not be currently runnable.
*
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
* configuration.
*
* @param thread Thread to operate upon
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_mask_enable_all(k_tid_t thread);
/**
* @brief Enable thread to run on specified CPU
*
* The thread must not be currently runnable.
*
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
* configuration.
*
* @param thread Thread to operate upon
* @param cpu CPU index
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
/**
 * @brief Prevent thread from running on specified CPU
*
* The thread must not be currently runnable.
*
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
* configuration.
*
* @param thread Thread to operate upon
* @param cpu CPU index
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
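/*
 * Example fragment: pin a not-yet-started thread (e.g. created with a
 * K_FOREVER delay; tid is hypothetical) to CPU 0 only, then start it.
 *
 * @code
 * k_thread_cpu_mask_clear(tid);
 * k_thread_cpu_mask_enable(tid, 0);
 * k_thread_start(tid);
 * @endcode
 */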
#endif
/**
* @brief Suspend a thread.
*
* This routine prevents the kernel scheduler from making @a thread
* the current thread. All other internal operations on @a thread are
* still performed; for example, kernel objects it is waiting on are
* still handed to it. Note that any existing timeouts
* (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
* will be canceled. On resume, the thread will begin running
* immediately and return from the blocked call.
*
* If @a thread is already suspended, the routine has no effect.
*
* @param thread ID of thread to suspend.
*
* @return N/A
*/
__syscall void k_thread_suspend(k_tid_t thread);
/**
* @brief Resume a suspended thread.
*
* This routine allows the kernel scheduler to make @a thread the current
* thread, when it is next eligible for that role.
*
* If @a thread is not currently suspended, the routine has no effect.
*
* @param thread ID of thread to resume.
*
* @return N/A
*/
__syscall void k_thread_resume(k_tid_t thread);
/**
* @brief Set time-slicing period and scope.
*
* This routine specifies how the scheduler will perform time slicing of
* preemptible threads.
*
* To enable time slicing, @a slice must be non-zero. The scheduler
* ensures that no thread runs for more than the specified time limit
* before other threads of that priority are given a chance to execute.
* Any thread whose priority is higher than @a prio is exempted, and may
* execute as long as desired without being preempted due to time slicing.
*
* Time slicing only limits the maximum amount of time a thread may continuously
* execute. Once the scheduler selects a thread for execution, there is no
* minimum guaranteed time the thread will execute before threads of greater or
* equal priority are scheduled.
*
* When the current thread is the only one of that priority eligible
* for execution, this routine has no effect; the thread is immediately
* rescheduled after the slice period expires.
*
* To disable timeslicing, set both @a slice and @a prio to zero.
*
* @param slice Maximum time slice length (in milliseconds).
* @param prio Highest thread priority level eligible for time slicing.
*
* @return N/A
*/
extern void k_sched_time_slice_set(int32_t slice, int prio);
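/*
 * Example fragment: give every thread whose priority is numerically >= 0
 * (i.e. all preemptible threads) a 10 ms time slice.
 *
 * @code
 * k_sched_time_slice_set(10, 0);
 * @endcode
 */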
/** @} */
/**
* @addtogroup isr_apis
* @{
*/
/**
* @brief Determine if code is running at interrupt level.
*
* This routine allows the caller to customize its actions, depending on
* whether it is a thread or an ISR.
*
* @funcprops \isr_ok
*
* @return false if invoked by a thread.
* @return true if invoked by an ISR.
*/
extern bool k_is_in_isr(void);
/**
* @brief Determine if code is running in a preemptible thread.
*
* This routine allows the caller to customize its actions, depending on
* whether it can be preempted by another thread. The routine returns a 'true'
* value if all of the following conditions are met:
*
* - The code is running in a thread, not at ISR.
* - The thread's priority is in the preemptible range.
* - The thread has not locked the scheduler.
*
* @funcprops \isr_ok
*
* @return 0 if invoked by an ISR or by a cooperative thread.
* @return Non-zero if invoked by a preemptible thread.
*/
__syscall int k_is_preempt_thread(void);
/**
* @brief Test whether startup is in the before-main-task phase.
*
* This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
*
* @funcprops \isr_ok
*
* @return true if invoked before post-kernel initialization
* @return false if invoked during/after post-kernel initialization
*/
static inline bool k_is_pre_kernel(void)
{
extern bool z_sys_post_kernel; /* in init.c */
return !z_sys_post_kernel;
}
/**
* @}
*/
/**
* @addtogroup thread_apis
* @{
*/
/**
* @brief Lock the scheduler.
*
* This routine prevents the current thread from being preempted by another
* thread by instructing the scheduler to treat it as a cooperative thread.
* If the thread subsequently performs an operation that makes it unready,
* it will be context switched out in the normal manner. When the thread
* again becomes the current thread, its non-preemptible status is maintained.
*
* This routine can be called recursively.
*
* @note k_sched_lock() and k_sched_unlock() should normally be used
* when the operation being performed can be safely interrupted by ISRs.
* However, if the amount of processing involved is very small, better
* performance may be obtained by using irq_lock() and irq_unlock().
*
* @return N/A
*/
extern void k_sched_lock(void);
/**
* @brief Unlock the scheduler.
*
* This routine reverses the effect of a previous call to k_sched_lock().
* A thread must call the routine once for each time it called k_sched_lock()
* before the thread becomes preemptible.
*
* @return N/A
*/
extern void k_sched_unlock(void);
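/*
 * Example fragment: briefly protect a multi-step update from preemption by
 * other threads (ISRs still run).
 *
 * @code
 * k_sched_lock();
 * // update shared state that only threads touch
 * k_sched_unlock();
 * @endcode
 */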
/**
* @brief Set current thread's custom data.
*
 * This routine sets the custom data for the current thread to @a value.
*
* Custom data is not used by the kernel itself, and is freely available
* for a thread to use as it sees fit. It can be used as a framework
* upon which to build thread-local storage.
*
* @param value New custom data value.
*
* @return N/A
*
*/
__syscall void k_thread_custom_data_set(void *value);
/**
* @brief Get current thread's custom data.
*
* This routine returns the custom data for the current thread.
*
* @return Current custom data value.
*/
__syscall void *k_thread_custom_data_get(void);
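/*
 * Example sketch (assumes @kconfig{CONFIG_THREAD_CUSTOM_DATA}): use the
 * custom data slot as an ad-hoc per-thread context pointer; struct my_ctx
 * is hypothetical.
 *
 * @code
 * static struct my_ctx ctx;
 *
 * k_thread_custom_data_set(&ctx);
 * // ... later, in the same thread ...
 * struct my_ctx *p = k_thread_custom_data_get();
 * @endcode
 */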