SuperTinyKernel™ RTOS 1.05.3
Lightweight, high-performance, deterministic, bare-metal C++ RTOS for resource-constrained embedded systems. MIT Open Source License.
Loading...
Searching...
No Matches
stk.h
Go to the documentation of this file.
1/*
2 * SuperTinyKernel(TM) RTOS: Lightweight High-Performance Deterministic C++ RTOS for Embedded Systems.
3 *
4 * Source: https://github.com/SuperTinyKernel-RTOS
5 *
6 * Copyright (c) 2022-2026 Neutron Code Limited <stk@neutroncode.com>. All Rights Reserved.
7 * License: MIT License, see LICENSE for a full text.
8 */
9
10#ifndef STK_H_
11#define STK_H_
12
13#include "stk_helper.h"
19
34
35namespace stk {
36
79template <uint8_t TMode, uint32_t TSize, class TStrategy, class TPlatform>
81{
82protected:
88
94
99 enum ERequest : uint8_t
100 {
103 };
104
116 class KernelTask : public IKernelTask
117 {
118 friend class Kernel;
119
125 {
129 };
130
131 public:
140 {
142 };
143
148 explicit KernelTask() : m_user(nullptr), m_stack(), m_state(STATE_NONE), m_time_sleep(0),
149 m_srt(), m_hrt(), m_rt_weight()
150 {
151 // bind to wait object
152 if (IsSyncMode())
153 m_wait_obj->m_task = this;
154 }
155
159 ITask *GetUserTask() { return m_user; }
160
164 Stack *GetUserStack() { return &m_stack; }
165
169 bool IsBusy() const { return (m_user != nullptr); }
170
174 bool IsSleeping() const { return (m_time_sleep < 0); }
175
179 TId GetTid() const { return hw::PtrToWord(m_user); }
180
185 void Wake()
186 {
188
189 // wakeup on a next cycle
190 m_time_sleep = -1;
191 }
192
196 void SetCurrentWeight(int32_t weight)
197 {
198 if (TStrategy::WEIGHT_API)
199 m_rt_weight[0] = weight;
200 }
201
205 int32_t GetWeight() const { return (TStrategy::WEIGHT_API ? m_user->GetWeight() : 1); }
206
212 int32_t GetCurrentWeight() const { return (TStrategy::WEIGHT_API ? m_rt_weight[0] : 1); }
213
219 {
221
222 return (IsHrtMode() ? m_hrt[0].periodicity : 0);
223 }
224
231 {
233
234 return (IsHrtMode() ? m_hrt[0].deadline : 0);
235 }
236
243 {
246
247 return (IsHrtMode() ? (m_hrt[0].deadline - m_hrt[0].duration) : 0);
248 }
249
251 {
252 // note: task sleep time is negative
253 Timeout task_sleep = Max<Timeout>(0, -m_time_sleep);
254
255 if (IsSyncMode())
256 {
257 // likely task is sleeping during sync operation (see Wait)
258 if (m_wait_obj->IsWaiting())
259 {
260 // note: sync wait time is positive
261 task_sleep = m_wait_obj->m_time_wait;
262
263 // we shall account for only valid time (when task is waiting during sync operation)
264 if (task_sleep > 0)
265 sleep_ticks = Min(sleep_ticks, task_sleep);
266 }
267 else
268 {
269 sleep_ticks = Min(sleep_ticks, task_sleep);
270 }
271 }
272 else
273 {
274 sleep_ticks = Min(sleep_ticks, task_sleep);
275 }
276
277 // clamp to [1, STK_TICKLESS_TICKS_MAX] range
278 return Max<Timeout>(1, sleep_ticks);
279 }
280
281 protected:
286 {}
287
293 struct SrtInfo
294 {
296 {}
297
300 void Clear()
301 {
302 add_task_req = nullptr;
303 }
304
311 };
312
317 struct HrtInfo
318 {
319 HrtInfo() : periodicity(0), deadline(0), duration(0), done(false)
320 {}
321
324 void Clear()
325 {
326 periodicity = 0;
327 deadline = 0;
328 duration = 0;
329 done = false;
330 }
331
335 volatile bool done;
336 };
337
344 struct WaitObject : public IWaitObject
345 {
346 explicit WaitObject() : m_task(nullptr), m_sync_obj(nullptr), m_timeout(false), m_time_wait(0)
347 {}
348
353 {}
354
361 {
363 };
364
368 TId GetTid() const { return m_task->GetTid(); }
369
373 bool IsTimeout() const { return m_timeout; }
374
378 bool IsWaiting() const { return (m_sync_obj != nullptr); }
379
385 void Wake(bool timeout)
386 {
388
389 m_timeout = timeout;
390 m_time_wait = 0;
391
392 m_sync_obj->RemoveWaitObject(this);
393 m_sync_obj = nullptr;
394
395 return m_task->Wake();
396 }
397
404 bool Tick(Timeout elapsed_ticks)
405 {
407 {
408 m_time_wait -= elapsed_ticks;
409
410 if (m_time_wait <= 0)
411 m_timeout = true;
412 }
413
414 return !m_timeout;
415 }
416
424 void SetupWait(ISyncObject *sync_obj, Timeout timeout)
425 {
427
428 m_sync_obj = sync_obj;
429 m_time_wait = timeout;
430 m_timeout = false;
431
432 sync_obj->AddWaitObject(this);
433 }
434
437 volatile bool m_timeout;
439 };
440
445 void Bind(TPlatform *platform, ITask *user_task)
446 {
447 // set access mode for this stack
448 m_stack.mode = user_task->GetAccessMode();
449
450 // set task id for tracking purpose
451 #if STK_NEED_TASK_ID
452 m_stack.tid = user_task->GetId();
453 #endif
454
455 // init stack of the user task
456 if (!platform->InitStack(STACK_USER_TASK, &m_stack, user_task, user_task))
457 {
458 STK_ASSERT(false);
459 }
460
461 // bind user task
462 m_user = user_task;
463 }
464
468 void Unbind()
469 {
470 if (IsSyncMode())
471 {
472 // should be freed from waiting on task exit
473 STK_ASSERT(!m_wait_obj->IsWaiting());
474 }
475
476 m_user = nullptr;
477 m_stack = {};
479 m_time_sleep = 0;
480
481 if (IsHrtMode())
482 m_hrt[0].Clear();
483 else
484 m_srt->Clear();
485 }
486
490 {
491 // make this task sleeping to switch it out from scheduling process
493
494 // mark it as done HRT task
495 if (IsHrtMode())
497
498 // mark it as pending for removal
500 }
501
504 bool IsPendingRemoval() const { return ((m_state & STATE_REMOVE_PENDING) != 0U); }
505
509 bool IsMemoryOfSP(Word SP) const
510 {
511 Word *start = m_user->GetStack();
512 Word *end = start + m_user->GetStackSize();
513
514 return (SP >= hw::PtrToWord(start)) && (SP <= hw::PtrToWord(end));
515 }
516
523 void HrtInit(Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
524 {
525 STK_ASSERT(periodicity_tc > 0);
526 STK_ASSERT(deadline_tc > 0);
527 STK_ASSERT(start_delay_tc >= 0);
528 STK_ASSERT(periodicity_tc < INT32_MAX);
529 STK_ASSERT(deadline_tc < INT32_MAX);
530
531 m_hrt[0].periodicity = periodicity_tc;
532 m_hrt[0].deadline = deadline_tc;
533
534 if (start_delay_tc > 0)
535 ScheduleSleep(start_delay_tc);
536 }
537
543
549 void HrtOnSwitchedOut(IPlatform */*platform*/)
550 {
551 const Timeout duration = m_hrt[0].duration;
552
553 STK_ASSERT(duration >= 0);
554
555 Timeout sleep = m_hrt[0].periodicity - duration;
556 if (sleep > 0)
557 ScheduleSleep(sleep);
558
559 m_hrt[0].duration = 0;
560 m_hrt[0].done = false;
561 }
562
568 {
569 const Timeout duration = m_hrt[0].duration;
570
571 STK_ASSERT(duration >= 0);
573
574 m_user->OnDeadlineMissed(duration);
575 platform->ProcessHardFault();
576 }
577
582 {
583 m_hrt[0].done = true;
584 __stk_full_memfence();
585 }
586
590 bool HrtIsDeadlineMissed(Timeout duration) const { return (duration > m_hrt[0].deadline); }
591
602 {
603 STK_ASSERT(ticks > 0);
604
605 // set state first as kernel checks it when task IsSleeping
606 if (TStrategy::SLEEP_EVENT_API)
607 {
608 if (m_time_sleep >= 0)
610 }
611
612 m_time_sleep = -ticks;
613 __stk_full_memfence();
614 }
615
619 {
620 while (IsSleeping())
621 {
623 }
624 }
625
628 volatile uint32_t m_state;
632 int32_t m_rt_weight[STK_ALLOCATE_COUNT(TStrategy::WEIGHT_API, 1, 1, 0)];
634 };
635
644 {
645 friend class Kernel;
646
647 public:
648 TId GetTid() const { return m_platform->GetTid(); }
649
651
652 uint32_t GetTickResolution() const { return m_platform->GetTickResolution(); }
653
654 Cycles GetSysTimerCount() const { return m_platform->GetSysTimerCount(); }
655
656 uint32_t GetSysTimerFrequency() const { return m_platform->GetSysTimerFrequency(); }
657
659 {
661 STK_ASSERT(ticks >= 0);
662
663 Ticks now = GetTicks();
664 const Ticks deadline = now + ticks;
665 STK_ASSERT(deadline >= now);
666
667 for (; now < deadline; now = GetTicks())
668 {
670 }
671 }
672
674 {
676 STK_ASSERT(ticks >= 0);
677
678 if (!IsHrtMode())
679 {
680 m_platform->Sleep(ticks);
681 }
682 else
683 {
684 // sleeping is not supported in HRT mode, task will sleep according to its periodicity and workload
685 STK_ASSERT(false);
686 }
687 }
688
690 {
692 STK_ASSERT(timestamp >= 0);
693
694 if (!IsHrtMode())
695 {
696 m_platform->SleepUntil(timestamp);
697 }
698 else
699 {
700 // sleeping is not supported in HRT mode, task will sleep according to its periodicity and workload
701 STK_ASSERT(false);
702 }
703 }
704
706 {
708
709 m_platform->SwitchToNext();
710 }
711
713 {
714 if (IsSyncMode())
715 {
716 return m_platform->Wait(sobj, mutex, ticks);
717 }
718 else
719 {
720 STK_ASSERT(false);
721 return nullptr;
722 }
723 }
724
726 {
727 if (IsTicklessMode())
728 {
729 return m_platform->Suspend();
730 }
731 else
732 {
733 STK_ASSERT(false);
734 return 0;
735 }
736 }
737
738 void Resume(Timeout elapsed_ticks)
739 {
740 if (IsTicklessMode())
741 {
742 return m_platform->Resume(elapsed_ticks);
743 }
744 else
745 {
746 STK_ASSERT(false);
747 }
748 }
749
750 private:
/// Constructs an unbound service; Initialize() must bind a platform before use.
explicit KernelService() : m_platform(nullptr), m_ticks(0)
{}
756
762
768 void Initialize(IPlatform *platform)
769 {
770 m_platform = static_cast<TPlatform *>(platform);
771 }
772
776 void IncrementTicks(Ticks advance)
777 {
778 // using WriteVolatile64() to guarantee correct lockless reading order by ReadVolatile64
780 }
781
782 TPlatform *m_platform;
783 volatile Ticks m_ticks;
784 };
785
786public:
789 static constexpr uint32_t TASKS_MAX = TSize;
790
800 {
801 #ifdef _DEBUG
802 // TPlatform must inherit IPlatform
803 IPlatform *platform = &m_platform;
804 STK_UNUSED(platform);
805
806 // TStrategy must inherit ITaskSwitchStrategy
807 ITaskSwitchStrategy *strategy = &m_strategy;
808 STK_UNUSED(strategy);
809 #endif
810
811 #if !STK_TICKLESS_IDLE
812 STK_STATIC_ASSERT_DESC(((TMode & KERNEL_TICKLESS) == 0U),
813 "STK_TICKLESS_IDLE must be defined to 1 for KERNEL_TICKLESS");
814 #endif
815 }
816
821 {}
822
833 {
834 STK_ASSERT(resolution_us != 0);
835 STK_ASSERT(resolution_us <= PERIODICITY_MAX);
837
838 // reinitialize key state variables
839 m_task_now = nullptr;
842
843 m_service.Initialize(&m_platform);
844
845 m_platform.Initialize(this, &m_service, resolution_us, (IsDynamicMode() ? &m_exit_trap[0].stack : nullptr));
846
847 // now ready to Start()
849 }
850
860 {
861 if (!IsHrtMode())
862 {
863 STK_ASSERT(user_task != nullptr);
865
866 // when started the operation must be serialized by switching out from processing until
867 // kernel processes this request
868 if (IsStarted())
869 {
870 if (IsDynamicMode())
871 {
872 RequestAddTask(user_task);
873 }
874 else
875 {
876 STK_ASSERT(false);
877 }
878 }
879 else
880 {
881 AllocateAndAddNewTask(user_task);
882 }
883 }
884 else
885 {
886 STK_ASSERT(false);
887 }
888 }
889
898 __stk_attr_noinline void AddTask(ITask *user_task, Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
899 {
900 if (IsHrtMode())
901 {
902 STK_ASSERT(user_task != nullptr);
905
906 HrtAllocateAndAddNewTask(user_task, periodicity_tc, deadline_tc, start_delay_tc);
907 }
908 else
909 {
910 STK_ASSERT(false);
911 }
912 }
913
923 {
924 if (IsDynamicMode())
925 {
926 STK_ASSERT(user_task != nullptr);
928
929 KernelTask *task = FindTaskByUserTask(user_task);
930 if (task != nullptr)
931 RemoveTask(task);
932 }
933 else
934 {
935 // kernel operating mode must be KERNEL_DYNAMIC for tasks to be able to be removed
936 STK_ASSERT(false);
937 }
938 }
939
946 {
947 if (IsDynamicMode())
948 {
949 STK_ASSERT(user_task != nullptr);
951
953
954 KernelTask *task = FindTaskByUserTask(user_task);
955 if (task != nullptr)
956 task->ScheduleRemoval();
957 }
958 else
959 {
960 // kernel operating mode must be KERNEL_DYNAMIC for tasks to be able to be removed
961 STK_ASSERT(false);
962 }
963 }
964
971 void SuspendTask(ITask *user_task, bool &suspended)
972 {
973 STK_ASSERT(user_task != nullptr);
974
975 bool self = false;
976 KernelTask *task = nullptr;
977
978 // avoid race with OnTick
979 {
981
982 task = FindTaskByUserTask(user_task);
983 STK_ASSERT(task != nullptr);
984
985 // only suspend if the task is currently awake: if it is already sleeping
986 // (e.g. blocked on a mutex or timed Sleep), do not overwrite m_time_sleep,
987 // that would corrupt the original sleep state and, for sync-object waits,
988 // would interfere with WaitObject::Tick()
989 if ((suspended = !task->IsSleeping()) == true)
990 {
991 task->ScheduleSleep(WAIT_INFINITE);
992
993 // check if suspending self
994 self = (task == m_task_now);
995 }
996 }
997
998 // note: we do not spin long here, kernel will switch this task out from scheduling on the next tick
999 if (self)
1000 task->BusyWaitWhileSleeping();
1001 }
1002
1006 void ResumeTask(ITask *user_task)
1007 {
1008 STK_ASSERT(user_task != nullptr);
1009
1010 // avoid race with OnTick
1012
1013 KernelTask *task = FindTaskByUserTask(user_task);
1014 STK_ASSERT(task != nullptr);
1015
1016 if (task->IsSleeping())
1017 task->Wake();
1018 }
1019
1025 size_t EnumerateTasks(ITask **user_tasks, const size_t max_size)
1026 {
1027 size_t count = 0U;
1028
1029 // avoid race with OnTick
1031
1032 for (uint32_t i = 0U; i < Min(max_size, static_cast<size_t>(TASKS_MAX)); ++i)
1033 {
1034 KernelTask *task = &m_task_storage[i];
1035 if (task->IsBusy())
1036 user_tasks[count++] = task->GetUserTask();
1037 }
1038
1039 return count;
1040 }
1041
1051 {
1053
1054 // stacks of the traps must be re-initialized on every subsequent Start
1055 InitTraps();
1056
1057 // start tracing
1058 #if STK_SEGGER_SYSVIEW
1059 SEGGER_SYSVIEW_Start();
1060 for (uint32_t i = 0U; i < TASKS_MAX; ++i)
1061 {
1062 KernelTask *task = &m_task_storage[i];
1063 if (task->IsBusy())
1064 SendTaskTraceInfo(task);
1065 }
1066 #endif
1067
1068 m_platform.Start();
1069 }
1070
1075 bool IsStarted() const
1076 {
1077 return (m_task_now != nullptr);
1078 }
1079
1084
1089
1092 EState GetState() const { return m_state; }
1093
1094protected:
1108
1121
1128 static constexpr Timeout YIELD_TICKS = 2;
1129
1133 {
1134 return (state > FSM_STATE_NONE) &&
1135 (state < FSM_STATE_MAX);
1136 }
1137
1141 {
1142 // init stack for a Sleep trap
1143 {
1144 SleepTrapStack &sleep = m_sleep_trap[0];
1145
1146 SleepTrapStackMemory wrapper(&sleep.memory);
1147 sleep.stack.mode = ACCESS_PRIVILEGED;
1148 #if STK_NEED_TASK_ID
1149 sleep.stack.tid = SYS_TASK_ID_SLEEP;
1150 #endif
1151
1152 m_platform.InitStack(STACK_SLEEP_TRAP, &sleep.stack, &wrapper, nullptr);
1153 }
1154
1155 // init stack for an Exit trap
1156 if (IsDynamicMode())
1157 {
1158 ExitTrapStack &exit = m_exit_trap[0];
1159
1160 ExitTrapStackMemory wrapper(&exit.memory);
1161 exit.stack.mode = ACCESS_PRIVILEGED;
1162 #if STK_NEED_TASK_ID
1163 exit.stack.tid = SYS_TASK_ID_EXIT;
1164 #endif
1165
1166 m_platform.InitStack(STACK_EXIT_TRAP, &exit.stack, &wrapper, nullptr);
1167 }
1168 }
1169
1174 KernelTask *AllocateNewTask(ITask *user_task)
1175 {
1176 // look for a free kernel task
1177 KernelTask *new_task = nullptr;
1178 for (uint32_t i = 0U; i < TASKS_MAX; ++i)
1179 {
1180 KernelTask *task = &m_task_storage[i];
1181 if (task->IsBusy())
1182 {
1183 // avoid task collision
1184 STK_ASSERT(task->m_user != user_task);
1185
1186 // avoid stack collision
1187 STK_ASSERT(task->m_user->GetStack() != user_task->GetStack());
1188 }
1189 else
1190 if (new_task == nullptr)
1191 {
1192 new_task = task;
1193 #if defined(NDEBUG) && !defined(_STK_ASSERT_REDIRECT)
1194 break; // break if assertions are inactive and do not try to validate collision with existing tasks
1195 #endif
1196 }
1197 }
1198
1199 // if nullptr - exceeded max supported kernel task count, application design failure
1200 STK_ASSERT(new_task != nullptr);
1201
1202 new_task->Bind(&m_platform, user_task);
1203
1204 return new_task;
1205 }
1206
1210 void AddKernelTask(KernelTask *task)
1211 {
1212 #if STK_SEGGER_SYSVIEW
1213 // start tracing new task
1214 SEGGER_SYSVIEW_OnTaskCreate(task->GetUserStack()->tid);
1215 if (IsStarted())
1216 SendTaskTraceInfo(task);
1217 #endif
1218
1219 m_strategy.AddTask(task);
1220 }
1221
1227 {
1228 KernelTask *task = AllocateNewTask(user_task);
1229 STK_ASSERT(task != nullptr);
1230
1231 AddKernelTask(task);
1232 }
1233
1242 void HrtAllocateAndAddNewTask(ITask *user_task, Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
1243 {
1244 KernelTask *task = AllocateNewTask(user_task);
1245 STK_ASSERT(task != nullptr);
1246
1247 task->HrtInit(periodicity_tc, deadline_tc, start_delay_tc);
1248
1249 AddKernelTask(task);
1250 }
1251
1257 {
1259
1260 KernelTask *caller = FindTaskBySP(m_platform.GetCallerSP());
1261 STK_ASSERT(caller != nullptr);
1262
1263 typename KernelTask::AddTaskRequest req = { .user_task = user_task };
1264 caller->m_srt[0].add_task_req = &req;
1265
1266 // notify kernel
1268
1269 // switch out and wait for completion (due to context switch request could be processed here)
1270 if (caller->m_srt[0].add_task_req != nullptr)
1271 m_service.SwitchToNext();
1272
1273 STK_ASSERT(caller->m_srt[0].add_task_req == nullptr);
1274 }
1275
1280 __stk_attr_noinline KernelTask *FindTaskByUserTask(const ITask *user_task)
1281 {
1282 for (uint32_t i = 0U; i < TASKS_MAX; ++i)
1283 {
1284 KernelTask *task = &m_task_storage[i];
1285 if (task->GetUserTask() == user_task)
1286 return task;
1287 }
1288
1289 return nullptr;
1290 }
1291
1296 KernelTask *FindTaskByStack(const Stack *stack)
1297 {
1298 for (uint32_t i = 0U; i < TASKS_MAX; ++i)
1299 {
1300 KernelTask *task = &m_task_storage[i];
1301 if (task->GetUserStack() == stack)
1302 return task;
1303 }
1304
1305 return nullptr;
1306 }
1307
1313 {
1314 STK_ASSERT(m_task_now != nullptr);
1315
1316 if (m_task_now->IsMemoryOfSP(SP))
1317 return m_task_now;
1318
1319 for (uint32_t i = 0U; i < TASKS_MAX; ++i)
1320 {
1321 KernelTask *task = &m_task_storage[i];
1322
1323 // skip finished tasks (applicable only for KERNEL_DYNAMIC mode)
1324 if (IsDynamicMode() && !task->IsBusy())
1325 continue;
1326
1327 if (task->IsMemoryOfSP(SP))
1328 return task;
1329 }
1330
1331 return nullptr;
1332 }
1333
1338 void RemoveTask(KernelTask *task)
1339 {
1340 STK_ASSERT(task != nullptr);
1341
1342 #if STK_SEGGER_SYSVIEW
1343 SEGGER_SYSVIEW_OnTaskTerminate(task->GetUserStack()->tid);
1344 #endif
1345
1346 // notify task about pending exit
1347 task->GetUserTask()->OnExit();
1348
1349 m_strategy.RemoveTask(task);
1350 task->Unbind();
1351 }
1352
1364 {
1365 STK_ASSERT(m_strategy.GetSize() != 0);
1366
1367 // iterate tasks and generate OnTaskSleep for a strategy for all initially sleeping tasks
1368 if (TStrategy::SLEEP_EVENT_API)
1369 {
1370 for (uint32_t i = 0U; i < TASKS_MAX; ++i)
1371 {
1372 KernelTask *task = &m_task_storage[i];
1373
1374 if (task->IsSleeping())
1375 {
1376 if ((task->m_state & KernelTask::STATE_SLEEP_PENDING) != 0U)
1377 {
1378 task->m_state &= ~KernelTask::STATE_SLEEP_PENDING;
1379
1380 // notify strategy that task is sleeping
1381 m_strategy.OnTaskSleep(task);
1382 }
1383 }
1384 }
1385 }
1386
1387 // get initial state and first task
1388 {
1390
1391 KernelTask *next = nullptr;
1393
1394 // expecting only SLEEPING or SWITCHING states
1396
1398 {
1399 m_task_now = next;
1400
1401 active = next->GetUserStack();
1402
1403 if (IsHrtMode())
1404 next->HrtOnSwitchedIn();
1405 }
1406 else
1408 {
1409 // MISRA 5-2-3 deviation: GetNext/GetFirst returns IKernelTask*, all objects in
1410 // the strategy pool are KernelTask instances - downcast is guaranteed safe.
1411 m_task_now = static_cast<KernelTask *>(m_strategy.GetFirst());
1412
1413 active = &m_sleep_trap[0].stack;
1414 }
1415 }
1416
1417 // is in running state
1419
1420 #if STK_SEGGER_SYSVIEW
1421 SEGGER_SYSVIEW_OnTaskStartExec(m_task_now->tid);
1422 #endif
1423 }
1424
1431 {
1432 if (IsDynamicMode())
1433 {
1435
1436 // is in stopped state, i.e. is ready to Start() again
1438 }
1439 }
1440
1457 bool OnTick(Stack *&idle, Stack *&active
1459 , Timeout &ticks
1460 #endif
1461 )
1462 {
1463 #if !STK_TICKLESS_IDLE
1464 // in non-tickless mode kernel is advancing strictly by 1 tick on every OnTick call
1465 enum { ticks = 1 };
1466 #endif
1467
1468 // advance internal timestamp
1469 m_service.IncrementTicks(ticks);
1470
1471 // consume elapsed and update to ticks to sleep
1472 #if STK_TICKLESS_IDLE
1473 ticks = (
1474 #else
1475 // notify compiler that we ignore a return value of UpdateTasks
1476 static_cast<void>(
1477 #endif
1478 UpdateTasks(ticks));
1479
1480 // decide on a context switch
1481 return UpdateFsmState(idle, active);
1482 }
1483
1484 void OnTaskSwitch(Word caller_SP)
1485 {
1486 OnTaskSleep(caller_SP, YIELD_TICKS);
1487 }
1488
1489 void OnTaskSleep(Word caller_SP, Timeout ticks)
1490 {
1491 KernelTask *task = FindTaskBySP(caller_SP);
1492 STK_ASSERT(task != nullptr);
1493
1494 // make change to HRT state and sleep time atomic
1495 {
1497
1498 if (IsHrtMode())
1499 task->HrtOnWorkCompleted();
1500
1501 if (ticks > 0)
1502 task->ScheduleSleep(ticks);
1503 }
1504
1505 // note: we do not spin long here, kernel will switch this task out from scheduling on the next tick
1506 task->BusyWaitWhileSleeping();
1507 }
1508
1509 void OnTaskSleepUntil(Word caller_SP, Ticks timestamp)
1510 {
1512
1513 KernelTask *task = FindTaskBySP(caller_SP);
1514 STK_ASSERT(task != nullptr);
1515
1516 // make change to HRT state and sleep time atomic
1517 {
1519
1520 Timeout ticks = Max(static_cast<Timeout>(0), static_cast<Timeout>(timestamp - m_service.m_ticks));
1521
1522 if (ticks > 0)
1523 task->ScheduleSleep(ticks);
1524 }
1525
1526 // note: we do not spin long here, kernel will switch this task out from scheduling on the next tick
1527 task->BusyWaitWhileSleeping();
1528 }
1529
1530 void OnTaskExit(Stack *stack)
1531 {
1532 if (IsDynamicMode())
1533 {
1534 KernelTask *task = FindTaskByStack(stack);
1535 STK_ASSERT(task != nullptr);
1536
1537 // notify kernel to execute removal
1538 task->ScheduleRemoval();
1539 }
1540 else
1541 {
1542 // kernel operating mode must be KERNEL_DYNAMIC for tasks to be able to exit
1544 }
1545 }
1546
/// Blocks the calling task on @a sync_obj for at most @a timeout ticks.
/// The externally-locked @a mutex is released while waiting and re-acquired
/// before returning; query IsTimeout() on the returned wait object to learn
/// how the wait ended. Only valid in KERNEL_SYNC mode (asserts otherwise).
IWaitObject *OnTaskWait(Word caller_SP, ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
{
    if (IsSyncMode())
    {
        STK_ASSERT(timeout != 0);        // API contract: caller must not be in ISR
        STK_ASSERT(sync_obj != nullptr); // API contract: ISyncObject instance must be provided
        STK_ASSERT(mutex != nullptr);    // API contract: IMutex instance must be provided
        STK_ASSERT((sync_obj->GetHead() == nullptr) || (sync_obj->GetHead() == &m_sync_list[0]));

        // resolve the calling task from its stack pointer
        KernelTask *task = FindTaskBySP(caller_SP);
        STK_ASSERT(task != nullptr);

        // configure waiting
        task->m_wait_obj->SetupWait(sync_obj, timeout);

        // register ISyncObject if not yet
        if (sync_obj->GetHead() == nullptr)
            m_sync_list->LinkBack(sync_obj);

        // start sleeping infinitely, we rely on a Wake call via WaitObject
        task->ScheduleSleep(WAIT_INFINITE);

        // unlock mutex locked externally, so that we could wait in a busy-waiting loop
        mutex->Unlock();

        // note: we do not spin long here, kernel will switch this task out from scheduling on the next tick
        task->BusyWaitWhileSleeping();

        // re-lock mutex when returning to the task's execution space
        mutex->Lock();

        return task->m_wait_obj;
    }
    else
    {
        // waiting on sync objects requires KERNEL_SYNC mode
        STK_ASSERT(false);
        return nullptr;
    }
}
1586
1587 TId OnGetTid(Word caller_SP)
1588 {
1589 KernelTask *task = FindTaskBySP(caller_SP);
1590 STK_ASSERT(task != nullptr);
1591
1592 return task->GetTid();
1593 }
1594
1595 void OnSuspend(bool suspended)
1596 {
1597 if (suspended)
1599 else
1601 }
1602
1605 Timeout UpdateTasks(const Timeout elapsed_ticks)
1606 {
1607 // sync objects are updated before UpdateTaskRequest which may add a new object (newly added object must become 1 tick older)
1608 if (IsSyncMode())
1609 UpdateSyncObjects(elapsed_ticks);
1610
1612
1613 return UpdateTaskState(elapsed_ticks);
1614 }
1615
/// Per-tick task bookkeeping: advances sleep timers, removes tasks that are
/// pending removal (KERNEL_DYNAMIC), delivers sleep/wake events to the
/// strategy, tracks HRT execution budgets, and computes how many ticks the
/// CPU may stay idle in tickless mode.
/// @param elapsed_ticks Ticks elapsed since the previous update (1 in non-tickless mode).
/// @return Number of ticks the driver may keep the CPU idle (tickless), else 1.
Timeout UpdateTaskState(const Timeout elapsed_ticks)
{
    // start from the widest allowed idle window and narrow it per task
    Timeout sleep_ticks = (IsTicklessMode() ? STK_TICKLESS_TICKS_MAX : 1);

    for (uint32_t i = 0U; i < TASKS_MAX; ++i)
    {
        KernelTask *task = &m_task_storage[i];

        if (task->IsSleeping())
        {
            if (IsDynamicMode())
            {
                // task is pending removal, wait until it is switched out
                if (task->IsPendingRemoval())
                {
                    // safe to remove when it is not the current task, or it is the
                    // only task left and the kernel is already in the sleeping state
                    if ((task != m_task_now) ||
                        ((m_strategy.GetSize() == 1) && (m_fsm_state == FSM_STATE_SLEEPING)))
                    {
                        RemoveTask(task);
                        continue;
                    }
                }
            }

            // deliver sleep event to strategy
            // note: only currently scheduled task can be pending to sleep
            if (TStrategy::SLEEP_EVENT_API)
            {
                if ((task->m_state & KernelTask::STATE_SLEEP_PENDING) != 0U)
                {
                    task->m_state &= ~KernelTask::STATE_SLEEP_PENDING;

                    // notify strategy that task is sleeping
                    m_strategy.OnTaskSleep(task);
                }
            }

            // advance sleep time by a tick (sleep time is stored negated)
            task->m_time_sleep += elapsed_ticks;

            // deliver sleep event to strategy
            if (TStrategy::SLEEP_EVENT_API)
            {
                // notify strategy that task woke up
                if (task->m_time_sleep >= 0)
                    m_strategy.OnTaskWake(task);
            }
        }
        else
        if (IsHrtMode())
        {
            // in HRT mode we trace how long task spent in active state (doing some work)
            if (task->IsBusy())
            {
                task->m_hrt[0].duration += elapsed_ticks;

                // check if deadline is missed (HRT failure)
                if (task->HrtIsDeadlineMissed(task->m_hrt[0].duration))
                {
                    bool can_recover = false;

                    // report deadline overrun to a strategy which supports overrun recovery
                    if (TStrategy::DEADLINE_MISSED_API)
                        can_recover = m_strategy.OnTaskDeadlineMissed(task);

                    // report failure if it could not be recovered by a scheduling strategy
                    if (!can_recover)
                        task->HrtHardFailDeadline(&m_platform);
                }
            }
        }

        // get the number ticks the driver has to keep CPU in Idle
        if (IsTicklessMode() && (sleep_ticks > 1) && task->IsBusy())
        {
            sleep_ticks = task->GetSleepTicks(sleep_ticks);
        }
    }

    return sleep_ticks;
}
1706
1709 void UpdateSyncObjects(const Timeout elapsed_ticks)
1710 {
1712
1713 ISyncObject::ListEntryType *itr = m_sync_list->GetFirst();
1714
1715 while (itr != nullptr)
1716 {
1717 ISyncObject::ListEntryType *next = itr->GetNext();
1718
1719 // MISRA 5-2-3 deviation: GetNext/GetFirst returns ISyncObject*, all objects in
1720 // m_sync_list are ISyncObject instances - downcast is guaranteed safe
1721 if (!static_cast<ISyncObject *>(itr)->Tick(elapsed_ticks))
1722 m_sync_list->Unlink(itr);
1723
1724 itr = next;
1725 }
1726 }
1727
1731 {
1732 if (m_request == REQUEST_NONE)
1733 return;
1734
1735 // process AddTask requests coming from tasks (KERNEL_DYNAMIC mode only, KERNEL_HRT is
1736 // excluded as we assume that HRT tasks must be known to the kernel before a Start())
1737 if (IsDynamicMode() && !IsHrtMode())
1738 {
1739 // process serialized AddTask request made from another active task, requesting process
1740 // is currently waiting due to SwitchToNext()
1741 if ((m_request & REQUEST_ADD_TASK) != 0U)
1742 {
1744
1745 for (uint32_t i = 0U; i < TASKS_MAX; ++i)
1746 {
1747 KernelTask *task = &m_task_storage[i];
1748
1749 if (task->m_srt[0].add_task_req != nullptr)
1750 {
1751 AllocateAndAddNewTask(task->m_srt[0].add_task_req->user_task);
1752
1753 task->m_srt[0].add_task_req = nullptr;
1754 __stk_full_memfence();
1755 }
1756 }
1757 }
1758 }
1759 }
1760
1765 EFsmEvent FetchNextEvent(KernelTask *&next)
1766 {
1768 KernelTask *itr = nullptr;
1769
1770 // check if no tasks left in KERNEL_DYNAMIC mode and exit, if KERNEL_DYNAMIC is not
1771 // set then 'is_empty' will always be false
1772 bool is_empty = IsDynamicMode() && (m_strategy.GetSize() == 0U);
1773
1774 if (!is_empty)
1775 {
1776 // MISRA 5-2-3 deviation: GetNext/GetFirst returns IKernelTask*, all objects in
1777 // the strategy pool are KernelTask instances - downcast is guaranteed safe.
1778 itr = static_cast<KernelTask *>(m_strategy.GetNext());
1779
1780 // sleep-aware strategy returns nullptr if no active tasks available, start sleeping
1781 if (itr == nullptr)
1782 {
1783 type = FSM_EVENT_SLEEP;
1784 }
1785 else
1786 {
1787 // strategy must provide active-only task
1788 STK_ASSERT(!itr->IsSleeping());
1789
1790 // if was sleeping, process wake event first
1792 }
1793 }
1794
1795 next = itr;
1796 return type;
1797 }
1798
1803#ifdef _STK_UNDER_TEST
1804 virtual
1805#endif
1806 EFsmState GetNewFsmState(KernelTask *&next)
1807 {
1809 return m_fsm[m_fsm_state][FetchNextEvent(next)];
1810 }
1811
1817 bool UpdateFsmState(Stack *&idle, Stack *&active)
1818 {
1819 KernelTask *now = m_task_now, *next = nullptr;
1820 bool switch_context = false;
1821
1822 EFsmState new_state = GetNewFsmState(next);
1823
1824 switch (new_state)
1825 {
1827 switch_context = StateSwitch(now, next, idle, active);
1828 break;
1829 case FSM_STATE_SLEEPING:
1830 switch_context = StateSleep(now, next, idle, active);
1831 break;
1832 case FSM_STATE_WAKING:
1833 switch_context = StateWake(now, next, idle, active);
1834 break;
1835 case FSM_STATE_EXITING:
1836 switch_context = StateExit(now, next, idle, active);
1837 break;
1838 case FSM_STATE_NONE:
1839 return switch_context; // valid intermittent non-persisting state: no-transition
1840 case FSM_STATE_MAX:
1841 default: // invalid state value
1843 break;
1844 }
1845
1846 m_fsm_state = new_state;
1847 return switch_context;
1848 }
1849
/// FSM transition: switch from task @a now to task @a next.
/// Fills @a idle / @a active with the outgoing / incoming stacks and updates
/// the current-task pointer and HRT per-period accounting.
/// @return true when the platform must perform a context switch.
bool StateSwitch(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
{
    STK_ASSERT(now != nullptr);
    STK_ASSERT(next != nullptr);

    // do not switch context because task did not change
    if (next == now)
        return false;

    idle = now->GetUserStack();
    active = next->GetUserStack();

    // if stack memory is exceeded these assertions will be hit
    // (the watermark at the bottom of the stack must stay intact)
    if (now->IsBusy())
    {
        // current task could exit, thus we check it with IsBusy to avoid referencing nullptr returned by GetUserTask()
        STK_ASSERT(now->GetUserTask()->GetStack()[0] == STK_STACK_MEMORY_FILLER);
    }
    STK_ASSERT(next->GetUserTask()->GetStack()[0] == STK_STACK_MEMORY_FILLER);

    m_task_now = next;

    if ((IsHrtMode()))
    {
        // roll the HRT period only when the outgoing task reported its work done
        if (now->m_hrt[0].done)
        {
            now->HrtOnSwitchedOut(&m_platform);
            next->HrtOnSwitchedIn();
        }
    }

#if STK_SEGGER_SYSVIEW
    SEGGER_SYSVIEW_OnTaskStopReady(now->GetUserStack()->tid, TRACE_EVENT_SWITCH);
    SEGGER_SYSVIEW_OnTaskStartReady(next->GetUserStack()->tid);
#endif

    return true; // switch context
}
1895
1903 bool StateWake(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
1904 {
1905 STK_UNUSED(now);
1906
1907 STK_ASSERT(next != nullptr);
1908
1909 idle = &m_sleep_trap[0].stack;
1910 active = next->GetUserStack();
1911
1912 // if stack memory is exceeded these assertions will be hit
1914 STK_ASSERT(next->GetUserTask()->GetStack()[0] == STK_STACK_MEMORY_FILLER);
1915
1916 m_task_now = next;
1917
1918 #if STK_SEGGER_SYSVIEW
1919 SEGGER_SYSVIEW_OnTaskStartReady(next->GetUserStack()->tid);
1920 #endif
1921
1922 if ((IsHrtMode()))
1923 next->HrtOnSwitchedIn();
1924
1925 return true; // switch context
1926 }
1927
/// FSM transition: all tasks are asleep - park the CPU on the sleep trap.
/// Fills @a idle with the outgoing task's stack and @a active with the
/// sleep trap's stack; @a next is unused in this transition.
/// @return true when the platform must perform a context switch.
bool StateSleep(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
{
    STK_UNUSED(next);

    STK_ASSERT(now != nullptr);
    // sleep trap stack must have been initialized by InitTraps
    STK_ASSERT(m_sleep_trap[0].stack.SP != 0);

    idle = now->GetUserStack();
    active = &m_sleep_trap[0].stack;

    m_task_now = static_cast<KernelTask *>(m_strategy.GetFirst());

#if STK_SEGGER_SYSVIEW
    SEGGER_SYSVIEW_OnTaskStopReady(now->GetUserStack()->tid, TRACE_EVENT_SLEEP);
#endif

    if (IsHrtMode())
    {
        // tasks pending removal must not roll their HRT period accounting
        if (!now->IsPendingRemoval())
            now->HrtOnSwitchedOut(&m_platform);
    }

    return true; // switch context
}
1959
1968 bool StateExit(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
1969 {
1970 STK_UNUSED(now);
1971 STK_UNUSED(next);
1972
1973 if (IsDynamicMode())
1974 {
1975 // dynamic tasks are not supported if main processes's stack memory is not provided in Start()
1976 STK_ASSERT(m_exit_trap[0].stack.SP != 0);
1977
1978 idle = nullptr;
1979 active = &m_exit_trap[0].stack;
1980
1981 m_task_now = nullptr;
1982
1983 m_platform.Stop();
1984 }
1985 else
1986 {
1987 STK_UNUSED(idle);
1988 STK_UNUSED(active);
1989 }
1990
1991 return false;
1992 }
1993
1997 bool IsInitialized() const { return (m_state != STATE_INACTIVE); }
1998
2008
2009#if STK_SEGGER_SYSVIEW
/// Publishes the task's id, trace name and stack base/size to the
/// SEGGER SystemView tracer. Requires a bound (busy) task slot.
void SendTaskTraceInfo(KernelTask *task)
{
    STK_ASSERT(task->IsBusy());

    SEGGER_SYSVIEW_TASKINFO info =
    {
        .TaskID = task->GetUserStack()->tid,
        .sName = task->GetUserTask()->GetTraceName(),
        .Prio = 0,
        .StackBase = hw::PtrToWord(task->GetUserTask()->GetStack()),
        .StackSize = task->GetUserTask()->GetStackSizeBytes()
    };
    SEGGER_SYSVIEW_SendTaskInfo(&info);
}
2028#endif
2029
// Kernel modes: compile-time predicates derived from the TMode template flags.
static __stk_forceinline bool IsStaticMode() { return ((TMode & KERNEL_STATIC) != 0U); }     // fixed set of tasks
static __stk_forceinline bool IsDynamicMode() { return ((TMode & KERNEL_DYNAMIC) != 0U); }   // tasks may be added/removed at runtime
static __stk_forceinline bool IsHrtMode() { return ((TMode & KERNEL_HRT) != 0U); }           // hard real-time scheduling (periodicity/deadline)
static __stk_forceinline bool IsSyncMode() { return ((TMode & KERNEL_SYNC) != 0U); }         // sync-object wait/wake support
static __stk_forceinline bool IsTicklessMode() { return ((TMode & KERNEL_TICKLESS) != 0U); } // tickless idle (Suspend/Resume) support
2036
2037 // If hit here: Kernel<N> expects at least 1 task, e.g. N > 0
2039
2040 // If hit here: Kernel mode must be assigned.
2041 STK_STATIC_ASSERT_N(KERNEL_MODE_MUST_BE_SET, (TMode != 0U));
2042
2043 // If hit here: KERNEL_STATIC and KERNEL_DYNAMIC can not be mixed, either one of these is possible.
2044 STK_STATIC_ASSERT_N(KERNEL_MODE_MIX_NOT_ALLOWED,
2045 (((TMode & KERNEL_STATIC) & (TMode & KERNEL_DYNAMIC)) == 0U));
2046
2047 // If hit here: KERNEL_HRT must accompany KERNEL_STATIC or KERNEL_DYNAMIC.
2048 STK_STATIC_ASSERT_N(KERNEL_MODE_HRT_ALONE, (((TMode & KERNEL_HRT) == 0U) ||
2049 ((((TMode & KERNEL_HRT) != 0U)) && (((TMode & KERNEL_STATIC) != 0U) || ((TMode & KERNEL_DYNAMIC) != 0U)))));
2050
2051 // if hit here: KERNEL_TICKLESS is incompatible with KERNEL_HRT. Tickless suppresses the timer,
2052 // which destroys the precise periodicity HRT depends on.
2053 STK_STATIC_ASSERT_N(TICKLESS_HRT_CONFLICT,
2054 (((TMode & KERNEL_TICKLESS) == 0U) || ((TMode & KERNEL_HRT) == 0U)));
2055
2060
2075
2091
2098
2099 KernelService m_service;
2100 TPlatform m_platform;
2101 TStrategy m_strategy;
2102 KernelTask *m_task_now;
2104 SleepTrapStack m_sleep_trap[1];
2105 ExitTrapStack m_exit_trap[STK_ALLOCATE_COUNT(TMode, KERNEL_DYNAMIC, 1, 0)];
2107 volatile uint8_t m_request;
2108 volatile EState m_state;
2110
2112 // FSM_EVENT_SWITCH FSM_EVENT_SLEEP FSM_EVENT_WAKE FSM_EVENT_EXIT
2117 };
2118
2120};
2121
2122} // namespace stk
2123
2124#endif /* STK_H_ */
#define STK_KERNEL_PANIC(id)
Called when the kernel detects an unrecoverable internal fault.
Definition stk_arch.h:63
#define STK_UNUSED(X)
Explicitly marks a variable as unused to suppress compiler warnings.
Definition stk_defs.h:524
#define STK_STATIC_ASSERT_N(NAME, X)
Compile-time assertion with a user-defined name suffix.
Definition stk_defs.h:359
#define __stk_forceinline
Forces compiler to always inline the decorated function, regardless of optimisation level.
Definition stk_defs.h:104
#define STK_TICKLESS_IDLE
Enables tickless (dynamic-tick) low-power operation during idle periods.
Definition stk_defs.h:36
#define STK_ASSERT(e)
Runtime assertion. Halts execution if the expression e evaluates to false.
Definition stk_defs.h:330
#define __stk_attr_noinline
Prevents compiler from inlining the decorated function (function prefix).
Definition stk_defs.h:185
#define STK_TICKLESS_TICKS_MAX
Maximum number of kernel ticks the hardware timer may be suppressed in one tickless idle interval whe...
Definition stk_defs.h:62
#define STK_STATIC_ASSERT_DESC(X, DESC)
Compile-time assertion with a custom error description. Produces a compilation error if X is false.
Definition stk_defs.h:350
#define STK_STACK_MEMORY_FILLER
Sentinel value written to the entire stack region at initialization (stack watermark pattern).
Definition stk_defs.h:377
#define STK_ALLOCATE_COUNT(MODE, FLAG, ONTRUE, ONFALSE)
Selects a static array element count at compile time based on a mode flag.
Definition stk_defs.h:485
Contains helper implementations which simplify user-side code.
Earliest Deadline First (EDF) task-switching strategy (stk::SwitchStrategyEDF).
Fixed-priority preemptive task-switching strategy with round-robin within each priority level (stk::S...
Rate-Monotonic (RM) and Deadline-Monotonic (DM) task-switching strategies (stk::SwitchStrategyMonoton...
Round-Robin task-switching strategy (stk::SwitchStrategyRoundRobin / stk::SwitchStrategyRR).
Smooth Weighted Round-Robin task-switching strategy (stk::SwitchStrategySmoothWeightedRoundRobin / st...
#define __stk_relax_cpu
Emits a CPU pipeline-relaxation hint for use inside hot busy-wait (spin) loops (in-code statement).
Definition stktest.h:33
Namespace of STK package.
uintptr_t Word
Native processor word type.
Definition stk_common.h:113
@ TRACE_EVENT_SLEEP
Task entered sleep / blocked state.
Definition stk_common.h:103
@ TRACE_EVENT_SWITCH
Task context switch event (task became active).
Definition stk_common.h:102
int64_t Ticks
Ticks value.
Definition stk_common.h:128
int32_t Timeout
Timeout time (ticks).
Definition stk_common.h:123
static constexpr T Max(T a, T b)
Compile-time maximum of two values.
Definition stk_defs.h:541
@ STACK_SLEEP_TRAP
Stack of the Sleep trap.
Definition stk_common.h:72
@ STACK_USER_TASK
Stack of the user task.
Definition stk_common.h:71
@ STACK_EXIT_TRAP
Stack of the Exit trap.
Definition stk_common.h:73
@ PERIODICITY_DEFAULT
Default periodicity (microseconds), 1 millisecond.
Definition stk_common.h:82
@ PERIODICITY_MAX
Maximum periodicity (microseconds), 99 milliseconds (note: this value is the highest working on a rea...
Definition stk_common.h:81
static constexpr Timeout WAIT_INFINITE
Timeout value: block indefinitely until the synchronization object is signaled.
Definition stk_common.h:171
@ SYS_TASK_ID_EXIT
Exit trap.
Definition stk_common.h:92
@ SYS_TASK_ID_SLEEP
Sleep trap.
Definition stk_common.h:91
static constexpr T Min(T a, T b)
Compile-time minimum of two values.
Definition stk_defs.h:535
uint64_t Cycles
Cycles value.
Definition stk_common.h:133
Word TId
Definition stk_common.h:118
@ ACCESS_PRIVILEGED
Privileged access mode (access to hardware is fully unrestricted).
Definition stk_common.h:33
@ KERNEL_TICKLESS
Tickless mode. To use this mode STK_TICKLESS_IDLE must be defined to 1 in stk_config....
Definition stk_common.h:45
@ KERNEL_SYNC
Synchronization support (see Event).
Definition stk_common.h:44
@ KERNEL_HRT
Hard Real-Time (HRT) behavior (tasks are scheduled periodically and have an execution deadline,...
Definition stk_common.h:43
@ KERNEL_STATIC
All tasks are static and can not exit.
Definition stk_common.h:41
@ KERNEL_DYNAMIC
Tasks can be added or removed and therefore exit when done.
Definition stk_common.h:42
@ KERNEL_PANIC_BAD_MODE
Kernel is in bad/unsupported mode for the current operation.
Definition stk_common.h:62
@ KERNEL_PANIC_BAD_STATE
Kernel entered unexpected (bad) state.
Definition stk_common.h:61
Memory-related primitives.
__stk_forceinline void WriteVolatile64(volatile T *addr, T value)
Atomically write a 64-bit volatile value.
Definition stk_arch.h:411
__stk_forceinline Word PtrToWord(T *ptr) noexcept
Cast a pointer to a CPU register-width integer.
Definition stk_arch.h:94
bool IsInsideISR()
Check whether the CPU is currently executing inside a hardware interrupt service routine (ISR).
Definition stktest.cpp:103
__stk_forceinline T ReadVolatile64(volatile const T *addr)
Atomically read a 64-bit volatile value.
Definition stk_arch.h:357
void ResumeTask(ITask *user_task)
Resume task.
Definition stk.h:1006
void ScheduleTaskRemoval(ITask *user_task)
Schedule task removal from scheduling (exit).
Definition stk.h:945
static bool IsStaticMode()
Definition stk.h:2031
bool UpdateFsmState(Stack *&idle, Stack *&active)
Update FSM state.
Definition stk.h:1817
KernelTask * AllocateNewTask(ITask *user_task)
Allocate new instance of KernelTask.
Definition stk.h:1174
static bool IsSyncMode()
Definition stk.h:2034
size_t EnumerateTasks(ITask **user_tasks, const size_t max_size)
Enumerate tasks.
Definition stk.h:1025
void OnStart(Stack *&active)
Called by platform driver immediately after a scheduler start (first tick).
Definition stk.h:1363
EFsmState
Finite-state machine (FSM) state. Encodes what the kernel is currently doing between two consecutive ...
Definition stk.h:1100
KernelTask * FindTaskByStack(const Stack *stack)
Find kernel task by the bound Stack instance.
Definition stk.h:1296
KernelTask TaskStorageType[TASKS_MAX]
KernelTask array type used as a storage for the KernelTask instances.
Definition stk.h:2059
void SuspendTask(ITask *user_task, bool &suspended)
Suspend task.
Definition stk.h:971
void OnTaskSwitch(Word caller_SP)
Called by Thread process (via IKernelService::SwitchToNext) to switch to a next task.
Definition stk.h:1484
EFsmState GetNewFsmState(KernelTask *&next)
Get new FSM state.
Definition stk.h:1806
void RequestAddTask(ITask *user_task)
Request to add new task.
Definition stk.h:1256
void AddTask(ITask *user_task, Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
Register a task for hard real-time (HRT) scheduling.
Definition stk.h:898
StackMemoryWrapper< STACK_SIZE_MIN > ExitTrapStackMemory
Stack memory wrapper type for the exit trap.
Definition stk.h:93
void Initialize(uint32_t resolution_us=PERIODICITY_DEFAULT)
Initialize kernel.
Definition stk.h:832
bool StateWake(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
Wakes up after sleeping.
Definition stk.h:1903
bool OnTick(Stack *&idle, Stack *&active)
Process one scheduler tick. Called from the platform timer/tick ISR.
Definition stk.h:1457
EState GetState() const
Get kernel state.
Definition stk.h:1092
KernelTask * FindTaskBySP(Word SP)
Find kernel task for a Stack Pointer (SP).
Definition stk.h:1312
void InitTraps()
Initialize stack of the traps.
Definition stk.h:1140
void OnTaskSleepUntil(Word caller_SP, Ticks timestamp)
Called by Thread process (via IKernelService::SleepUntil) for exclusion of the calling process from s...
Definition stk.h:1509
ISyncObject::ListHeadType SyncObjectList
Intrusive list of active ISyncObject instances registered with this kernel. Each sync object in this ...
Definition stk.h:2097
ExitTrapStack m_exit_trap[((((TMode) &(KERNEL_DYNAMIC)) !=0U) ?(1) :(0))]
Definition stk.h:2105
EFsmEvent
Finite-state machine (FSM) event. Computed by FetchNextEvent() each tick based on strategy output and...
Definition stk.h:1114
IWaitObject * OnTaskWait(Word caller_SP, ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
Called from the Thread process when task needs to wait.
Definition stk.h:1547
ITaskSwitchStrategy * GetSwitchStrategy()
Get task-switching strategy instance owned by this kernel.
Definition stk.h:1088
KernelTask * FindTaskByUserTask(const ITask *user_task)
Find kernel task by the bound ITask instance.
Definition stk.h:1280
static bool IsValidFsmState(EFsmState state)
Check if FSM state is valid.
Definition stk.h:1132
static bool IsHrtMode()
Definition stk.h:2033
void OnSuspend(bool suspended)
Called from the Thread process to suspend scheduling.
Definition stk.h:1595
void RemoveTask(KernelTask *task)
Remove kernel task.
Definition stk.h:1338
void Start()
Start the scheduler. This call does not return until all tasks have exited (KERNEL_DYNAMIC mode) or i...
Definition stk.h:1050
void AddKernelTask(KernelTask *task)
Add kernel task to the scheduling strategy.
Definition stk.h:1210
static bool IsDynamicMode()
Definition stk.h:2032
void RemoveTask(ITask *user_task)
Remove a previously added task from the kernel before Start().
Definition stk.h:922
ERequest
Bitmask flags for pending inter-task requests that must be processed by the kernel on the next tick (...
Definition stk.h:100
~Kernel()
Destructor.
Definition stk.h:820
bool StateExit(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
Exits from scheduling.
Definition stk.h:1968
TId OnGetTid(Word caller_SP)
Called from the Thread process to get the task/thread id of the calling process.
Definition stk.h:1587
void AllocateAndAddNewTask(ITask *user_task)
Allocate new instance of KernelTask and add it into the scheduling process.
Definition stk.h:1226
void HrtAllocateAndAddNewTask(ITask *user_task, Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
Allocate new instance of KernelTask and add it into the HRT scheduling process.
Definition stk.h:1242
Timeout UpdateTasks(const Timeout elapsed_ticks)
Update tasks (sleep, requests).
Definition stk.h:1605
void ScheduleAddTask()
Signal the kernel to process a pending AddTask request on the next tick.
Definition stk.h:2003
bool IsInitialized() const
Check whether Initialize() has been called and completed successfully.
Definition stk.h:1997
IPlatform * GetPlatform()
Get platform driver instance owned by this kernel.
Definition stk.h:1083
StackMemoryWrapper<(32U)> SleepTrapStackMemory
Stack memory wrapper type for the sleep trap.
Definition stk.h:87
bool StateSwitch(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
Switches contexts.
Definition stk.h:1857
static bool IsTicklessMode()
Definition stk.h:2035
void OnTaskExit(Stack *stack)
Called from the Thread process when task finished (its Run function exited by return).
Definition stk.h:1530
Kernel()
Construct the kernel with all storage zero-initialized and the request flag set to ~0 (indicating uni...
Definition stk.h:798
void UpdateTaskRequest()
Update pending task requests.
Definition stk.h:1730
bool StateSleep(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
Enters into a sleeping mode.
Definition stk.h:1935
Timeout UpdateTaskState(const Timeout elapsed_ticks)
Update task state: process removals, advance sleep timers, and track HRT durations.
Definition stk.h:1625
void OnStop()
Called by the platform driver after a scheduler stop (all tasks have exited).
Definition stk.h:1430
void UpdateSyncObjects(const Timeout elapsed_ticks)
Update synchronization objects.
Definition stk.h:1709
void OnTaskSleep(Word caller_SP, Timeout ticks)
Called by Thread process (via IKernelService::Sleep) for exclusion of the calling process from schedu...
Definition stk.h:1489
bool IsStarted() const
Check whether scheduler is currently running.
Definition stk.h:1075
void AddTask(ITask *user_task)
Register task for a soft real-time (SRT) scheduling.
Definition stk.h:859
EFsmEvent FetchNextEvent(KernelTask *&next)
Fetch next event for the FSM.
Definition stk.h:1765
Internal per-slot kernel descriptor that wraps a user ITask instance.
Definition stk.h:117
KernelTask()
Construct a free (unbound) task slot. All fields set to zero/null.
Definition stk.h:148
void ScheduleSleep(Timeout ticks)
Put the task into a sleeping state for the specified number of ticks.
Definition stk.h:601
Timeout GetSleepTicks(Timeout sleep_ticks)
Definition stk.h:250
TId GetTid() const
Get task identifier.
Definition stk.h:179
int32_t m_rt_weight[((((TStrategy::WEIGHT_API) &(1)) !=0U) ?(1) :(0))]
Run-time weight for weighted-round-robin scheduling. Zero-size for unweighted strategies.
Definition stk.h:632
WaitObject m_wait_obj[((((TMode) &(KERNEL_SYNC)) !=0U) ?(1) :(0))]
Embedded wait object for synchronization. Zero-size (no memory) if KERNEL_SYNC is not set.
Definition stk.h:633
void SetCurrentWeight(int32_t weight)
Update the run-time scheduling weight (weighted strategies only).
Definition stk.h:196
friend class Kernel
Definition stk.h:118
bool HrtIsDeadlineMissed(Timeout duration) const
Check if deadline missed.
Definition stk.h:590
EStateFlags
Bitmask of transient state flags. Set by the task or the kernel and consumed (cleared) during UpdateT...
Definition stk.h:125
@ STATE_REMOVE_PENDING
Task returned from its Run function; slot will be freed on the next tick (KERNEL_DYNAMIC only).
Definition stk.h:127
@ STATE_SLEEP_PENDING
Task called Sleep/SleepUntil/Yield; strategy's OnTaskSleep() will be invoked on the next tick (sleep-...
Definition stk.h:128
@ STATE_NONE
No pending state flags.
Definition stk.h:126
void ScheduleRemoval()
Schedule the removal of the task from the kernel on next tick.
Definition stk.h:489
Timeout GetHrtRelativeDeadline() const
Get remaining HRT deadline (ticks left before the deadline expires).
Definition stk.h:242
Stack m_stack
Stack descriptor (SP register value + access mode + optional tid).
Definition stk.h:627
Stack * GetUserStack()
Get stack descriptor for this task slot.
Definition stk.h:164
void Bind(TPlatform *platform, ITask *user_task)
Bind this slot to a user task: set access mode, task ID, and initialize the stack.
Definition stk.h:445
void HrtHardFailDeadline(IPlatform *platform)
Hard-fail HRT task when it missed its deadline.
Definition stk.h:567
void HrtInit(Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
Initialize task with HRT info.
Definition stk.h:523
volatile uint32_t m_state
Bitmask of EStateFlags. Written by task thread, read/cleared by kernel tick.
Definition stk.h:628
int32_t GetCurrentWeight() const
Get current (run-time) scheduling weight.
Definition stk.h:212
ITask * GetUserTask()
Get bound user task.
Definition stk.h:159
void BusyWaitWhileSleeping() const
Block further execution of the task's context while in sleeping state.
Definition stk.h:618
ITask * m_user
Bound user task, or NULL when slot is free.
Definition stk.h:626
~KernelTask()
Destructor.
Definition stk.h:285
bool IsBusy() const
Check whether this slot is bound to a user task.
Definition stk.h:169
void HrtOnWorkCompleted()
Called when task process called IKernelService::SwitchToNext to inform Kernel that work is completed.
Definition stk.h:581
void Wake()
Wake this task on the next scheduling tick.
Definition stk.h:185
HrtInfo m_hrt[((((TMode) &(KERNEL_HRT)) !=0U) ?(1) :(0))]
HRT metadata. Zero-size (no memory) in non-HRT mode.
Definition stk.h:631
Timeout GetHrtDeadline() const
Get absolute HRT deadline (ticks elapsed since task was activated).
Definition stk.h:230
SrtInfo m_srt[((((TMode) &(KERNEL_HRT)) !=0U) ?(0) :(1))]
SRT metadata. Zero-size (no memory) in KERNEL_HRT mode.
Definition stk.h:630
volatile Timeout m_time_sleep
Sleep countdown: negative while sleeping (absolute value = ticks remaining), zero when awake.
Definition stk.h:629
bool IsPendingRemoval() const
Check if task is pending removal.
Definition stk.h:504
void HrtOnSwitchedOut(IPlatform *)
Called when task is switched out from the scheduling process.
Definition stk.h:549
void Unbind()
Reset this slot to the free (unbound) state, clearing all scheduling metadata.
Definition stk.h:468
void HrtOnSwitchedIn()
Called when task is switched into the scheduling process.
Definition stk.h:542
bool IsMemoryOfSP(Word SP) const
Check if Stack Pointer (SP) belongs to this task.
Definition stk.h:509
int32_t GetWeight() const
Get static scheduling weight from the user task.
Definition stk.h:205
Timeout GetHrtPeriodicity() const
Get HRT scheduling periodicity.
Definition stk.h:218
bool IsSleeping() const
Check whether this task is currently sleeping (waiting for a tick or a wake event).
Definition stk.h:174
Payload for an in-flight AddTask() request issued by a running task.
Definition stk.h:140
ITask * user_task
User task to add. Must remain valid for the lifetime of its kernel slot.
Definition stk.h:141
Per-task soft real-time (SRT) metadata.
Definition stk.h:294
void Clear()
Clear all fields, ready for slot re-use.
Definition stk.h:300
AddTaskRequest * add_task_req
Definition stk.h:310
Per-task Hard Real-Time (HRT) scheduling metadata.
Definition stk.h:318
void Clear()
Clear all fields, ready for slot re-use or re-activation.
Definition stk.h:324
volatile bool done
Set to true when the task signals work completion (via Yield() or on exit). Triggers HrtOnSwitchedOut...
Definition stk.h:335
Timeout deadline
Maximum allowed active duration in ticks (relative to switch-in). Exceeding this triggers OnDeadlineM...
Definition stk.h:333
Timeout periodicity
Activation period in ticks: the task is re-activated every this many ticks.
Definition stk.h:332
Timeout duration
Ticks spent in the active (non-sleeping) state in the current period. Incremented by UpdateTaskState(...
Definition stk.h:334
Concrete implementation of IWaitObject, embedded in each KernelTask slot.
Definition stk.h:345
bool IsWaiting() const
Check if busy with waiting.
Definition stk.h:378
bool Tick(Timeout elapsed_ticks)
Advance the timeout countdown by one tick.
Definition stk.h:404
Timeout m_time_wait
Ticks remaining until timeout. Decremented each tick; WAIT_INFINITE means no timeout.
Definition stk.h:438
bool IsTimeout() const
Check whether the wait expired due to timeout.
Definition stk.h:373
void Wake(bool timeout)
Wake the waiting task (called by ISyncObject when it signals).
Definition stk.h:385
void SetupWait(ISyncObject *sync_obj, Timeout timeout)
Configure and arm this wait object for a new wait operation.
Definition stk.h:424
TId GetTid() const
Get the TId of the task that owns this wait object.
Definition stk.h:368
volatile bool m_timeout
true if the wait expired due to timeout rather than a Wake() signal.
Definition stk.h:437
ISyncObject * m_sync_obj
Sync object this wait is registered with, or NULL when not waiting.
Definition stk.h:436
KernelTask * m_task
Back-pointer to the owning KernelTask. Set once at construction; never changes.
Definition stk.h:435
Payload stored in the sync object's kernel-side list entry while a task is waiting.
Definition stk.h:361
ISyncObject * sync_obj
Sync object whose Tick() will be called each kernel tick.
Definition stk.h:362
KernelService()
Construct an uninitialized service instance (m_platform = null, m_ticks = 0).
Definition stk.h:754
IWaitObject * Wait(ISyncObject *sobj, IMutex *mutex, Timeout ticks)
Put calling process into a waiting state until synchronization object is signaled or timeout occurs.
Definition stk.h:712
volatile Ticks m_ticks
Global tick counter. Written via hw::WriteVolatile64() by IncrementTick() (ISR context); read via hw:...
Definition stk.h:783
~KernelService()
Destructor.
Definition stk.h:760
friend class Kernel
Definition stk.h:645
void SwitchToNext()
Notify scheduler to switch to the next task (yield).
Definition stk.h:705
Ticks GetTicks() const
Get number of ticks elapsed since kernel start.
Definition stk.h:650
Timeout Suspend()
Suspend scheduling.
Definition stk.h:725
void Sleep(Timeout ticks)
Put calling process into a sleep state.
Definition stk.h:673
Cycles GetSysTimerCount() const
Get system timer count value.
Definition stk.h:654
TPlatform * m_platform
Typed platform driver pointer, set at Initialize().
Definition stk.h:782
void SleepUntil(Ticks timestamp)
Put calling process into a sleep state until the specified timestamp.
Definition stk.h:689
TId GetTid() const
Get thread Id of the currently running task.
Definition stk.h:648
void Resume(Timeout elapsed_ticks)
Resume scheduling after a prior Suspend() call.
Definition stk.h:738
uint32_t GetTickResolution() const
Get number of microseconds in one tick.
Definition stk.h:652
uint32_t GetSysTimerFrequency() const
Get system timer frequency.
Definition stk.h:656
void Delay(Timeout ticks)
Delay calling process.
Definition stk.h:658
void IncrementTicks(Ticks advance)
Increment counter by value.
Definition stk.h:776
void Initialize(IPlatform *platform)
Initialize instance.
Definition stk.h:768
Storage bundle for the sleep trap: a Stack descriptor paired with its backing memory.
Definition stk.h:2069
SleepTrapStackMemory::MemoryType Memory
Definition stk.h:2070
Memory memory
Backing stack memory array. Size: STK_SLEEP_TRAP_STACK_SIZE elements of Word.
Definition stk.h:2073
Stack stack
Stack descriptor (SP register value + access mode). Initialised by InitTraps() on every Start().
Definition stk.h:2072
Storage bundle for the exit trap: a Stack descriptor paired with its backing memory.
Definition stk.h:2085
Memory memory
Backing stack memory array. Size: STACK_SIZE_MIN elements of Word.
Definition stk.h:2089
ExitTrapStackMemory::MemoryType Memory
Definition stk.h:2086
Stack stack
Stack descriptor (SP register value + access mode). Initialised by InitTraps() on every Start().
Definition stk.h:2088
RAII guard that enters the critical section on construction and exits it on destruction.
Definition stk_arch.h:218
Stack descriptor.
Definition stk_common.h:219
EAccessMode mode
Hardware access mode of the owning task (see EAccessMode).
Definition stk_common.h:221
virtual Word * GetStack() const =0
Get pointer to the stack memory.
Wait object.
Definition stk_common.h:270
Synchronization object.
Definition stk_common.h:355
DLEntryType ListEntryType
List entry type of ISyncObject elements.
Definition stk_common.h:371
virtual void AddWaitObject(IWaitObject *wobj)
Called by kernel when a new task starts waiting on this event.
Definition stk_common.h:376
DLHeadType ListHeadType
List head type for ISyncObject elements.
Definition stk_common.h:366
Interface for mutex synchronization primitive.
Definition stk_common.h:439
virtual void Unlock()=0
Unlock the mutex.
virtual void Lock()=0
Lock the mutex.
Interface for a user task.
Definition stk_common.h:491
virtual void OnExit()=0
Called by the kernel before removal from the scheduling (see stk::KERNEL_DYNAMIC).
virtual EAccessMode GetAccessMode() const =0
Get hardware access mode of the user task.
virtual TId GetId() const =0
Get task Id set by application.
Scheduling-strategy-facing interface for a kernel task slot.
Definition stk_common.h:562
Interface for a platform driver.
Definition stk_common.h:648
virtual void ProcessHardFault()=0
Cause a hard fault of the system.
Interface for a back-end event handler.
Definition stk_common.h:656
Interface for a task switching strategy implementation.
Definition stk_common.h:889
Interface for the implementation of the kernel of the scheduler. It supports Soft and Hard Real-Time ...
Definition stk_common.h:961
Interface for the kernel services exposed to the user processes during run-time when Kernel started s...
Adapts an externally-owned stack memory array to the IStackMemory interface.
Definition stk_helper.h:184
StackMemoryDef< _StackSize >::Type MemoryType
Definition stk_helper.h:189
DLHeadType * GetHead() const
Get the list head this entry currently belongs to.
DLEntryType * GetNext() const
Get the next entry in the list.