SuperTinyKernel™ RTOS 1.06.0
Lightweight, high-performance, deterministic, bare-metal C++ RTOS for resource-constrained embedded systems. MIT Open Source License.
Loading...
Searching...
No Matches
stk_arch_x86-win32.cpp
Go to the documentation of this file.
1/*
2 * SuperTinyKernel(TM) RTOS: Lightweight High-Performance Deterministic C++ RTOS for Embedded Systems.
3 *
4 * Source: https://github.com/SuperTinyKernel-RTOS
5 *
6 * Copyright (c) 2022-2026 Neutron Code Limited <stk@neutroncode.com>. All Rights Reserved.
7 * License: MIT License, see LICENSE for a full text.
8 */
9
10// note: If missing, this header must be customized (get it in the root of the source folder) and
11// copied to the /include folder manually.
12#include "stk_config.h"
13
14#ifdef _STK_ARCH_X86_WIN32
15
16#include "stk_arch.h"
18
19using namespace stk;
20
21#define WIN32_LEAN_AND_MEAN
22#include <windows.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <assert.h>
26#include <list>
27#include <vector>
28
29using namespace stk;
30
31#ifndef WINAPI
32#define WINAPI __stdcall
33#endif
34
35typedef UINT MMRESULT;
36typedef MMRESULT (WINAPI * timeBeginPeriodF)(UINT uPeriod);
37static timeBeginPeriodF timeBeginPeriod = nullptr;
38
39#define STK_X86_WIN32_CRITICAL_SECTION CRITICAL_SECTION
40#define STK_X86_WIN32_CRITICAL_SECTION_INIT(SES) ::InitializeCriticalSection(SES)
41#define STK_X86_WIN32_CRITICAL_SECTION_START(SES) ::EnterCriticalSection(SES)
42#define STK_X86_WIN32_CRITICAL_SECTION_END(SES) ::LeaveCriticalSection(SES)
43#define STK_X86_WIN32_MIN_RESOLUTION (1000)
44#define STK_X86_WIN32_GET_SP(STACK) (STACK + 2) // +2 to overcome stack filler check inside Kernel (adjusting to +2 preserves 8-byte alignment)
45#define SLK_UNLOCKED hw::SpinLock::UNLOCKED
46#define SLK_LOCKED hw::SpinLock::LOCKED
47
50static __stk_forceinline bool HW_SpinLockTryLock(volatile LONG &lock)
51{
52 return (InterlockedCompareExchange(
53 reinterpret_cast<volatile LONG *>(&lock), SLK_LOCKED, SLK_UNLOCKED) == SLK_UNLOCKED);
54}
55
58static __stk_forceinline void HW_SpinLockLock(volatile LONG &lock)
59{
60 uint8_t sleep_time = 0;
61 uint32_t timeout = 0xFFFFFF;
62
63test:
64 while (!HW_SpinLockTryLock(lock))
65 {
66 if (--timeout == 0)
67 {
68 // invariant violated: the lock owner exited without releasing
70 }
71
72 for (volatile int32_t spin = 100; (spin != 0); spin--)
73 {
74 __stk_relax_cpu();
75
76 // check if became unlocked then try locking atomically again
77 if (lock == SLK_UNLOCKED)
78 goto test;
79 }
80
81 // avoid priority inversion
82 ::Sleep(sleep_time);
83 sleep_time ^= 1;
84 }
85}
86
89static __stk_forceinline void HW_SpinLockUnlock(volatile LONG &lock)
90{
91 InterlockedExchange(reinterpret_cast<volatile LONG *>(&lock), SLK_UNLOCKED);
92}
93
94struct Win32ScopedCriticalSection
95{
96 STK_X86_WIN32_CRITICAL_SECTION &m_sec;
97
98 explicit Win32ScopedCriticalSection(STK_X86_WIN32_CRITICAL_SECTION &sec) : m_sec(sec)
99 {
100 STK_X86_WIN32_CRITICAL_SECTION_START(&sec);
101 }
102 ~Win32ScopedCriticalSection()
103 {
104 STK_X86_WIN32_CRITICAL_SECTION_END(&m_sec);
105 }
106};
107
108class HiResClockQPC
109{
110 LARGE_INTEGER m_freq;
111 LARGE_INTEGER m_start;
112
113public:
114 explicit HiResClockQPC()
115 {
116 QueryPerformanceFrequency(&m_freq);
117 QueryPerformanceCounter(&m_start);
118 }
119
120 static HiResClockQPC *GetInstance()
121 {
122 // keep declaration function-local to allow compiler stripping it from the binary if
123 // it is unused by the user code
124 static HiResClockQPC clock;
125 return &clock;
126 }
127
128 Cycles GetCycles()
129 {
130 LARGE_INTEGER current;
131 QueryPerformanceCounter(&current);
132
133 // relative cycles since simulation start
134 return static_cast<Cycles>(current.QuadPart - m_start.QuadPart);
135 }
136
137 uint32_t GetFrequency()
138 {
139 return static_cast<uint32_t>(m_freq.QuadPart);
140 }
141};
142
144static struct Context : public PlatformContext
145{
    /*! Construct an inert context; all state is reset again by Initialize().
        Initializer list mirrors member declaration order. */
    Context()
        : m_overrider(nullptr),
        m_sleep_trap(nullptr),
        m_exit_trap(nullptr),
        m_winmm_dll(nullptr),
        m_timer_thread(nullptr),
        m_tls(TLS_OUT_OF_INDEXES),
        m_tasks(),
        m_task_threads(),
        m_timer_tid(0),
        m_cs(),
        m_csu_nesting(0),
        m_started(false),
        m_stop_signal(false)
    {}
161
    /*! (Re)initialize the context for a new scheduling session.
        \param handler       Kernel event sink driving scheduling decisions.
        \param service       Kernel service exposed to user tasks.
        \param exit_trap     Stack descriptor of the Exit trap.
        \param resolution_us Requested tick resolution (microseconds). */
    void Initialize(IPlatform::IEventHandler *handler, IKernelService *service, Stack *exit_trap, int32_t resolution_us)
    {
        PlatformContext::Initialize(handler, service, exit_trap, resolution_us);

        m_overrider = nullptr;
        m_sleep_trap = nullptr; // set by Context::InitStack
        m_exit_trap = nullptr; // set by Context::InitStack
        m_winmm_dll = nullptr;
        m_timer_thread = nullptr;
        m_started = false;
        m_stop_signal = false;
        m_csu_nesting = 0;
        m_timer_tid = 0;
    #if STK_TICKLESS_IDLE
        m_sleep_ticks = 0;
    #endif

        // TLS slot holds the per-thread "thread pointer" word (see GetTls/SetTls)
        if ((m_tls = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        {
            assert(false);
            return; // cannot operate without a TLS slot
        }

        STK_X86_WIN32_CRITICAL_SECTION_INIT(&m_cs);

        LoadWindowsAPI();
    }
189
    /*! Release the TLS slot and the Winmm module (if we loaded it).
        NOTE(review): m_cs is never passed to DeleteCriticalSection here —
        presumably acceptable for a process-lifetime singleton, but confirm. */
    ~Context()
    {
        if (m_tls != TLS_OUT_OF_INDEXES)
            TlsFree(m_tls);

        UnloadWindowsAPI();
    }
200
201 void LoadWindowsAPI()
202 {
203 HMODULE winmm = GetModuleHandleA("Winmm");
204 if (winmm == nullptr)
205 m_winmm_dll = winmm = LoadLibraryA("Winmm.dll");
206 assert(winmm != nullptr);
207
208 timeBeginPeriod = (timeBeginPeriodF)GetProcAddress(winmm, "timeBeginPeriod");
209 assert(timeBeginPeriod != nullptr);
210
211 timeBeginPeriod(1);
212 }
213
214 void UnloadWindowsAPI()
215 {
216 if (m_winmm_dll != nullptr)
217 {
218 FreeLibrary(m_winmm_dll);
219 m_winmm_dll = nullptr;
220 }
221 }
222
223 struct TaskContext
224 {
225 TaskContext() : m_task(nullptr), m_stack(nullptr), m_thread(nullptr), m_thread_id(0)
226 {}
227
228 void Initialize(ITask *task, Stack *stack)
229 {
230 m_task = task;
231 m_stack = stack;
232 m_thread = nullptr;
233 m_thread_id = 0;
234
235 InitThread();
236 }
237
238 void InitThread()
239 {
240 // simulate stack size limitation
241 size_t stack_size = m_task->GetStackSize() * sizeof(Word);
242
243 m_thread = CreateThread(nullptr, stack_size, &OnTaskRun, this, CREATE_SUSPENDED, &m_thread_id);
244 }
245
246 static DWORD WINAPI OnTaskRun(LPVOID param)
247 {
248 ((TaskContext *)param)->m_task->Run();
249 return 0;
250 }
251
252 ITask *m_task;
253 Stack *m_stack;
254 HANDLE m_thread;
255 DWORD m_thread_id;
256 };
257
258 bool InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task);
259 void ConfigureTime();
260 void StartActiveTask();
261 void CreateTimerThreadAndJoin();
262 void Cleanup();
263 void ProcessTick();
264 void SwitchContext();
265 void SwitchToNext();
266 void Sleep(Timeout ticks);
267 void SleepUntil(Ticks timestamp);
268 IWaitObject *Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout);
269 void Stop();
270 Word GetCallerSP() const;
271 TId GetTid() const;
272
274 {
275 return hw::PtrToWord(TlsGetValue(m_tls));
276 }
277
    /*! Store the simulated thread-pointer word into the calling thread's TLS slot. */
    __stk_forceinline void SetTls(Word tp)
    {
        TlsSetValue(m_tls, hw::WordToPtr<void>(tp));
    }
282
    /*! Enter the global (recursive) critical section. On the outermost
        entry the timer thread is suspended so no tick / context switch can
        preempt the protected region. Nesting depth is bounded by
        STK_CRITICAL_SECTION_NESTINGS_MAX. */
    __stk_forceinline void EnterCriticalSection()
    {
        STK_X86_WIN32_CRITICAL_SECTION_START(&m_cs);

        if (m_csu_nesting == 0)
        {
            // avoid suspending self
            if (GetCurrentThreadId() != m_timer_tid)
                SuspendThread(m_timer_thread);
        }

        // increase nesting count within a limit
        if (++m_csu_nesting > STK_CRITICAL_SECTION_NESTINGS_MAX)
        {
            // invariant violated: exceeded max allowed number of recursions
            STK_KERNEL_PANIC(KERNEL_PANIC_CS_NESTING_OVERFLOW);
        }
    }
301
    /*! Exit the global critical section; on the outermost exit the timer
        thread is resumed again (mirror image of EnterCriticalSection). */
    __stk_forceinline void ExitCriticalSection()
    {
        STK_ASSERT(m_csu_nesting != 0); // unbalanced Exit without Enter

        --m_csu_nesting;

        if (m_csu_nesting == 0)
        {
            // suspending self is not supported
            if (GetCurrentThreadId() != m_timer_tid)
                ResumeThread(m_timer_thread);
        }

        STK_X86_WIN32_CRITICAL_SECTION_END(&m_cs);
    }
317
318 IPlatform::IEventOverrider *m_overrider;
319 Stack *m_sleep_trap;
320 Stack *m_exit_trap;
321 HMODULE m_winmm_dll;
322 HANDLE m_timer_thread;
323 DWORD m_tls;
324 std::list<TaskContext *> m_tasks;
325 std::vector<HANDLE> m_task_threads;
326 DWORD m_timer_tid;
327#if STK_TICKLESS_IDLE
328 Timeout m_sleep_ticks;
329#endif
330 STK_X86_WIN32_CRITICAL_SECTION m_cs;
331 uint8_t m_csu_nesting;
332 bool m_started;
333 volatile bool m_stop_signal;
334}
335s_StkPlatformContext[1];
336
338static volatile EKernelPanicId g_LastPanicId = KERNEL_PANIC_NONE;
339
340__stk_attr_noinline // keep out of inlining to preserve stack frame
341__stk_attr_noreturn // never returns - a trap
343{
344 g_LastPanicId = id;
345
346 // spin forever: without a watchdog, a debugger can attach and inspect 'id'
347 for (;;)
348 {
349 __stk_relax_cpu();
350 }
351}
352
353static __stk_forceinline DWORD TicksToMs(uint32_t ticks)
354{
355 return (ticks * GetContext().m_tick_resolution) / 1000U;
356}
357
/*! Dedicated high-priority thread delivering kernel ticks.
    It waits on its own thread handle — never signaled while the thread
    runs — so the wait acts as a periodic timer: each WAIT_TIMEOUT is one
    tick period.
    NOTE(review): m_timer_thread is assigned by the creating thread only
    after CreateThread() returns; if this thread runs first it may observe
    a null handle in WaitForSingleObject — confirm the intended ordering. */
static DWORD WINAPI TimerThread(LPVOID param)
{
    (void)param;

    DWORD wait_ms = TicksToMs(1U);
    GetContext().m_timer_tid = GetCurrentThreadId();

    while (WaitForSingleObject(GetContext().m_timer_thread, wait_ms) == WAIT_TIMEOUT)
    {
        if (GetContext().m_stop_signal)
            break;

        GetContext().ProcessTick();

    #if STK_TICKLESS_IDLE
        // tickless mode: stretch the next wait to the computed sleep period
        wait_ms = TicksToMs(GetContext().m_sleep_ticks);
    #endif
    }

    return 0;
}
379
/*! Clamp the tick resolution to what Windows timers can realistically
    deliver and request 1 ms granularity from the multimedia timer API. */
void Context::ConfigureTime()
{
    // Windows timers are jittery, so make resolution more coarse
    if (m_tick_resolution < STK_X86_WIN32_MIN_RESOLUTION)
        m_tick_resolution = STK_X86_WIN32_MIN_RESOLUTION;

    // increase precision of ticks to at least 1 ms (although Windows timers will still be quite coarse and have jitter of +1 ms)
    timeBeginPeriod(1);
}
389
390void Context::StartActiveTask()
391{
392 STK_ASSERT(m_stack_active != nullptr);
393 TaskContext *active_task = hw::WordToPtr<TaskContext>(m_stack_active->SP);
394 STK_ASSERT(active_task != nullptr);
395
396 ResumeThread(active_task->m_thread);
397}
398
/*! Launch the scheduling session and block until it ends:
    1. resume the first active task thread,
    2. create the high-priority tick thread,
    3. wait for task threads to exit one by one, reporting each exit to
       the kernel handler,
    4. finally join the timer thread (it terminates on the stop signal
       sent by IPlatform::Stop() from the last exiting task). */
void Context::CreateTimerThreadAndJoin()
{
    m_started = true;

#if STK_TICKLESS_IDLE
    m_sleep_ticks = 1;
#endif

    m_handler->OnStart(m_stack_active);

    StartActiveTask();

    // create tick thread with highest priority
    m_timer_thread = CreateThread(nullptr, 0, &TimerThread, nullptr, 0, nullptr);
    STK_ASSERT(m_timer_thread != nullptr);
    SetThreadPriority(m_timer_thread, THREAD_PRIORITY_TIME_CRITICAL);

    while (!m_task_threads.empty())
    {
        // wait for any one task thread to exit
        DWORD result = WaitForMultipleObjects((DWORD)m_task_threads.size(), m_task_threads.data(), FALSE, INFINITE);
        STK_ASSERT(result != WAIT_TIMEOUT);
        STK_ASSERT(result != WAIT_ABANDONED);
        STK_ASSERT(result != WAIT_FAILED);

        Win32ScopedCriticalSection __cs(m_cs);

        // map the wait result index back to the exited thread handle
        uint32_t i = 0;
        for (std::vector<HANDLE>::iterator itr = m_task_threads.begin(); itr != m_task_threads.end(); ++itr)
        {
            if (result == (WAIT_OBJECT_0 + i))
            {
                // find the TaskContext owning this thread handle
                TaskContext *exiting_task = nullptr;
                for (std::list<TaskContext *>::iterator titr = m_tasks.begin(); titr != m_tasks.end(); ++titr)
                {
                    if ((*titr)->m_thread == (*itr))
                    {
                        exiting_task = (*titr);
                        break;
                    }
                }
                STK_ASSERT(exiting_task != nullptr);

                if (exiting_task != nullptr)
                    m_handler->OnTaskExit(exiting_task->m_stack);

                // erase invalidates itr; break immediately afterwards
                m_task_threads.erase(itr);
                break;
            }

            ++i;
        }
    }

    // join (never returns to the caller from here unless thread is terminated, see KERNEL_DYNAMIC),
    // a stop signal is sent by IPlatform::Stop() by the last exiting task
    if (m_timer_thread != nullptr)
        WaitForSingleObject(m_timer_thread, INFINITE);
}
457
458void Context::Cleanup()
459{
460 // close thread handles of all tasks
461 for (std::list<TaskContext *>::iterator itr = m_tasks.begin(); itr != m_tasks.end(); ++itr)
462 {
463 if ((*itr)->m_thread != nullptr)
464 {
465 CloseHandle((*itr)->m_thread);
466 (*itr)->m_thread = nullptr;
467 }
468 }
469 m_tasks.clear();
470
471 // close timer thread
472 if (m_timer_thread != nullptr)
473 {
474 CloseHandle(m_timer_thread);
475 m_timer_thread = nullptr;
476 }
477
478 // reset stop signal
479 m_stop_signal = false;
480
481 // notify kernel about a full stop
482 m_handler->OnStop();
483}
484
485void Context::ProcessTick()
486{
487 Win32ScopedCriticalSection __cs(m_cs);
488
489#if STK_TICKLESS_IDLE
490 Timeout ticks = m_sleep_ticks;
491#endif
492
493 if (m_handler->OnTick(m_stack_idle, m_stack_active
495 , ticks
496 #endif
497 ))
498 {
499 GetContext().SwitchContext();
500 }
501
502#if STK_TICKLESS_IDLE
503 m_sleep_ticks = ticks;
504#endif
505}
506
/*! Perform a context switch: suspend the thread of the previously active
    (now idle) task and resume the thread of the newly active one. Trap
    stacks (sleep/exit) have no backing thread, so they are skipped. */
void Context::SwitchContext()
{
    // suspend Idle thread
    if ((m_stack_idle != m_sleep_trap) && (m_stack_idle != m_exit_trap))
    {
        TaskContext *idle_task = hw::WordToPtr<TaskContext>(m_stack_idle->SP);
        STK_ASSERT(idle_task != nullptr);

        SuspendThread(idle_task->m_thread);
    }

    // resume Active thread
    if (m_stack_active == m_sleep_trap)
    {
        // sleep trap: let the overrider simulate low-power sleep, if any
        if ((m_overrider == nullptr) || !m_overrider->OnSleep())
        {
            // pass
        }
    }
    else
    if (m_stack_active == GetContext().m_exit_trap)
    {
        // exit trap: nothing to resume
        // pass
    }
    else
    {
        TaskContext *active_task = hw::WordToPtr<TaskContext>(m_stack_active->SP);
        STK_ASSERT(active_task != nullptr);

        ResumeThread(active_task->m_thread);
    }
}
539
540Word Context::GetCallerSP() const
541{
542 Word caller_sp = 0;
543 DWORD calling_tid = GetCurrentThreadId();
544
545 Win32ScopedCriticalSection __cs(const_cast<STK_X86_WIN32_CRITICAL_SECTION &>(m_cs));
546
547 for (std::list<TaskContext *>::const_iterator itr = m_tasks.begin(), end = m_tasks.end(); itr != end; ++itr)
548 {
549 if ((*itr)->m_thread_id == calling_tid)
550 {
551 caller_sp = hw::PtrToWord(STK_X86_WIN32_GET_SP((*itr)->m_task->GetStack()));
552 break;
553 }
554 }
555
556 // expect to find the calling task inside m_tasks
557 STK_ASSERT(caller_sp != 0);
558
559 return caller_sp;
560}
561
/*! Get the task id of the calling thread, resolved via its stack pointer word. */
TId Context::GetTid() const
{
    return m_handler->OnGetTid(GetCallerSP());
}
566
/*! Request an immediate switch away from the calling task. */
void Context::SwitchToNext()
{
    m_handler->OnTaskSwitch(GetCallerSP());
}
571
/*! Put the calling task to sleep for the given number of ticks. */
void Context::Sleep(Timeout ticks)
{
    m_handler->OnTaskSleep(GetCallerSP(), ticks);
}
576
/*! Put the calling task to sleep until the given tick timestamp. */
void Context::SleepUntil(Ticks timestamp)
{
    m_handler->OnTaskSleepUntil(GetCallerSP(), timestamp);
}
581
/*! Block the calling task on a synchronization object (optionally with a
    mutex to release atomically) for at most `timeout` ticks. */
IWaitObject *Context::Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
{
    return m_handler->OnTaskWait(GetCallerSP(), sync_obj, mutex, timeout);
}
586
/*! Request scheduler shutdown: the timer thread observes m_stop_signal on
    its next wakeup and exits its loop. */
void Context::Stop()
{
    m_stop_signal = true;
    m_started = false;
}
592
/*! Initialize a stack descriptor.
    A TaskContext is overlaid (reinterpret_cast, no construction) onto the
    base of the stack memory; for user tasks it is initialized and its
    suspended thread created, while trap stacks are only recorded. In all
    cases the descriptor's SP word is repurposed to point at that in-stack
    TaskContext area (SwitchContext only dereferences it for real tasks). */
bool Context::InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
{
    InitStackMemory(stack_memory);

    TaskContext *ctx = reinterpret_cast<TaskContext *>(STK_X86_WIN32_GET_SP(stack_memory->GetStack()));

    switch (stack_type)
    {
    case STACK_USER_TASK: {
        ctx->Initialize(user_task, stack);

        m_tasks.push_back(ctx);
        m_task_threads.push_back(ctx->m_thread);
        break; }

    case STACK_SLEEP_TRAP: {
        GetContext().m_sleep_trap = stack;
        break; }

    case STACK_EXIT_TRAP: {
        GetContext().m_exit_trap = stack;
        break; }
    }

    stack->SP = hw::PtrToWord(ctx);

    return true;
}
621
/*! Initialize the platform scheduler context (thin forwarder; note the
    argument order differs from Context::Initialize). */
void PlatformX86Win32::Initialize(IEventHandler *event_handler, IKernelService *service, uint32_t resolution_us,
    Stack *exit_trap)
{
    GetContext().Initialize(event_handler, service, exit_trap, resolution_us);
}
627
629{
630 GetContext().ConfigureTime();
631 GetContext().CreateTimerThreadAndJoin();
632 GetContext().Cleanup();
633}
634
636{
637 GetContext().Stop();
638}
639
641{
642 STK_ASSERT(false); // unsupported
643 return 0;
644}
645
646void PlatformX86Win32::Resume(Timeout elapsed_ticks)
647{
648 STK_ASSERT(false); // unsupported
649}
650
/*! Initialize stack memory of a user task or trap (forwarder). */
bool PlatformX86Win32::InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
{
    return GetContext().InitStack(stack_type, stack, stack_memory, user_task);
}
655
657{
658 return GetContext().m_tick_resolution;
659}
660
662{
663 return HiResClockQPC::GetInstance()->GetCycles();
664}
665
667{
668 return HiResClockQPC::GetInstance()->GetFrequency();
669}
670
672{
673 GetContext().SwitchToNext();
674}
675
677{
678 GetContext().Sleep(ticks);
679}
680
682{
683 GetContext().SleepUntil(timestamp);
684}
685
687{
688 return GetContext().Wait(sync_obj, mutex, timeout);
689}
690
692{
693 GetContext().ProcessTick();
694}
695
697{
698 if ((GetContext().m_overrider == nullptr) || !GetContext().m_overrider->OnHardFault())
699 {
701 }
702}
703
/*! Install a platform event overrider; must be set before scheduling starts. */
void PlatformX86Win32::SetEventOverrider(IEventOverrider *overrider)
{
    STK_ASSERT(!GetContext().m_started); // changing overrider while running is not allowed
    GetContext().m_overrider = overrider;
}
709
711{
712 return GetContext().GetCallerSP();
713}
714
716{
717 return GetContext().GetTid();
718}
719
721{
722 return GetContext().GetTls();
723}
724
725void stk::hw::SetTls(Word tp)
726{
727 return GetContext().SetTls(tp);
728}
729
731{
732 return GetContext().m_service;
733}
734
736{
737 GetContext().EnterCriticalSection();
738}
739
741{
742 GetContext().ExitCriticalSection();
743}
744
746{
747 HW_SpinLockLock(m_lock);
748}
749
751{
752 HW_SpinLockUnlock(m_lock);
753}
754
756{
757 return HW_SpinLockTryLock(m_lock);
758}
759
761{
762 return false;
763}
764
766{
767 return HiResClockQPC::GetInstance()->GetCycles();
768}
769
771{
772 return HiResClockQPC::GetInstance()->GetFrequency();
773}
774
775#endif // _STK_ARCH_X86_WIN32
Contains common inventory for platform implementation.
#define GetContext()
Get platform's context.
Hardware Abstraction Layer (HAL) declarations for the stk::hw namespace.
void STK_PANIC_HANDLER_DEFAULT(stk::EKernelPanicId id)
Default panic handler: disable interrupts, record the id, and spin in a tight loop — a defined,...
#define STK_KERNEL_PANIC(id)
Called when the kernel detects an unrecoverable internal fault.
Definition stk_arch.h:63
#define __stk_forceinline
Forces compiler to always inline the decorated function, regardless of optimisation level.
Definition stk_defs.h:104
#define STK_TICKLESS_IDLE
Enables tickless (dynamic-tick) low-power operation during idle periods.
Definition stk_defs.h:36
#define STK_ASSERT(e)
Runtime assertion. Halts execution if the expression e evaluates to false.
Definition stk_defs.h:330
#define __stk_attr_noinline
Prevents compiler from inlining the decorated function (function prefix).
Definition stk_defs.h:185
#define STK_CRITICAL_SECTION_NESTINGS_MAX
Maximum allowable recursion depth for critical section entry (default: 16).
Definition stk_defs.h:404
#define __stk_attr_noreturn
Declares that function never returns to its caller (function prefix).
Definition stk_defs.h:146
Namespace of STK package.
uintptr_t Word
Native processor word type.
Definition stk_common.h:113
void Sleep(Timeout ticks)
Put calling process into a sleep state.
Definition stk_helper.h:326
int64_t Ticks
Ticks value.
Definition stk_common.h:128
int32_t Timeout
Timeout time (ticks).
Definition stk_common.h:123
EStackType
Stack type.
Definition stk_common.h:70
@ STACK_SLEEP_TRAP
Stack of the Sleep trap.
Definition stk_common.h:72
@ STACK_USER_TASK
Stack of the user task.
Definition stk_common.h:71
@ STACK_EXIT_TRAP
Stack of the Exit trap.
Definition stk_common.h:73
void SetTls(Word tp)
Set thread-local storage (TLS).
Word GetTls()
Get thread-local storage (TLS).
void SleepUntil(Ticks timestamp)
Put calling process into a sleep state until the specified timestamp.
Definition stk_helper.h:350
TId GetTid()
Get task/thread Id of the calling task.
Definition stk_helper.h:227
uint64_t Cycles
Cycles value.
Definition stk_common.h:133
Word TId
Definition stk_common.h:118
EKernelPanicId
Identifies the source of a kernel panic.
Definition stk_common.h:52
@ KERNEL_PANIC_HRT_HARD_FAULT
Kernel running in KERNEL_HRT mode reported deadline failure of the task.
Definition stk_common.h:57
@ KERNEL_PANIC_NONE
Panic is absent (no fault).
Definition stk_common.h:53
@ KERNEL_PANIC_SPINLOCK_DEADLOCK
Spin-lock timeout expired: lock owner never released.
Definition stk_common.h:54
__stk_forceinline T * WordToPtr(Word value) noexcept
Cast a CPU register-width integer back to a pointer.
Definition stk_arch.h:111
__stk_forceinline Word PtrToWord(T *ptr) noexcept
Cast a pointer to a CPU register-width integer.
Definition stk_arch.h:94
void SetTls(Word tp)
Write raw thread-pointer (TP) register used as per-task TLS storage.
Word GetTls()
Read raw thread-pointer (TP) register used as per-task TLS storage.
bool IsInsideISR()
Check whether the CPU is currently executing inside a hardware interrupt service routine (ISR).
Base platform context for all platform implementations.
bool InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
Initialize stack memory of the user task.
TId GetTid() const
Get thread Id.
uint32_t GetTickResolution() const
Get resolution of the system tick timer in microseconds. Resolution means a number of microseconds be...
void SetEventOverrider(IEventOverrider *overrider)
Set platform event overrider.
void Start()
Start scheduling.
void Stop()
Stop scheduling.
void SwitchToNext()
Switch to a next task.
uint32_t GetSysTimerFrequency() const
Get system timer frequency.
Cycles GetSysTimerCount() const
Get system timer count value.
Timeout Suspend()
Suspend scheduling.
void ProcessHardFault()
Cause a hard fault of the system.
void ProcessTick()
Process one tick.
void Resume(Timeout elapsed_ticks)
Resume scheduling after a prior Suspend() call.
IWaitObject * Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
void Sleep(Timeout ticks)
Put calling process into a sleep state.
void SleepUntil(Ticks timestamp)
Put calling process into a sleep state until the specified timestamp.
void Initialize(IEventHandler *event_handler, IKernelService *service, uint32_t resolution_us, Stack *exit_trap)
Initialize scheduler's context.
Word GetCallerSP() const
Get caller's Stack Pointer (SP).
static void Enter()
Enter a critical section.
static void Exit()
Exit a critical section.
bool TryLock()
Attempt to acquire SpinLock in a single non-blocking attempt.
void Lock()
Acquire SpinLock, blocking until it is available.
void Unlock()
Release SpinLock, allowing another thread or core to acquire it.
static uint32_t GetFrequency()
Get clock frequency.
static Cycles GetCycles()
Get number of clock cycles elapsed.
Stack descriptor.
Definition stk_common.h:219
Word SP
Stack Pointer (SP) register (note: must be the first entry in this struct).
Definition stk_common.h:220
Interface for a stack memory region.
Definition stk_common.h:231
virtual Word * GetStack() const =0
Get pointer to the stack memory.
Wait object.
Definition stk_common.h:270
Synchronization object.
Definition stk_common.h:355
Interface for mutex synchronization primitive.
Definition stk_common.h:439
Interface for a user task.
Definition stk_common.h:491
Interface for the kernel services exposed to the user processes during run-time when Kernel started s...
static IKernelService * GetInstance()
Get CPU-local instance of the kernel service.
RISC-V specific event handler.