SuperTinyKernel™ RTOS 1.06.0
Lightweight, high-performance, deterministic, bare-metal C++ RTOS for resource-constrained embedded systems. MIT Open Source License.
Loading...
Searching...
No Matches
stk_arch_risc-v.cpp
Go to the documentation of this file.
1/*
2 * SuperTinyKernel(TM) RTOS: Lightweight High-Performance Deterministic C++ RTOS for Embedded Systems.
3 *
4 * Source: https://github.com/SuperTinyKernel-RTOS
5 *
6 * Copyright (c) 2022-2026 Neutron Code Limited <stk@neutroncode.com>. All Rights Reserved.
7 * License: MIT License, see LICENSE for a full text.
8 */
9
10// note: If missing, this header must be customized (get it in the root of the source folder) and
11// copied to the /include folder manually.
12#include "stk_config.h"
13
14#ifdef _STK_ARCH_RISC_V
15
16#include "stk_arch.h"
18#include "stk_helper.h"
19
20using namespace stk;
21
60//#define _STK_RISCV_USE_PENDSV
61
62// CLINT
63// Details: https://github.com/riscv/riscv-aclint/blob/main/riscv-aclint.adoc
64#ifndef STK_RISCV_CLINT_BASE_ADDR
65 #define STK_RISCV_CLINT_BASE_ADDR (0x2000000U)
66#endif
67#ifndef STK_RISCV_CLINT_MTIMECMP_ADDR
68 #define STK_RISCV_CLINT_MTIMECMP_ADDR (STK_RISCV_CLINT_BASE_ADDR + 0x4000U) // 8-byte value, 1 per hart
69#endif
70#ifndef STK_RISCV_CLINT_MTIME_ADDR
71 #define STK_RISCV_CLINT_MTIME_ADDR (STK_RISCV_CLINT_BASE_ADDR + 0xBFF8U) // 8-byte value, global
72#endif
73
78#define STK_RISCV_ISR_STACK_SIZE 256U
79
83#ifndef STK_TIMER_CLOCK_FREQUENCY
84 #define STK_TIMER_CLOCK_FREQUENCY 1000000U
85#endif
86
91#ifndef STK_RISCV_ISR_SECTION
92 #define STK_RISCV_ISR_SECTION
93#endif
94
99#define STK_RISCV_ISR extern "C" STK_RISCV_ISR_SECTION __attribute__ ((interrupt("machine")))
100
102#define STK_ASM_EXIT_FROM_HANDLER "mret"
103
109#ifndef STK_RISCV_CLINT_MTIMECMP_PER_HART
110 #define STK_RISCV_CLINT_MTIMECMP_PER_HART (1)
111#endif
112
117#ifndef STK_ARCH_GET_CPU_ID
118 #define STK_ARCH_GET_CPU_ID() read_csr(mhartid)
119#endif
120
122#if (__riscv_flen == 0)
123 #define STK_RISCV_FPU 0
124#else
125 #define STK_RISCV_FPU __riscv_flen
126#endif
127
128#define STR(x) #x
129#define XSTR(s) STR(s)
130
132#if (__riscv_xlen == 32)
133 #define REGBYTES XSTR(4)
134 #define LREG XSTR(lw)
135 #define SREG XSTR(sw)
136#elif (__riscv_xlen == 64)
137 #define REGBYTES XSTR(8)
138 #define LREG XSTR(ld)
139 #define SREG XSTR(sd)
140#else
141 #error Unsupported RISC-V platform!
142#endif
143
144#if (STK_RISCV_FPU == 32)
145 #define FREGBYTES XSTR(4)
146 #define FLREG XSTR(flw)
147 #define FSREG XSTR(fsw)
148#elif (STK_RISCV_FPU == 64)
149 #define FREGBYTES XSTR(8)
150 #define FLREG XSTR(fld)
151 #define FSREG XSTR(fsd)
152#elif (STK_RISCV_FPU != 0)
153#error Unsupported FP register count!
154#endif
155
156
157#if (__riscv_32e == 1)
158 #define STK_RISCV_REGISTER_COUNT (15 + (STK_RISCV_FPU != 0 ? 31 : 0))
159#else
160 #define STK_RISCV_REGISTER_COUNT (31 + (STK_RISCV_FPU != 0 ? 31 : 0))
161#endif
162
163#define STK_SERVICE_SLOTS 2 // (0) mepc, (1) mstatus
164
165#if (__riscv_32e == 1)
166 #define FOFFSET XSTR(68) // FP stack offset = (17 * 4)
167 #if (STK_RISCV_FPU == 0)
168 #define REGSIZE XSTR(((15 + STK_SERVICE_SLOTS) * 4)) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus
169 #else
170 #if (STK_RISCV_FPU == 32)
171 #define REGSIZE XSTR((((15 + STK_SERVICE_SLOTS) * 4) + (31 * 4))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
172 #elif (STK_RISCV_FPU == 64)
173 #define REGSIZE XSTR((((15 + STK_SERVICE_SLOTS) * 4) + (31 * 8))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
174 #endif
175 #endif
176#elif (__riscv_xlen == 32)
177 #define FOFFSET XSTR(132) // FP stack offset = (33 * 4)
178 #if (STK_RISCV_FPU == 0)
179 #define REGSIZE XSTR(((31 + STK_SERVICE_SLOTS) * 4)) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus
180 #else
181 #if (STK_RISCV_FPU == 32)
182 #define REGSIZE XSTR((((31 + STK_SERVICE_SLOTS) * 4) + (31 * 4))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
183 #elif (STK_RISCV_FPU == 64)
184 #define REGSIZE XSTR((((31 + STK_SERVICE_SLOTS) * 4) + (31 * 8))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
185 #endif
186 #endif
187#elif (__riscv_xlen == 64)
188 #define FOFFSET XSTR(264) // FP stack offset = (33 * 8)
189 #if (STK_RISCV_FPU == 0)
190 #define REGSIZE XSTR(((31 + STK_SERVICE_SLOTS) * 8)) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus
191 #else
192 #if (STK_RISCV_FPU == 32)
193 #define REGSIZE XSTR((((31 + STK_SERVICE_SLOTS) * 8) + (31 * 4))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
194 #elif (STK_RISCV_FPU == 64)
195 #define REGSIZE XSTR((((31 + STK_SERVICE_SLOTS) * 8) + (31 * 8))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
196 #endif
197 #endif
198#endif
199
200#if (__riscv_xlen == 32)
201 #define REGBYTES_LOG2 "2" // log2(4) - used for hart-index shift
202#elif (__riscv_xlen == 64)
203 #define REGBYTES_LOG2 "3" // log2(8)
204#endif
205
212#ifndef STK_SYSTICK_HANDLER
213 #define STK_SYSTICK_HANDLER riscv_mtvec_mti
214#endif
215
222#ifndef STK_SVC_HANDLER
223 #define STK_SVC_HANDLER riscv_mtvec_exception
224#endif
225
233#ifndef STK_MSI_HANDLER
234 #define STK_MSI_HANDLER riscv_mtvec_msi
235#endif
236
// In-memory layout of a task's saved execution context, pushed onto the task's
// stack by STK_ASM_SAVE_CONTEXT and popped by STK_ASM_LOAD_CONTEXT.
// Field order must stay in lock-step with the byte offsets used by those asm
// macros: mepc at +0, mstatus at +REGBYTES, x1 at +2*REGBYTES, and so on.
// note: the x3 (gp) slot is reused to hold fcsr when FPU context is compiled in
// (gp itself is never saved/restored by the context-switch asm).
struct TaskFrame
{
    // Service slots (indices 0, 1) - sit at sp+0 and sp+REGBYTES
    Word MEPC;
    Word MSTATUS;

    // General-purpose register slots (indices 2..N), one per xN
    Word X1_RA;
    Word X2_SP;
#if (STK_RISCV_FPU != 0)
    Word X3_FSR;
#else
    Word X3_GP;
#endif
    Word X4;
    Word X5;
    Word X6;
    Word X7;
    Word X8;
    Word X9;
    Word X10_A0;
    Word X11;
    Word X12;
    Word X13;
    Word X14;
    Word X15;
#if (__riscv_32e != 1)
    // x16..x31 exist only on non-RV32E targets (RV32E has 16 integer registers)
    Word X16;
    Word X17;
    Word X18;
    Word X19;
    Word X20;
    Word X21;
    Word X22;
    Word X23;
    Word X24;
    Word X25;
    Word X26;
    Word X27;
    Word X28;
    Word X29;
    Word X30;
    Word X31;
#endif
#if (STK_RISCV_FPU != 0)
    // FP register slots - at FOFFSET from frame base, immediately after integer slots.
    // FREGBYTES may differ from REGBYTES (32-bit FP on a 64-bit integer machine).
    // Declared as Word arrays for uniform struct sizing; the FP load/store
    // instructions address them by byte offset and tolerate the type mismatch.
    Word F[32];
#endif
};
307
// Data barrier: "fence rw,rw" orders all prior loads/stores before all
// subsequent ones; the "memory" clobber also forbids compiler reordering.
static __stk_forceinline void __DSB()
{
    __asm volatile("fence rw, rw" ::: "memory");
}
314
// Instruction barrier: synchronize the instruction stream with preceding
// stores / CSR side effects before subsequent instruction fetches.
static __stk_forceinline void __ISB()
{
#ifdef __riscv_zifencei
    // Zifencei extension present - use the architectural instruction fence
    __asm volatile("fence.i" ::: "memory");
#else
    // no fence.i available - fall back to a full compiler/memory barrier
    __sync_synchronize();
#endif
}
325
// Wait-for-interrupt: stall the hart until the next interrupt (low-power idle).
static __stk_forceinline void __WFI()
{
    __asm volatile("wfi");
}
332
// Kick off the scheduler by raising an environment-call exception (ecall);
// presumably the exception handler (STK_SVC_HANDLER, riscv_mtvec_exception by
// default) launches the first task - handler body not visible here, confirm.
static __stk_forceinline void HW_StartScheduler()
{
    __asm volatile("ecall");
}
340
// Identifier of the executing hart (mhartid via STK_ARCH_GET_CPU_ID by
// default, overridable), truncated to uint8_t.
static __stk_forceinline uint8_t HW_GetHartId()
{
    return STK_ARCH_GET_CPU_ID();
}
347
// Globally mask machine-mode interrupts by clearing mstatus.MIE.
// note: csrrci takes a 5-bit immediate - assumes MSTATUS_MIE (bit 3) fits,
// which it does on standard RISC-V.
static __stk_forceinline void HW_DisableInterrupts()
{
    __asm volatile("csrrci zero, mstatus, %0"
                   : /* output: none */
                   : "i"(MSTATUS_MIE)
                   : /* clobbers: none */);
}
357
// Globally unmask machine-mode interrupts by setting mstatus.MIE.
static __stk_forceinline void HW_EnableInterrupts()
{
    __asm volatile("csrrsi zero, mstatus, %0"
                   : /* output: none */
                   : "i"(MSTATUS_MIE)
                   : /* clobbers: none */);
}
367
// Atomically clear mstatus.MIE and return the previous mstatus value.
// The returned word is later OR-ed back by HW_ExitCriticalSection, so
// interrupts are re-enabled only if MIE was set on entry (nesting-safe).
static __stk_forceinline Word HW_EnterCriticalSection()
{
    Word ses;
    __asm volatile("csrrci %0, mstatus, %1"
                   : "=r"(ses)
                   : "i"(MSTATUS_MIE)
                   : /* clobbers: none */);

    return ses;
}
381
// Restore the interrupt state captured by HW_EnterCriticalSection: csrrs
// OR-s the saved mstatus bits back, re-enabling MIE only if it was set before.
static __stk_forceinline void HW_ExitCriticalSection(Word ses)
{
    __asm volatile("csrrs zero, mstatus, %0"
                   : /* output: none */
                   : "r"(ses)
                   : /* clobbers: none */);
}
392
// Mask the machine timer interrupt by clearing the MTIE bit in mie
// (the MIP_MTIP mask shares the same bit index as mie.MTIE).
static __stk_forceinline void HW_StopMTimer()
{
    clear_csr(mie, MIP_MTIP);
}
399
// Retire a pended context switch (PendSV-style builds only; no-op otherwise).
// NOTE(review): this clears the MSI *enable* bit in mie rather than writing 0
// to the CLINT msip register set by HW_ScheduleContextSwitch - the interrupt
// stays pending, only masked; confirm this is the intended behavior.
static __stk_forceinline void HW_ClearPendingSwitch()
{
#ifdef _STK_RISCV_USE_PENDSV
    clear_csr(mie, MIP_MSIP);
#endif
}
408
// CPU core clock frequency in Hz, taken from the global SystemCoreClock.
static __stk_forceinline uint32_t HW_CoreClockFrequency()
{
    return SystemCoreClock; // CPU speed, e.g. 125/150 MHz
}
416
// CLINT mtime tick frequency in Hz (compile-time constant,
// overridable via STK_TIMER_CLOCK_FREQUENCY).
static __stk_forceinline uint32_t HW_MtimeClockFrequency()
{
    return STK_TIMER_CLOCK_FREQUENCY; // Timer frequency, e.g. 1 MHz
}
425
// Read the global 64-bit CLINT mtime counter.
// On XLEN > 32 a single 64-bit load suffices; on RV32 the counter must be
// read as two 32-bit halves, re-reading the high word to detect a carry
// from low to high between the two loads (torn-read protection).
static __stk_forceinline uint64_t HW_GetMtime()
{
#if ( __riscv_xlen > 32)
    return *((volatile uint64_t *)STK_RISCV_CLINT_MTIME_ADDR);
#else
    volatile uint32_t *mtime_hi = ((uint32_t *)STK_RISCV_CLINT_MTIME_ADDR) + 1;
    volatile uint32_t *mtime_lo = ((uint32_t *)STK_RISCV_CLINT_MTIME_ADDR);

    uint32_t hi, lo;
    do
    {
        hi = (*mtime_hi);
        lo = (*mtime_lo);
    }
    while (hi != (*mtime_hi)); // make sure mtime_hi did not tick when read mtime_lo

    return ((uint64_t)hi << 32) | lo;
#endif
}
450
454static __stk_forceinline void HW_SetMtimecmp(uint64_t time_next)
455{
456#if STK_RISCV_CLINT_MTIMECMP_PER_HART
457 const uint8_t hart = HW_GetHartId();
458#else
459 const uint8_t hart = 0;
460#endif
461
462#if (__riscv_xlen == 64)
463 ((volatile uint64_t *)STK_RISCV_CLINT_MTIMECMP_ADDR)[hart] = next;
464#else
465 volatile uint32_t *mtime_lo = (uint32_t *)((uint64_t *)STK_RISCV_CLINT_MTIMECMP_ADDR + hart);
466 volatile uint32_t *mtime_hi = mtime_lo + 1;
467
468 // expecting 4-byte aligned memory
469 STK_ASSERT(((uintptr_t)mtime_lo & (4 - 1)) == 0);
470 STK_ASSERT(((uintptr_t)mtime_hi & (4 - 1)) == 0);
471
472 // prevent unexpected interrupt by setting some very large value to the high part
473 // details: https://riscv.org/wp-content/uploads/2017/05/riscv-privileged-v1.10.pdf, page 31
474 (*mtime_hi) = ~0;
475
476 (*mtime_lo) = (uint32_t)(time_next & 0xFFFFFFFF);
477 (*mtime_hi) = (uint32_t)(time_next >> 32);
478#endif
479}
480
486static __stk_forceinline uint64_t HW_GetMtimeElapsed(uint64_t since)
487{
488 return HW_GetMtime() - since;
489}
490
// Enable the per-hart mcycle counter by clearing mcountinhibit.CY (bit 0).
static __stk_forceinline void HW_EnableCycleCounter()
{
    __asm volatile("csrci mcountinhibit, 0x1");
}
497
// Read the 64-bit cycle counter as hi/lo/hi to detect a low-word rollover
// between the reads (torn-read protection).
// NOTE(review): mcycleh exists only on RV32 - on RV64 mcycle is 64-bit and
// this csrr would trap; confirm this path is guarded for RV32 builds only.
static __stk_forceinline Cycles HW_GetCycleCounter()
{
    uint32_t high, low, check;
    do
    {
        __asm volatile("csrr %0, mcycleh" : "=r"(high));
        __asm volatile("csrr %0, mcycle" : "=r"(low));
        __asm volatile("csrr %0, mcycleh" : "=r"(check));
    }
    while (high != check);

    return (static_cast<Cycles>(high) << 32) | low;
}
514
// Current stack pointer value of the caller (copy of the sp register).
static __stk_forceinline Word HW_GetCallerSP()
{
    Word sp;
    __asm volatile("mv %0, sp"
                   : "=r"(sp)
                   : /* input: none */
                   : /* clobbers: none */);

    return sp;
}
527
// Enter a local (per-hart) critical section: disable interrupts, return the
// previous state in 'ses', and fence so following code cannot be reordered
// ahead of the disable.
static __stk_forceinline void HW_CriticalSectionStart(Word &ses)
{
    ses = HW_EnterCriticalSection();

    // ensure the disable is recognized before subsequent code
    __DSB();
    __ISB();
}
538
// Leave a local critical section: flush memory effects, restore the interrupt
// state saved in 'ses', then create a synchronization point where pending
// interrupts can be taken.
static __stk_forceinline void HW_CriticalSectionEnd(Word ses)
{
    // ensure all memory work is finished before re-enabling
    __DSB();

    HW_ExitCriticalSection(ses);

    // synchronization point: any pending interrupt can be serviced immediately at this boundary
    __ISB();
}
551
566static __stk_forceinline bool HW_SpinLockTryLock(volatile bool &lock)
567{
568 return !__atomic_test_and_set(&lock, __ATOMIC_ACQUIRE);
569}
570
587static __stk_forceinline void HW_SpinLockLock(volatile bool &lock)
588{
589 uint32_t timeout = 0xFFFFFF;
590 while (!HW_SpinLockTryLock(lock))
591 {
592 if (--timeout == 0)
593 {
594 // Invariant violated: the lock owner exited without releasing,
595 // Kernel state is suspect, enter defined safe state.
597 }
598 __stk_relax_cpu();
599 }
600}
601
// Release 'lock' with store-release semantics; panics if the lock is not
// currently held (release of an unowned lock indicates corrupted state).
static __stk_forceinline void HW_SpinLockUnlock(volatile bool &lock)
{
    if (!lock)
        STK_KERNEL_PANIC(KERNEL_PANIC_SPINLOCK_DEADLOCK); // release attempt of unowned lock

    // ensure all data writes (like scheduling metadata) are flushed before the lock is released:
    // __atomic_clear with __ATOMIC_RELEASE provides the required store-release barrier,
    // the explicit fence rw,w is retained for toolchains that do not lower __ATOMIC_RELEASE
    // to a full release fence on all RISC-V targets
    __asm volatile("fence rw, w" ::: "memory");

    __atomic_clear(&lock, __ATOMIC_RELEASE);
}
630
// Request a deferred context switch for 'hart'. In PendSV-style builds this
// pends a Machine Software Interrupt by writing 1 into the hart's CLINT msip
// register; otherwise the switch happens inline and this is a no-op.
static __stk_forceinline void HW_ScheduleContextSwitch(uint8_t hart)
{
#ifdef _STK_RISCV_USE_PENDSV
    // Pend Machine Software Interrupt (MSI) - equivalent of ARM's PENDSVSET
    volatile uint32_t *msip = (volatile uint32_t *)(STK_RISCV_CLINT_BASE_ADDR);
    msip[hart] = 1; // set pending
    __DSB();
#else
    (void)hart;
#endif
}
644
646#ifndef _STK_SYSTEM_CORE_CLOCK_EXTERNAL
648#endif
649
652static volatile bool s_StkRiscvCsuLock = false;
653
661
665#ifdef _STK_RISCV_USE_PENDSV
666Stack * volatile s_StkRiscvStackIdle[STK_ARCH_CPU_COUNT] = {};
667
673volatile Word s_StkRiscvSpIsrInt[STK_ARCH_CPU_COUNT] = {};
674#endif
675
679Stack * volatile s_StkRiscvStackActive[STK_ARCH_CPU_COUNT] = {};
680
684Stack * volatile s_StkRiscvStackIsr[STK_ARCH_CPU_COUNT] = {};
685
687
707
// Register frame for SaveJmp/RestoreJmp (a minimal setjmp/longjmp analogue):
// return address, stack pointer and the callee-saved registers s0-s11, plus
// fcsr when FPU state is compiled in. Field order must match the hard-coded
// REGBYTES slot indices used by the asm in SaveJmp/RestoreJmp.
struct JmpFrame
{
    Word RA;
    Word SP;
    Word S0;
    Word S1;
    Word S2;
    Word S3;
    Word S4;
    Word S5;
    Word S6;
    Word S7;
    Word S8;
    Word S9;
    Word S10;
    Word S11;
#if (STK_RISCV_FPU != 0)
    Word FCSR;
#endif
};
737
// setjmp analogue: capture ra, sp and the callee-saved registers (and fcsr
// when FPU state is enabled) into 'f', then return 0. A later
// RestoreJmp(f, val) resumes execution after this call returning 'val'.
// Naked: no compiler prologue, so a0 still holds &f and sp is untouched.
__attribute__((naked))
int32_t SaveJmp(JmpFrame &/*f*/)
{
    __asm volatile(
        // a0 = &f - no prologue has touched sp or s0 yet
        SREG " ra, 0*" REGBYTES "(a0)  \n" // save return address
        SREG " sp, 1*" REGBYTES "(a0)  \n" // save caller's stack pointer
        SREG " s0, 2*" REGBYTES "(a0)  \n"
        SREG " s1, 3*" REGBYTES "(a0)  \n"
        SREG " s2, 4*" REGBYTES "(a0)  \n"
        SREG " s3, 5*" REGBYTES "(a0)  \n"
        SREG " s4, 6*" REGBYTES "(a0)  \n"
        SREG " s5, 7*" REGBYTES "(a0)  \n"
        SREG " s6, 8*" REGBYTES "(a0)  \n"
        SREG " s7, 9*" REGBYTES "(a0)  \n"
        SREG " s8, 10*" REGBYTES "(a0) \n"
        SREG " s9, 11*" REGBYTES "(a0) \n"
        SREG " s10, 12*" REGBYTES "(a0) \n"
        SREG " s11, 13*" REGBYTES "(a0) \n"
#if (STK_RISCV_FPU != 0)
        "frcsr t0                      \n" // read fcsr (rounding mode + flags)
        SREG " t0, 14*" REGBYTES "(a0) \n" // save to JmpFrame::FCSR
#endif
        "li a0, 0                      \n" // return 0
        "ret                           \n" // explicit return (naked)
    );
}
779
// longjmp analogue: reload the registers captured by SaveJmp from 'f' and
// jump to the saved return address with 'val' as SaveJmp's apparent return.
// note: a 'val' of 0 is indistinguishable from SaveJmp's direct return -
// callers are expected to pass a nonzero value (not enforced here).
__attribute__((naked, noreturn))
void RestoreJmp(JmpFrame &/*f*/, int32_t /*val*/)
{
    __asm volatile(
        // a0 = &f, a1 = val
        LREG " ra, 0*" REGBYTES "(a0)  \n"
        LREG " sp, 1*" REGBYTES "(a0)  \n"
        LREG " s0, 2*" REGBYTES "(a0)  \n"
        LREG " s1, 3*" REGBYTES "(a0)  \n"
        LREG " s2, 4*" REGBYTES "(a0)  \n"
        LREG " s3, 5*" REGBYTES "(a0)  \n"
        LREG " s4, 6*" REGBYTES "(a0)  \n"
        LREG " s5, 7*" REGBYTES "(a0)  \n"
        LREG " s6, 8*" REGBYTES "(a0)  \n"
        LREG " s7, 9*" REGBYTES "(a0)  \n"
        LREG " s8, 10*" REGBYTES "(a0) \n"
        LREG " s9, 11*" REGBYTES "(a0) \n"
        LREG " s10, 12*" REGBYTES "(a0) \n"
        LREG " s11, 13*" REGBYTES "(a0) \n"
#if (STK_RISCV_FPU != 0)
        LREG " t0, 14*" REGBYTES "(a0) \n" // load saved fcsr into t0
        "fscsr t0                      \n" // restore rounding mode + flags
#endif
        "mv a0, a1                     \n" // return val to SaveJmp's caller
        "ret                           \n" // jump to saved RA
    );
}
824
826
828#if STK_SUBMICORSECOND_PRECISION_TIMER
// High-resolution clock backed by the per-hart cycle counter (mcycle);
// selected when STK_SUBMICORSECOND_PRECISION_TIMER is enabled.
class HiResClockCYCLE
{
public:
    static HiResClockCYCLE *GetInstance()
    {
        // keep declaration function-local to allow compiler stripping it from the binary if
        // it is unused by the user code
        static HiResClockCYCLE clock;
        return &clock;
    }

    // current cycle count (see HW_GetCycleCounter)
    Cycles GetCycles()
    {
        return HW_GetCycleCounter();
    }

    // clock frequency in Hz (core clock)
    uint32_t GetFrequency()
    {
        return HW_CoreClockFrequency();
    }
};
typedef HiResClockCYCLE HiResClockImpl;
851#else
// High-resolution clock backed by the global CLINT mtime counter;
// the default when sub-microsecond precision is not requested.
class HiResClockMTIME
{
public:
    static HiResClockMTIME *GetInstance()
    {
        // keep declaration function-local to allow compiler stripping it from the binary if
        // it is unused by the user code
        static HiResClockMTIME clock;
        return &clock;
    }

    // current mtime tick count (see HW_GetMtime)
    Cycles GetCycles()
    {
        return HW_GetMtime();
    }

    // clock frequency in Hz (mtime tick rate)
    uint32_t GetFrequency()
    {
        return HW_MtimeClockFrequency();
    }
};
typedef HiResClockMTIME HiResClockImpl;
874#endif // !STK_SUBMICORSECOND_PRECISION_TIMER
875
877static struct Context : public PlatformContext
878{
879 Context() : PlatformContext(), m_stack_main(), m_stack_isr(), m_stack_isr_mem(),
880 m_exit_buf(), m_overrider(nullptr), m_specific(nullptr), m_tick_period(0), m_last_mtime(0ULL),
882 m_sleep_ticks(0),
883 #endif
884 m_csu(0), m_csu_nesting(0),
885 m_starting(false), m_started(false), m_exiting(false)
886
887 {}
888
892 ~Context()
893 {}
894
895 void Initialize(IPlatform::IEventHandler *handler, IKernelService *service, Stack *exit_trap, int32_t resolution_us)
896 {
897 PlatformContext::Initialize(handler, service, exit_trap, resolution_us);
898
899 // init ISR's stack
900 {
901 StackMemoryWrapper<STK_RISCV_ISR_STACK_SIZE> stack_isr_mem(&m_stack_isr_mem);
902 m_stack_isr.SP = hw::PtrToWord(InitStackMemory(&stack_isr_mem));
903 m_stack_isr.mode = ACCESS_PRIVILEGED;
904 }
905
906 // init Main stack
907 {
908 m_stack_main.SP = STK_STACK_MEMORY_FILLER;
909 m_stack_main.mode = ACCESS_PRIVILEGED;
910 }
911
912 m_csu = 0;
913 m_csu_nesting = 0;
914 m_overrider = NULL;
915 m_specific = NULL;
916 m_tick_period = ConvertTimeUsToClockCycles(STK_TIMER_CLOCK_FREQUENCY, resolution_us);
917 m_last_mtime = 0ULL;
918 m_starting = false;
919 m_started = false;
920 m_exiting = false;
921
922 // mcycle counter must be enabled per-core
923 #if STK_SUBMICORSECOND_PRECISION_TIMER
924 HW_EnableCycleCounter();
925 #endif
926 }
927
928 __stk_forceinline void OnTick()
929 {
930 // process tick - scheduler may update m_stack_active to point at a new task
931 Word cs;
932 HW_CriticalSectionStart(cs);
933
934 #if STK_TICKLESS_IDLE
935 Timeout ticks = m_sleep_ticks;
936 #endif
937
938 if (m_handler->OnTick(m_stack_idle, m_stack_active
940 , ticks
941 #endif
942 ))
943 {
944 // refresh ISR asm pointer cache so the naked ISR reads the correct
945 // (possibly new) active stack SP immediately when jal returns
946 // s_StkRiscvStackActive[hart] always points to Context::m_stack_active, the pointer
947 // itself is stable, but we reassign here so multi-core hart-indexed builds
948 // stay correct if the hart mapping ever changes in future,
949 // for single-core builds this is a simple store to a known address at index 0
950 const uint8_t hart = HW_GetHartId();
951 s_StkRiscvStackActive[hart] = m_stack_active;
952 #ifdef _STK_RISCV_USE_PENDSV
953 s_StkRiscvStackIdle[hart] = m_stack_idle;
954 #endif
955
956 HW_ScheduleContextSwitch(hart);
957 }
958
959 #if STK_TICKLESS_IDLE
960 m_sleep_ticks = ticks;
961 #endif
962
963 HW_CriticalSectionEnd(cs);
964 }
965
966 __stk_forceinline void EnterCriticalSection()
967 {
968 // disable local interrupts and save state
969 Word current_ses;
970 HW_CriticalSectionStart(current_ses);
971
972 if (m_csu_nesting == 0)
973 {
974 // ONLY attempt the global spinlock if we aren't already nested
975 HW_SpinLockLock(s_StkRiscvCsuLock);
976
977 // store the hardware interrupt state to restore later
978 m_csu = current_ses;
979 }
980
981 // increase nesting count within a limit
982 if (++m_csu_nesting > STK_CRITICAL_SECTION_NESTINGS_MAX)
983 {
984 // invariant violated: exceeded max allowed number of recursions
985 STK_KERNEL_PANIC(KERNEL_PANIC_CS_NESTING_OVERFLOW);
986 }
987 }
988
989 __stk_forceinline void ExitCriticalSection()
990 {
991 STK_ASSERT(m_csu_nesting != 0);
992 --m_csu_nesting;
993
994 if (m_csu_nesting == 0)
995 {
996 // capture the state before releasing lock
997 Word ses_to_restore = m_csu;
998
999 // release global lock
1000 HW_SpinLockUnlock(s_StkRiscvCsuLock);
1001
1002 // restore hardware interrupts
1003 HW_CriticalSectionEnd(ses_to_restore);
1004 }
1005 }
1006
1007 uint64_t GetSleepTicksPrev()
1008 {
1009 #if STK_TICKLESS_IDLE
1010 uint64_t ticks = (static_cast<uint64_t>(m_sleep_ticks) * static_cast<uint64_t>(m_tick_period));
1011 #else
1012 uint64_t ticks = (1U * static_cast<uint64_t>(m_tick_period));
1013 #endif
1014 return ticks;
1015 }
1016
1017 uint64_t GetTimeNow(uint64_t &error)
1018 {
1019 uint64_t mtime_now = HW_GetMtime();
1020 error = (mtime_now - m_last_mtime) - GetSleepTicksPrev();
1021 return mtime_now;
1022 }
1023
1024 void RearmTimer(const uint64_t mtime_now, const uint64_t error)
1025 {
1026 #if STK_TICKLESS_IDLE
1027 // guard against overflow (theoretical at normal tick periods and CPU frequencies)
1028 STK_ASSERT((static_cast<uint64_t>(m_sleep_ticks) * static_cast<uint64_t>(m_tick_period)) <= (UINT64_MAX - mtime_now));
1029 const uint64_t next_time = (static_cast<uint64_t>(m_sleep_ticks) * static_cast<uint64_t>(m_tick_period));
1030 #else
1031 const uint64_t next_time = (1U * static_cast<uint64_t>(m_tick_period));
1032 #endif
1033 HW_SetMtimecmp(mtime_now + next_time - error);
1034 m_last_mtime = mtime_now;
1035 }
1036
1037 __stk_forceinline void OnSwitchContext()
1038 {
1039 // capture mtime at ISR entry as the absolute base for the next period;
1040 // this eliminates drift from time spent inside OnTick regardless of how
1041 // long the scheduler takes to run
1042 uint64_t error = 0;
1043 uint64_t mtime_now = GetTimeNow(error);
1044 __stk_compiler_barrier(); // avoid compiler reordering, we count ticks from this point
1045
1046 // make sure timer is enabled by the Kernel::Start(), disable its start anywhere else
1047 STK_ASSERT(m_started);
1048 STK_ASSERT(m_handler != NULL);
1049
1050 // process tick - scheduler may update m_stack_active and m_sleep_ticks
1051 OnTick();
1052
1053 // rearm timer: use the ISR-entry mtime snapshot as the absolute base so
1054 // any CPU cycles consumed by OnTick do not accumulate as period drift
1055 RearmTimer(mtime_now, error);
1056 }
1057
1058 void StartTickTimer(Timeout elapsed_ticks)
1059 {
1060 #if STK_TICKLESS_IDLE
1061 // reset sleep ticks if kernel was restarted
1062 m_sleep_ticks = elapsed_ticks;
1063 #endif
1064
1065 // start timer with default periodicity
1066 m_last_mtime = HW_GetMtime();
1067 HW_SetMtimecmp(m_last_mtime + m_tick_period);
1068
1069 // enable timer interrupt
1070 set_csr(mie, MIP_MTIP);
1071 }
1072
1073 void Start();
1074 void OnStart();
1075 void OnStop();
1076#if STK_TICKLESS_IDLE
1077 Timeout Suspend();
1078 void Resume(Timeout elapsed_ticks);
1079#endif
1080
1081 typedef IPlatform::IEventOverrider eovrd_t;
1082 typedef PlatformRiscV::ISpecificEventHandler sehndl_t;
1083 typedef StackMemoryWrapper<STK_RISCV_ISR_STACK_SIZE>::MemoryType isrmem_t;
1084
1085 Stack m_stack_main;
1086 Stack m_stack_isr;
1087 isrmem_t m_stack_isr_mem;
1088 JmpFrame m_exit_buf;
1089 eovrd_t *m_overrider;
1090 sehndl_t *m_specific;
1091 uint32_t m_tick_period;
1092 uint64_t m_last_mtime;
1093#if STK_TICKLESS_IDLE
1094 Timeout m_sleep_ticks;
1095#endif
1096 Word m_csu;
1097 uint8_t m_csu_nesting;
1098 bool m_starting;
1099 bool m_started;
1100 volatile bool m_exiting;
1101}
1102s_StkPlatformContext[STK_ARCH_CPU_COUNT];
1103
1105{
1106#ifdef _STK_RISCV_USE_PENDSV
1107 Word cs;
1108 HW_CriticalSectionStart(cs);
1109
1110 GetContext().OnTick();
1111
1112 HW_CriticalSectionEnd(cs);
1113#else
1114 // unsupported scenario
1115 STK_ASSERT(false);
1116#endif
1117}
1118
1120static volatile EKernelPanicId g_LastPanicId = KERNEL_PANIC_NONE;
1121
1122__stk_attr_noinline // keep out of inlining to preserve stack frame
1123__stk_attr_noreturn // never returns - a trap
1125{
1126 g_LastPanicId = id;
1127
1128 // disable all maskable interrupts: this prevents scheduler from running again and corrupting state further
1129 HW_DisableInterrupts();
1130
1131 // spin forever: with a watchdog active this produces a clean reset, without a watchdog,
1132 // a debugger can attach and inspect 'id'
1133 for (;;)
1134 {
1135 __stk_relax_cpu();
1136 }
1137}
1138
1139#define STK_ASM_SAVE_CONTEXT_BASE\
1140 SREG " x1, 2*" REGBYTES "(sp) \n"\
1141 /*SREG " x2, 3*" REGBYTES "(sp) \n" // skip saving sp, Stack pointer */\
1142 /*SREG " x3, 4*" REGBYTES "(sp) \n" // skip saving gp, Global pointer (note: slot is used by fscsr) */\
1143 SREG " x4, 5*" REGBYTES "(sp) \n"\
1144 SREG " x5, 6*" REGBYTES "(sp) \n"\
1145 SREG " x6, 7*" REGBYTES "(sp) \n"\
1146 SREG " x7, 8*" REGBYTES "(sp) \n"\
1147 SREG " x8, 9*" REGBYTES "(sp) \n"\
1148 SREG " x9, 10*" REGBYTES "(sp) \n"\
1149 SREG " x10, 11*" REGBYTES "(sp) \n"\
1150 SREG " x11, 12*" REGBYTES "(sp) \n"\
1151 SREG " x12, 13*" REGBYTES "(sp) \n"\
1152 SREG " x13, 14*" REGBYTES "(sp) \n"\
1153 SREG " x14, 15*" REGBYTES "(sp) \n"\
1154 SREG " x15, 16*" REGBYTES "(sp) \n"
1155
1156#if (__riscv_32e != 1)
1157#define STK_ASM_SAVE_CONTEXT_RV32I_EXT\
1158 SREG " x16, 17*" REGBYTES "(sp) \n"\
1159 SREG " x17, 18*" REGBYTES "(sp) \n"\
1160 SREG " x18, 19*" REGBYTES "(sp) \n"\
1161 SREG " x19, 20*" REGBYTES "(sp) \n"\
1162 SREG " x20, 21*" REGBYTES "(sp) \n"\
1163 SREG " x21, 22*" REGBYTES "(sp) \n"\
1164 SREG " x22, 23*" REGBYTES "(sp) \n"\
1165 SREG " x23, 24*" REGBYTES "(sp) \n"\
1166 SREG " x24, 25*" REGBYTES "(sp) \n"\
1167 SREG " x25, 26*" REGBYTES "(sp) \n"\
1168 SREG " x26, 27*" REGBYTES "(sp) \n"\
1169 SREG " x27, 28*" REGBYTES "(sp) \n"\
1170 SREG " x28, 29*" REGBYTES "(sp) \n"\
1171 SREG " x29, 30*" REGBYTES "(sp) \n"\
1172 SREG " x30, 31*" REGBYTES "(sp) \n"\
1173 SREG " x31, 32*" REGBYTES "(sp) \n"
1174#else
1175#define STK_ASM_SAVE_CONTEXT_RV32I_EXT
1176#endif
1177
1178#if (STK_RISCV_FPU != 0)
1179#define STK_ASM_SAVE_CONTEXT_FP\
1180 FSREG " f0, " FOFFSET "+0*" FREGBYTES "(sp) \n"\
1181 FSREG " f1, " FOFFSET "+1*" FREGBYTES "(sp) \n"\
1182 FSREG " f2, " FOFFSET "+2*" FREGBYTES "(sp) \n"\
1183 FSREG " f3, " FOFFSET "+3*" FREGBYTES "(sp) \n"\
1184 FSREG " f4, " FOFFSET "+4*" FREGBYTES "(sp) \n"\
1185 FSREG " f5, " FOFFSET "+5*" FREGBYTES "(sp) \n"\
1186 FSREG " f6, " FOFFSET "+6*" FREGBYTES "(sp) \n"\
1187 FSREG " f7, " FOFFSET "+7*" FREGBYTES "(sp) \n"\
1188 FSREG " f8, " FOFFSET "+8*" FREGBYTES "(sp) \n"\
1189 FSREG " f9, " FOFFSET "+9*" FREGBYTES "(sp) \n"\
1190 FSREG " f10, " FOFFSET "+10*" FREGBYTES "(sp) \n"\
1191 FSREG " f11, " FOFFSET "+11*" FREGBYTES "(sp) \n"\
1192 FSREG " f12, " FOFFSET "+12*" FREGBYTES "(sp) \n"\
1193 FSREG " f13, " FOFFSET "+13*" FREGBYTES "(sp) \n"\
1194 FSREG " f14, " FOFFSET "+14*" FREGBYTES "(sp) \n"\
1195 FSREG " f15, " FOFFSET "+15*" FREGBYTES "(sp) \n"\
1196 FSREG " f16, " FOFFSET "+16*" FREGBYTES "(sp) \n"\
1197 FSREG " f17, " FOFFSET "+17*" FREGBYTES "(sp) \n"\
1198 FSREG " f18, " FOFFSET "+18*" FREGBYTES "(sp) \n"\
1199 FSREG " f19, " FOFFSET "+19*" FREGBYTES "(sp) \n"\
1200 FSREG " f20, " FOFFSET "+20*" FREGBYTES "(sp) \n"\
1201 FSREG " f21, " FOFFSET "+21*" FREGBYTES "(sp) \n"\
1202 FSREG " f22, " FOFFSET "+22*" FREGBYTES "(sp) \n"\
1203 FSREG " f23, " FOFFSET "+23*" FREGBYTES "(sp) \n"\
1204 FSREG " f24, " FOFFSET "+24*" FREGBYTES "(sp) \n"\
1205 FSREG " f25, " FOFFSET "+25*" FREGBYTES "(sp) \n"\
1206 FSREG " f26, " FOFFSET "+26*" FREGBYTES "(sp) \n"\
1207 FSREG " f27, " FOFFSET "+27*" FREGBYTES "(sp) \n"\
1208 FSREG " f28, " FOFFSET "+28*" FREGBYTES "(sp) \n"\
1209 FSREG " f29, " FOFFSET "+29*" FREGBYTES "(sp) \n"\
1210 FSREG " f30, " FOFFSET "+30*" FREGBYTES "(sp) \n"\
1211 FSREG " f31, " FOFFSET "+31*" FREGBYTES "(sp) \n"
1212#else
1213#define STK_ASM_SAVE_CONTEXT_FP
1214#endif
1215
1216#define STK_ASM_SAVE_CONTEXT_PC_STATUS\
1217 "csrr t0, mepc \n"\
1218 "csrr t1, mstatus \n"\
1219 SREG " t0, 0*" REGBYTES "(sp) \n"\
1220 SREG " t1, 1*" REGBYTES "(sp) \n"
1221
1222#if (STK_RISCV_FPU != 0)
1223#define STK_ASM_SAVE_CONTEXT_FRCSR\
1224 "frcsr t0 \n"\
1225 SREG " t0, 4*" REGBYTES "(sp) \n" /* use stack memory slot of gp (see comment for x3 above) */
1226#else
1227#define STK_ASM_SAVE_CONTEXT_FRCSR
1228#endif
1229
1230#define STK_ASM_SAVE_CONTEXT\
1231 "addi sp, sp, -" REGSIZE " \n" /* allocate stack memory for registers */\
1232 STK_ASM_SAVE_CONTEXT_BASE\
1233 STK_ASM_SAVE_CONTEXT_RV32I_EXT\
1234 STK_ASM_SAVE_CONTEXT_FP\
1235 STK_ASM_SAVE_CONTEXT_PC_STATUS\
1236 STK_ASM_SAVE_CONTEXT_FRCSR
1237
1238#define STK_ASM_LOAD_CONTEXT_BASE\
1239 LREG " x1, 2*" REGBYTES "(sp) \n"\
1240 /*LREG " x2, 3*" REGBYTES "(sp) \n" skip loading sp, Stack pointer */\
1241 /*LREG " x3, 4*" REGBYTES "(sp) \n" skip loading gp, Global pointer (note: slot is used by fscsr) */\
1242 LREG " x4, 5*" REGBYTES "(sp) \n"\
1243 LREG " x5, 6*" REGBYTES "(sp) \n"\
1244 LREG " x6, 7*" REGBYTES "(sp) \n"\
1245 LREG " x7, 8*" REGBYTES "(sp) \n"\
1246 LREG " x8, 9*" REGBYTES "(sp) \n"\
1247 LREG " x9, 10*" REGBYTES "(sp) \n"\
1248 LREG " x10, 11*" REGBYTES "(sp) \n"\
1249 LREG " x11, 12*" REGBYTES "(sp) \n"\
1250 LREG " x12, 13*" REGBYTES "(sp) \n"\
1251 LREG " x13, 14*" REGBYTES "(sp) \n"\
1252 LREG " x14, 15*" REGBYTES "(sp) \n"\
1253 LREG " x15, 16*" REGBYTES "(sp) \n"
1254
1255#if (__riscv_32e != 1)
1256#define STK_ASM_LOAD_CONTEXT_RV32I_EXT\
1257 LREG " x16, 17*" REGBYTES "(sp) \n"\
1258 LREG " x17, 18*" REGBYTES "(sp) \n"\
1259 LREG " x18, 19*" REGBYTES "(sp) \n"\
1260 LREG " x19, 20*" REGBYTES "(sp) \n"\
1261 LREG " x20, 21*" REGBYTES "(sp) \n"\
1262 LREG " x21, 22*" REGBYTES "(sp) \n"\
1263 LREG " x22, 23*" REGBYTES "(sp) \n"\
1264 LREG " x23, 24*" REGBYTES "(sp) \n"\
1265 LREG " x24, 25*" REGBYTES "(sp) \n"\
1266 LREG " x25, 26*" REGBYTES "(sp) \n"\
1267 LREG " x26, 27*" REGBYTES "(sp) \n"\
1268 LREG " x27, 28*" REGBYTES "(sp) \n"\
1269 LREG " x28, 29*" REGBYTES "(sp) \n"\
1270 LREG " x29, 30*" REGBYTES "(sp) \n"\
1271 LREG " x30, 31*" REGBYTES "(sp) \n"\
1272 LREG " x31, 32*" REGBYTES "(sp) \n"
1273#else
1274#define STK_ASM_LOAD_CONTEXT_RV32I_EXT
1275#endif
1276
1277#if (STK_RISCV_FPU != 0)
1278#define STK_ASM_LOAD_CONTEXT_FP\
1279 FLREG " f0, " FOFFSET "+0*" FREGBYTES "(sp) \n"\
1280 FLREG " f1, " FOFFSET "+1*" FREGBYTES "(sp) \n"\
1281 FLREG " f2, " FOFFSET "+2*" FREGBYTES "(sp) \n"\
1282 FLREG " f3, " FOFFSET "+3*" FREGBYTES "(sp) \n"\
1283 FLREG " f4, " FOFFSET "+4*" FREGBYTES "(sp) \n"\
1284 FLREG " f5, " FOFFSET "+5*" FREGBYTES "(sp) \n"\
1285 FLREG " f6, " FOFFSET "+6*" FREGBYTES "(sp) \n"\
1286 FLREG " f7, " FOFFSET "+7*" FREGBYTES "(sp) \n"\
1287 FLREG " f8, " FOFFSET "+8*" FREGBYTES "(sp) \n"\
1288 FLREG " f9, " FOFFSET "+9*" FREGBYTES "(sp) \n"\
1289 FLREG " f10, " FOFFSET "+10*" FREGBYTES "(sp) \n"\
1290 FLREG " f11, " FOFFSET "+11*" FREGBYTES "(sp) \n"\
1291 FLREG " f12, " FOFFSET "+12*" FREGBYTES "(sp) \n"\
1292 FLREG " f13, " FOFFSET "+13*" FREGBYTES "(sp) \n"\
1293 FLREG " f14, " FOFFSET "+14*" FREGBYTES "(sp) \n"\
1294 FLREG " f15, " FOFFSET "+15*" FREGBYTES "(sp) \n"\
1295 FLREG " f16, " FOFFSET "+16*" FREGBYTES "(sp) \n"\
1296 FLREG " f17, " FOFFSET "+17*" FREGBYTES "(sp) \n"\
1297 FLREG " f18, " FOFFSET "+18*" FREGBYTES "(sp) \n"\
1298 FLREG " f19, " FOFFSET "+19*" FREGBYTES "(sp) \n"\
1299 FLREG " f20, " FOFFSET "+20*" FREGBYTES "(sp) \n"\
1300 FLREG " f21, " FOFFSET "+21*" FREGBYTES "(sp) \n"\
1301 FLREG " f22, " FOFFSET "+22*" FREGBYTES "(sp) \n"\
1302 FLREG " f23, " FOFFSET "+23*" FREGBYTES "(sp) \n"\
1303 FLREG " f24, " FOFFSET "+24*" FREGBYTES "(sp) \n"\
1304 FLREG " f25, " FOFFSET "+25*" FREGBYTES "(sp) \n"\
1305 FLREG " f26, " FOFFSET "+26*" FREGBYTES "(sp) \n"\
1306 FLREG " f27, " FOFFSET "+27*" FREGBYTES "(sp) \n"\
1307 FLREG " f28, " FOFFSET "+28*" FREGBYTES "(sp) \n"\
1308 FLREG " f29, " FOFFSET "+29*" FREGBYTES "(sp) \n"\
1309 FLREG " f30, " FOFFSET "+30*" FREGBYTES "(sp) \n"\
1310 FLREG " f31, " FOFFSET "+31*" FREGBYTES "(sp) \n"
1311#else
1312#define STK_ASM_LOAD_CONTEXT_FP
1313#endif
1314
// Restore mepc/mstatus from the two service slots at the base of the frame.
// t0/t1 are free scratch here: they are reloaded later by the base restore.
#define STK_ASM_LOAD_CONTEXT_PC_STATUS\
    LREG " t0, 0*" REGBYTES "(sp) \n"\
    LREG " t1, 1*" REGBYTES "(sp) \n"\
    "csrw mepc, t0 \n"\
    "csrw mstatus, t1 \n"

// Restore the FP control/status register (FCSR) from the frame slot of
// x3/gp (slot 4), which is otherwise unused for gp itself (gp is a fixed
// register, not saved per task — see the frame layout comment below).
#if (STK_RISCV_FPU != 0)
#define STK_ASM_LOAD_CONTEXT_FRCSR\
    LREG " t0, 4*" REGBYTES "(sp) \n" /* use stack memory slot of gp (see comment for x3 below) */\
    "fscsr t0 \n"
#else
#define STK_ASM_LOAD_CONTEXT_FRCSR
#endif

// Full context restore sequence: mepc/mstatus first (they consume t0/t1 as
// scratch), then FCSR, integer base set, RV32I extension registers and FP
// registers; finally the whole register frame is popped off the stack.
#define STK_ASM_LOAD_CONTEXT\
    STK_ASM_LOAD_CONTEXT_PC_STATUS\
    STK_ASM_LOAD_CONTEXT_FRCSR\
    STK_ASM_LOAD_CONTEXT_BASE\
    STK_ASM_LOAD_CONTEXT_RV32I_EXT\
    STK_ASM_LOAD_CONTEXT_FP\
    "addi sp, sp, " REGSIZE " \n" /* shrink stack memory of registers */
1336
// Switch SP to the active task's saved stack pointer, restore the task's
// full register context and leave the handler via mret. Does not return.
// NOTE(review): relies on Stack::SP being the FIRST member of Stack (the
// struct doc states this) — t0 receives &m_stack_active's pointee and the
// saved SP is loaded from offset 0.
static __stk_forceinline void HW_LoadContextAndExit()
{
    __asm volatile(
        LREG " t0, %0 \n" // load the first member (SP) into t0
        LREG " sp, 0(t0) \n" // sp = *t0 (Stack::SP, first member)

        STK_ASM_LOAD_CONTEXT
        STK_ASM_EXIT_FROM_HANDLER " \n"

        : /* output: none */
        : "m"(GetContext().m_stack_active)
        : "t0", "t1", "a2", "a3", "a4", "a5", "gp", "memory");
}
1350
// Grant full FPU/extension access by setting the FS and XS fields in
// mstatus (csrs = read-modify-set, leaves other mstatus bits intact).
// Compiles to nothing when the target has no FPU.
static __stk_forceinline void HW_EnableFullFpuAccess()
{
#if (STK_RISCV_FPU != 0)
    __asm volatile(
        "csrs mstatus, %0"
        : /* output: none */
        : "r"(MSTATUS_FS | MSTATUS_XS)
        : "memory" /* ensure no FP instructions are moved before this call */);
#endif
}
1361
// Clear the FP control/status register (FCSR): rounding mode back to
// round-to-nearest, all accrued exception flags cleared.
// Note: "fssr" is the legacy assembler alias of "fscsr"; writing x0 zeroes FCSR.
// Compiles to nothing when the target has no FPU.
static __stk_forceinline void HW_ClearFpuState()
{
#if (STK_RISCV_FPU != 0)
    __asm volatile(
        "fssr x0"
        : /* output: none */
        : /* input: none */
        : "memory" /* ensure flags are cleared before next FP op */);
#endif
}
1372
// Capture the current (main/boot) stack pointer into the context so the
// scheduler-exit path can switch back to it (see HW_LoadMainSP / OnSchedulerExit).
static __stk_forceinline void HW_SaveMainSP()
{
    __asm volatile(
        SREG " sp, %0"
        : "=m"(GetContext().m_stack_main)
        : /* input: none */
        : "memory" /* protect against compiler reordering */ );
}
1381
// Restore the main/boot stack pointer previously captured by HW_SaveMainSP().
// After this the caller runs on the pre-scheduler stack again.
static __stk_forceinline void HW_LoadMainSP()
{
    __asm volatile(
        LREG " sp, %0"
        : /* output: none */
        : "m"(GetContext().m_stack_main)
        : "memory" /* protect against compiler reordering */ );
}
1390
// Read the mcause CSR: top bit = interrupt flag, low bits = exception or
// interrupt code. Only meaningful while executing inside a trap handler.
static __stk_forceinline Word HW_GetCurrentException()
{
    Word mcause;
    __asm volatile("csrr %0, mcause" : "=r"(mcause));
    return mcause;
}
1406
// Heuristic "am I inside an ISR?" check: true when the caller's SP lies
// inside the private ISR stack memory region.
// NOTE(review): the upper bound adds STK_RISCV_ISR_STACK_SIZE directly to a
// byte address; confirm the constant is a byte count and not a Word-element
// count — if m_stack_isr_mem is a Word array of that many elements, the real
// byte span is larger (esp. on RV64) and this bound would be too small.
static __stk_forceinline bool HW_IsHandlerMode()
{
    Word current_sp = HW_GetCallerSP();

    // get the bounds of the ISR stack from our Context
    // note: STK uses StackMemoryWrapper, so we check against that memory block
    const Word isr_stack_base = (Word)&GetContext().m_stack_isr_mem;
    const Word isr_stack_top = isr_stack_base + STK_RISCV_ISR_STACK_SIZE;

    return ((current_sp >= isr_stack_base) && (current_sp < isr_stack_top));
}
1418
// Dispatch entry: restore the active task's context and mret into it.
// Used both at scheduler start and when re-entering dispatch after a stop.
static __stk_forceinline void OnTaskStart()
{
    HW_LoadContextAndExit();
}
1423
// C-linkage shim called by name ("jal ra, TrySwitchContext") from the naked
// ISR handlers below; runs the scheduler on the private ISR stack.
// __stk_attr_used for LTO
extern "C" STK_RISCV_ISR_SECTION __stk_attr_used void TrySwitchContext()
{
    GetContext().OnSwitchContext();
}
1429
1430#ifdef _STK_RISCV_USE_PENDSV
// Tick ISR (PendSV flavour): save the interrupted task's full context, park
// its SP in the plain-Word array s_StkRiscvSpIsrInt[hart], run the scheduler
// on the private ISR stack, then restore the SAME interrupted task (the
// actual switch happens later in the MSI/PendSV handler).
// Naked: the single asm statement owns all registers; no compiler operands.
extern "C" STK_RISCV_ISR_SECTION __stk_attr_naked void STK_SYSTICK_HANDLER()
{
    __asm volatile(
        // 1. save full interrupted context onto the task stack
        STK_ASM_SAVE_CONTEXT

        // 2. store task SP into s_StkRiscvSpIsrInt[hart] directly (plain Word, no struct indirection)
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvSpIsrInt \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n" // t0 = hart * sizeof(Word)
        "add t1, t1, t0 \n" // t1 = &s_StkRiscvSpIsrInt[hart]
        SREG " sp, 0(t1) \n" // store sp directly - no pointer dereference
#else
        "la t1, s_StkRiscvSpIsrInt \n"
        SREG " sp, 0(t1) \n" // store sp directly - no pointer dereference
#endif

        // 3. switch to private ISR stack
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackIsr \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " t1, 0(t1) \n"
#else
        "la t1, s_StkRiscvStackIsr \n"
        LREG " t1, 0(t1) \n"
#endif
        LREG " sp, 0(t1) \n" // sp = Stack::SP of ISR stack

        // 4. run scheduler
        "jal ra, TrySwitchContext \n"

        // 5. restore the interrupted task's SP from s_StkRiscvSpIsrInt[hart]
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvSpIsrInt \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " sp, 0(t1) \n" // sp = saved task SP - direct load, no struct
#else
        "la t1, s_StkRiscvSpIsrInt \n"
        LREG " sp, 0(t1) \n" // sp = saved task SP - direct load, no struct
#endif

        // 6. restore context
        STK_ASM_LOAD_CONTEXT

        // 7. exit ISR handler
        STK_ASM_EXIT_FROM_HANDLER " \n"

        : /* outputs: none - naked, compiler emits nothing outside this asm */
        : /* inputs: all addresses loaded as linker symbols via "la" */
        : /* clobbers: none - the asm string owns all registers */);
}
// Machine software interrupt (MSI) handler - the PendSV-style context
// switcher: save the outgoing context, acknowledge the MSI by clearing
// MSIP[hart] in the CLINT, then load the context of whatever task
// s_StkRiscvStackActive[hart] points at (updated earlier by the scheduler).
// Naked: the single asm statement owns all registers; the only operand is
// the compile-time "i" constant for the CLINT base address.
extern "C" STK_RISCV_ISR_SECTION __stk_attr_naked void STK_MSI_HANDLER()
{
    __asm volatile(
        // 1. save context
        STK_ASM_SAVE_CONTEXT

        // 2. store task SP into s_StkRiscvStackIdle[hart]->SP
        // all integer registers are now saved. t0/t1 are free to use as scratch.
        // "la" loads the address of the global array - a linker-time constant,
        // no compiler-generated runtime code, safe to use here
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackIdle \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n" // t0 = hart * sizeof(Stack*)
        "add t1, t1, t0 \n" // t1 = &s_StkRiscvStackIdle[hart]
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackIdle[hart] (Stack*)
#else
        "la t1, s_StkRiscvStackIdle \n"
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackIdle[0] (Stack*)
#endif
        SREG " sp, 0(t1) \n" // Stack::SP = task's sp (SP is first member)

        // 3. clear exception: MSIP[hart] = 0 (MSIP words are 4 bytes each at CLINT base)
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "slli t0, t0, 2 \n" // t0 = hart * 4
        "li t1, %[clint_msip_base] \n"
        "add t0, t0, t1 \n" // t0 = &MSIP[hart]
#else
        "li t0, %[clint_msip_base] \n" // t0 = &MSIP[0]
#endif
        "sw zero, 0(t0) \n" // MSIP[hart] = 0
        "fence rw, rw \n" // fence rw,rw - ensure the write is visible before re-enable

        // 4. load SP from s_StkRiscvStackActive[hart]->SP
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackActive \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " t1, 0(t1) \n"
#else
        "la t1, s_StkRiscvStackActive \n"
        LREG " t1, 0(t1) \n"
#endif
        LREG " sp, 0(t1) \n" // sp = active task's saved SP

        // 5. load context of the active task
        STK_ASM_LOAD_CONTEXT

        // 6. exit ISR handler
        STK_ASM_EXIT_FROM_HANDLER " \n"

        : /* outputs: none - naked, compiler emits nothing outside this asm */
        : [clint_msip_base] "i" (STK_RISCV_CLINT_BASE_ADDR) /* other inputs: all addresses loaded as linker symbols via "la" */
        : /* clobbers: none - the asm string owns all registers */);
}
1544#else // !_STK_RISCV_USE_PENDSV
1545/* STK_SYSTICK_HANDLER
1546
1547RISC-V machine-timer ISR: Saves the interrupted task's full context, switches
1548to the private ISR stack, calls TrySwitchContext (which reschedules the timer
1549and runs the scheduler), then restores the (possibly new) task's context.
1550
1551DESIGN RULES - must be obeyed to work correctly at all optimisation levels:
1552
1553 1. Single asm volatile, no compiler operands.
1554 The function body is ONE __asm volatile("..." : : : ) with empty
1555 input/output/clobber lists. No "m" or "r" constraints are used because
1556 the compiler evaluates those as C expressions BEFORE emitting any asm
1557 text, i.e. before the register save - trashing uninitialized registers.
1558
1559 2. All addresses are linker symbols loaded via "la" inside the asm.
1560 s_StkRiscvStackActive and s_StkRiscvStackIsr are plain file-scope globals. "la reg, sym"
1561 emits a PC-relative load that is resolved at link time, it produces no
1562 compiler-generated code outside the asm string.
1563
1564 3. Stack pointer indexing uses sizeof(Stack*) == REGBYTES.
1565 For multi-hart builds the array index is hart * REGBYTES, which is a
1566 single left-shift by log2(REGBYTES): 2 for RV32 (4 bytes), 3 for RV64
1567 (8 bytes). REGBYTES_LOG2 is defined below accordingly.
1568
1569 4. s_StkRiscvStackActive[hart]->SP is updated by TrySwitchContext.
1570 The naked asm reads it fresh after the jal returns, so it always sees
1571 the task the scheduler has chosen - even if it changed.
1572
1573 Stack frame layout (offsets from sp after "addi sp,-REGSIZE"):
1574 [0*REGBYTES] mepc (service slot 0)
1575 [1*REGBYTES] mstatus (service slot 1)
1576 [2*REGBYTES] x1 / ra
1577 [3*REGBYTES] x2 / sp - SKIPPED, managed explicitly
1578 [4*REGBYTES] x3 / gp - SKIPPED, fixed register; slot reused for FCSR
1579 [5*REGBYTES] x4 / tp
1580 [6*REGBYTES] x5 / t0
1581 ...
1582 [32*REGBYTES] x31 / t6 (RV32I; absent on RV32E)
1583 [FOFFSET + n*FREGBYTES] fn (FP registers, if STK_RISCV_FPU != 0)
1584*/
// Tick ISR (direct-switch flavour): the scheduler runs inside the timer ISR
// itself and the context switch happens here. See the DESIGN RULES comment
// block above for the invariants this naked function must obey.
extern "C" STK_RISCV_ISR_SECTION __stk_attr_naked void STK_SYSTICK_HANDLER()
{
    __asm volatile(
        // 1. save context
        STK_ASM_SAVE_CONTEXT

        // 2. store task SP into s_StkRiscvStackActive[hart]->SP
        // all integer registers are now saved. t0/t1 are free to use as scratch.
        // "la" loads the address of the global array - a linker-time constant,
        // no compiler-generated runtime code, safe to use here
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackActive \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n" // t0 = hart * sizeof(Stack*)
        "add t1, t1, t0 \n" // t1 = &s_StkRiscvStackActive[hart]
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackActive[hart] (Stack*)
#else
        "la t1, s_StkRiscvStackActive \n"
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackActive[0] (Stack*)
#endif
        SREG " sp, 0(t1) \n" // Stack::SP = task's sp (SP is first member)

        // 3. switch to private ISR stack
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackIsr \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackIsr[hart] (Stack*)
#else
        "la t1, s_StkRiscvStackIsr \n"
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackIsr[0] (Stack*)
#endif
        LREG " sp, 0(t1) \n" // sp = Stack::SP of ISR stack

        // 4. call TrySwitchContext
        // runs on the ISR stack: reschedules timer, runs scheduler
        // (which may update m_stack_active to a new task), then updates
        // s_StkRiscvStackActive[hart] so step 5 below reads the correct new SP,
        // all caller-saved registers (a0-a7, t0-t6, ra) are trashed - expected
        "jal ra, TrySwitchContext \n"

        // 5. reload SP from s_StkRiscvStackActive[hart]->SP
        // TrySwitchContext updated s_StkRiscvStackActive[hart] before returning,
        // we re-read it fresh to pick up any task switch the scheduler made
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackActive \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " t1, 0(t1) \n"
#else
        "la t1, s_StkRiscvStackActive \n"
        LREG " t1, 0(t1) \n"
#endif
        LREG " sp, 0(t1) \n" // sp = active task's saved SP

        // 6. load context of the active task
        STK_ASM_LOAD_CONTEXT

        // 7. exit ISR handler
        STK_ASM_EXIT_FROM_HANDLER " \n"

        : /* outputs: none - naked, compiler emits nothing outside this asm */
        : /* inputs: none - all addresses loaded as linker symbols via "la" */
        : /* clobbers: none - the asm string owns all registers */
    );
}
1653#endif // !_STK_RISCV_USE_PENDSV
1654
// One-time scheduler bring-up, executed inside the ecall trap handler with
// interrupts disabled (see STK_SVC_HANDLER). Ordering is load-bearing:
// the ISR pointer caches and state flags must be set BEFORE the tick timer
// interrupt / SV exception can fire.
void Context::OnStart()
{
    const uint8_t hart = HW_GetHartId();

    // save SP of main stack to reuse it for scheduler exit
    HW_SaveMainSP();

    // enable FPU (if available)
    HW_EnableFullFpuAccess();

    // clear FPU usage status if FPU was used before kernel start
    HW_ClearFpuState();

    // notify kernel
    m_handler->OnStart(m_stack_active);

    // initialize ISR asm pointer cache (consumed by the naked handlers above)
    s_StkRiscvStackIsr[hart] = &m_stack_isr; // set once here, the ISR stack never moves
    s_StkRiscvStackActive[hart] = m_stack_active;
#ifdef _STK_RISCV_USE_PENDSV
    s_StkRiscvStackIdle[hart] = m_stack_idle;
#endif

    // start with initially 1 elapsed tick (after timer expires)
    StartTickTimer(1);

    // change state before enabling interrupts
    m_started = true;
    m_starting = false;

    // enable SV exception
#ifdef _STK_RISCV_USE_PENDSV
    set_csr(mie, MIP_MSIP);
#endif
}
1690
// Machine trap handler for synchronous exceptions (ecall and faults).
// NOTE(review): mcause is compared to IRQ_M_EXT (code 11) WITHOUT masking
// the interrupt bit; for a synchronous trap code 11 is "environment call
// from M-mode", so this branch evidently services the ecall issued by
// HW_StartScheduler — confirm against HW_StartScheduler.
STK_RISCV_ISR void STK_SVC_HANDLER()
{
    Word cause = HW_GetCurrentException();

    /*if (cause & (1UL << (__riscv_xlen - 1)))
    {
        cause &= ~(1UL << (__riscv_xlen - 1));

        if (cause == IRQ_M_TIMER)
        {

        }
    }*/

    if (cause == IRQ_M_EXT)
    {
        // not starting scheduler, then try to forward ecall to user
        if (!GetContext().m_starting)
        {
            // forward event to user
            if (GetContext().m_specific != NULL)
                GetContext().m_specific->OnException(cause);

            // switch to the next instruction of the caller space (PC) after the return
            // NOTE(review): ecall is a fixed 4-byte instruction; sizeof(Word)
            // is 8 on RV64, which would skip an extra instruction — confirm
            write_csr(mepc, read_csr(mepc) + sizeof(Word));
        }
        else
        {
            // make sure interrupts do not interfere
            HW_DisableInterrupts();

            // configure scheduling
            GetContext().OnStart();

            // start first task (restores context and mrets; does not return)
            OnTaskStart();
        }
    }
    else
    {
        if (GetContext().m_specific != NULL)
        {
            // forward event to user
            GetContext().m_specific->OnException(cause);
        }
        else
        {
            // trap further execution
            // note: normally, if trapped here with cause 2 or 4 then check stack memory size of the
            // tasks, scheduler and ISR, they were likely overwritten if your code is 100% correct
            // NOTE(review): a halt statement (likely STK_KERNEL_PANIC(
            // KERNEL_PANIC_CPU_EXCEPTION)) appears elided in this listing — confirm
        }
    }
}
1745
1746static void OnTaskRun(ITask *task)
1747{
1748 task->Run();
1749}
1750
// Runs in task context when a task's Run() returns (InitStack wires this as
// the task's return address). Notifies the kernel, then parks the hart in a
// WFI loop until the scheduler tick switches away from the dead task.
static void OnTaskExit()
{
    Word cs;
    HW_CriticalSectionStart(cs);

    GetContext().m_handler->OnTaskExit(GetContext().m_stack_active);

    HW_CriticalSectionEnd(cs);

    for (;;)
    {
        __DSB(); // data barrier
        __WFI(); // enter standby mode until time slot expires
    }
}
1766
// Default Sleep-trap body: idle loop executed when no task is runnable.
// Spins in WFI until an interrupt (typically the next tick) wakes the hart.
static STK_RISCV_ISR_SECTION void OnSchedulerSleep()
{
#if STK_SEGGER_SYSVIEW
    SEGGER_SYSVIEW_OnIdle();
#endif

    for (;;)
    {
        __DSB(); // data barrier
        __WFI(); // enter sleep until interrupt
    }
}
1779
1780static STK_RISCV_ISR_SECTION void OnSchedulerSleepOverride()
1781{
1782 if (!GetContext().m_overrider->OnSleep())
1783 OnSchedulerSleep();
1784}
1785
// Exit-trap body: executed as a "task" when the kernel shuts down. Restores
// the pre-scheduler (main) stack and long-jumps back into Context::Start(),
// which then observes m_exiting and returns to the user. Does not return.
static void OnSchedulerExit()
{
    // switch to main stack
    HW_LoadMainSP();

    // jump to the exit from the IKernel::Start()
    RestoreJmp(GetContext().m_exit_buf, 0);
}
1794
/// Initialize the RISC-V platform scheduler context.
/// @note Forwards as (handler, service, exit_trap, resolution_us) — the
///       argument order differs from this method's own signature; presumably
///       Context::Initialize takes exit_trap third — confirm its declaration.
void PlatformRiscV::Initialize(IEventHandler *event_handler, IKernelService *service, uint32_t resolution_us, Stack *exit_trap)
{
    GetContext().Initialize(event_handler, service, exit_trap, resolution_us);
}
1799
// Start scheduling on the calling hart. SaveJmp is setjmp-like and "returns
// twice": the first return proceeds to HW_StartScheduler(); the second
// happens when OnSchedulerExit() calls RestoreJmp with m_exiting already set,
// at which point we notify the kernel of the stop and return to the caller.
void Context::Start()
{
    m_exiting = false;

    // save jump location of the Exit trap
    SaveJmp(m_exit_buf);
    if (m_exiting)
    {
        // notify kernel about a full stop
        m_handler->OnStop();
        return;
    }

    // enable FPU (if available)
    HW_EnableFullFpuAccess();

    // start (issues the ecall serviced by STK_SVC_HANDLER)
    m_starting = true;
    HW_StartScheduler();
}
1820
1821#if STK_TICKLESS_IDLE
// Enter tickless idle: stop the tick timer and return the number of whole
// ticks the caller may sleep, net of the ticks that already elapsed since
// the last tick ISR.
// NOTE(review): 'resolution' is derived from HW_CoreClockFrequency() while
// 'elapsed' is measured in mtime counter units; confirm both run in the same
// clock domain (mtime typically runs at STK_TIMER_CLOCK_FREQUENCY, not the
// core clock) — a mismatch would mis-scale elapsed_ticks.
Timeout Context::Suspend()
{
    const uint32_t resolution = static_cast<uint32_t>(ConvertTimeUsToClockCycles(HW_CoreClockFrequency(), m_tick_resolution));
    STK_ASSERT(resolution != 0);

    HW_DisableInterrupts();

    // stop tick timer
    HW_StopMTimer();

    // clear pending PendSV exception
    HW_ClearPendingSwitch();

    // get already elapsed CPU cycles since SysTick ISR invocation up to SysTick timer stop (see above)
    // to account for them for a new period value
    const uint32_t elapsed = HW_GetMtime() - m_last_mtime;

    // get already elapsed ticks since the OnTick and a call to Suspend(), we shall account for this
    // period and return only the remainder
    Timeout elapsed_ticks = static_cast<Timeout>(elapsed / resolution);
    Timeout sleep_ticks = Max(m_sleep_ticks - elapsed_ticks, static_cast<Timeout>(0));

    // notify core
    m_handler->OnSuspend(true);

    HW_EnableInterrupts();

    return sleep_ticks;
}
1851#endif
1852
1853#if STK_TICKLESS_IDLE
// Leave tickless idle: notify the kernel and restart the tick timer primed
// with the ticks that elapsed while suspended, keeping kernel time monotonic.
void Context::Resume(Timeout elapsed_ticks)
{
    HW_DisableInterrupts();

    // notify core
    m_handler->OnSuspend(false);

    // start with initially elapsed ticks (OnTick will fire with elapsed_ticks + 1)
    StartTickTimer(elapsed_ticks + 1);

    HW_EnableInterrupts();
}
1866#endif
1867
1869{
1870 GetContext().Start();
1871}
1872
/// Initialize the stack memory of a task (user task, Sleep trap or Exit trap)
/// so that the very first context restore "returns" into the right entry
/// point via mret. Frame layout must match STK_ASM_SAVE/LOAD_CONTEXT exactly.
/// @return false when stack_type is unknown, true on success.
bool PlatformRiscV::InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
{
    // TaskFrame must map exactly onto the slot layout consumed by STK_ASM_SAVE_CONTEXT / STK_ASM_LOAD_CONTEXT - no padding allowed
    STK_STATIC_ASSERT_DESC(sizeof(TaskFrame) == (STK_RISCV_REGISTER_COUNT + STK_SERVICE_SLOTS) * sizeof(Word),
        "TaskFrame size must match REGSIZE: (REGISTER_COUNT + SERVICE_SLOTS) * REGBYTES");

    STK_ASSERT(stack_memory->GetStackSize() > (STK_RISCV_REGISTER_COUNT + STK_SERVICE_SLOTS));

    // initialize stack memory (fills all slots with STK_STACK_MEMORY_FILLER)
    Word *stack_top = PlatformContext::InitStackMemory(stack_memory);

    // initialize Stack Pointer (SP): frame sits at the bottom of the register window
    stack->SP = hw::PtrToWord(stack_top - (STK_RISCV_REGISTER_COUNT + STK_SERVICE_SLOTS));

    // place the task frame at SP directly at the base of the register window
    TaskFrame * const task_frame = reinterpret_cast<TaskFrame *>(stack->SP);

    // initialize registers for the user task's first start
    switch (stack_type)
    {
    case STACK_USER_TASK: {
        // mret lands in OnTaskRun with the task pointer in a0; when Run()
        // returns, ra sends control to OnTaskExit
        task_frame->MEPC = hw::PtrToWord(&OnTaskRun);
        task_frame->X1_RA = hw::PtrToWord(&OnTaskExit);
        task_frame->X10_A0 = hw::PtrToWord(user_task);
        break; }

    case STACK_SLEEP_TRAP: {
        task_frame->MEPC = hw::PtrToWord(GetContext().m_overrider != NULL ? &OnSchedulerSleepOverride : &OnSchedulerSleep);
        task_frame->X1_RA = STK_STACK_MEMORY_FILLER; // should not attempt to exit
        break; }

    case STACK_EXIT_TRAP: {
        task_frame->MEPC = hw::PtrToWord(&OnSchedulerExit);
        task_frame->X1_RA = STK_STACK_MEMORY_FILLER; // should not attempt to exit
        break; }

    default:
        return false;
    }

    // mstatus: return to M-mode (MPP), interrupts enabled on mret (MPIE),
    // FPU/extension state initial (FS/XS) if FPU present
    task_frame->MSTATUS = MSTATUS_MPP | MSTATUS_MPIE | (STK_RISCV_FPU != 0 ? (MSTATUS_FS | MSTATUS_XS) : 0);

#if (STK_RISCV_FPU != 0)
    task_frame->X3_FSR = 0; // FCSR = 0: round-to-nearest, no accrued exception flags
#endif

    return true;
}
1923
// Tear down scheduling state: silence the tick timer and any pending switch
// request, then flip the state flags. Barriers guarantee the flags are
// globally visible before the caller re-enters dispatch for the Exit trap.
void Context::OnStop()
{
    // stop timer
    HW_StopMTimer();

    // clear pending SV exception
    HW_ClearPendingSwitch();

    m_started = false;
    m_exiting = true;

    // make sure all assignments are set and executed
    __DSB();
    __ISB();
}
1939
// NOTE(review): declaration line elided in this listing — per the API index
// this is the platform Stop(): shut scheduling down, then dispatch once more
// so the Exit trap context (prepared in InitStack) runs OnSchedulerExit.
{
    GetContext().OnStop();

    // load context of the Exit trap
    HW_DisableInterrupts();
    OnTaskStart();
}
1948
1949uint32_t PlatformRiscV::GetTickResolution() const
1950{
1951 return GetContext().m_tick_resolution;
1952}
1953
// NOTE(review): declaration elided in this listing — per the API index this
// is GetSysTimerCount(): the running CLINT mtime counter as Cycles.
{
    return static_cast<Cycles>(HW_GetMtime());
}

// NOTE(review): declaration elided in this listing — per the API index this
// is GetSysTimerFrequency(): the mtime counter clock in Hz.
{
    return HW_MtimeClockFrequency();
}

// NOTE(review): declaration elided in this listing — forwards a task-switch
// request identified by the caller's SP (matches SwitchToNext()); confirm.
{
    GetContext().m_handler->OnTaskSwitch(HW_GetCallerSP());
}
1968
1969void PlatformRiscV::Sleep(Timeout ticks)
1970{
1971 GetContext().m_handler->OnTaskSleep(HW_GetCallerSP(), ticks);
1972}
1973
1974void PlatformRiscV::SleepUntil(Ticks timestamp)
1975{
1976 GetContext().m_handler->OnTaskSleepUntil(HW_GetCallerSP(), timestamp);
1977}
1978
1979IWaitObject *PlatformRiscV::Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
1980{
1981 return GetContext().m_handler->OnTaskWait(HW_GetCallerSP(), sync_obj, mutex, timeout);
1982}
1983
// NOTE(review): declaration elided in this listing — per the API index this
// is GetTid() const. In ISR context the id encodes the mcause code (bits
// 0..10) plus the interrupt flag (bit 11) under the TID_ISR_N mask; in task
// context it is resolved by the kernel from the caller's SP.
{
    if (HW_IsHandlerMode())
    {
        // to avoid the collision with TID_ISR_N mask, extract and fit into available space:

        const Word exc = HW_GetCurrentException();

        // low 11 bits of mcause: exception/interrupt code
        Word num = (exc & 0x7FFU);

        // bit 11: set when the top bit of mcause flags an interrupt
    #if (__riscv_xlen > 32)
        Word interrupt_bit = ((exc & (1ULL << (__riscv_xlen - 1))) ? 0x800U : 0);
    #else
        Word interrupt_bit = ((exc & (1U << (__riscv_xlen - 1))) ? 0x800U : 0);
    #endif

        TId isr_tid = TID_ISR_N | num | interrupt_bit;
        STK_ASSERT(IsIsrTid(isr_tid));
        return isr_tid;
    }

    return GetContext().m_handler->OnGetTid(HW_GetCallerSP());
}
2007
// NOTE(review): declaration elided in this listing — per the API index this
// is the platform Suspend(): returns the remaining sleep ticks, or 0 when
// tickless idle support is compiled out.
{
#if STK_TICKLESS_IDLE
    return GetContext().Suspend();
#else
    return 0;
#endif
}
2016
/// Resume scheduling after a prior Suspend(), crediting the ticks that
/// elapsed while suspended. No-op when tickless idle is compiled out.
void PlatformRiscV::Resume(Timeout elapsed_ticks)
{
#if STK_TICKLESS_IDLE
    GetContext().Resume(elapsed_ticks);
#else
    STK_UNUSED(elapsed_ticks);
#endif
}
2025
// NOTE(review): declaration elided in this listing — per the API index this
// is ProcessHardFault(): the user overrider gets the first chance to handle
// the fault; otherwise fall through to the (elided) halt path.
{
    if ((GetContext().m_overrider == NULL) || !GetContext().m_overrider->OnHardFault())
    {
        // NOTE(review): a statement is elided in this listing here — likely
        // STK_KERNEL_PANIC(KERNEL_PANIC_HRT_HARD_FAULT); confirm against source
    }
}
2033
2034void PlatformRiscV::SetEventOverrider(IEventOverrider *overrider)
2035{
2036 STK_ASSERT(!GetContext().m_started);
2037 GetContext().m_overrider = overrider;
2038}
2039
// NOTE(review): declaration elided in this listing — per the API index this
// is GetCallerSP() const: the caller's stack pointer as a Word.
{
    return HW_GetCallerSP();
}
2044
2045void PlatformRiscV::SetSpecificEventHandler(ISpecificEventHandler *handler)
2046{
2047 STK_ASSERT(!GetContext().m_started);
2048 GetContext().m_specific = handler;
2049}
2050
// NOTE(review): the declarations of the following nine small bodies are
// elided in this listing; identities below are inferred from the API index
// and each body's content — confirm each against the original source.

// Likely IKernelService accessor: returns the per-context service instance.
{
    return GetContext().m_service;
}

// Likely critical-section Enter(): bumps the context's nesting counter.
{
    GetContext().EnterCriticalSection();
}

// Likely critical-section Exit(): unwinds the context's nesting counter.
{
    GetContext().ExitCriticalSection();
}

// Likely SpinLock::Lock(): blocking acquire of the hardware spin lock.
{
    HW_SpinLockLock(m_lock);
}

// Likely SpinLock::Unlock(): release of the hardware spin lock.
{
    HW_SpinLockUnlock(m_lock);
}

// Likely SpinLock::TryLock(): single non-blocking acquire attempt.
{
    return HW_SpinLockTryLock(m_lock);
}

// Likely IsInsideISR(): delegates to the ISR-stack-range heuristic above.
{
    return HW_IsHandlerMode();
}

// Likely hi-res clock cycle counter accessor.
{
    return HiResClockImpl::GetInstance()->GetCycles();
}

// Likely hi-res clock frequency accessor; a zero frequency is a setup error.
{
    uint32_t freq = HiResClockImpl::GetInstance()->GetFrequency();
    STK_ASSERT(freq != 0);
    return freq;
}
2097
2098#endif // _STK_ARCH_RISC_V
#define STK_SYSTEM_CORE_CLOCK_FREQUENCY
System clock frequency in Hz. Default: 150 MHz.
#define STK_SYSTEM_CORE_CLOCK_VAR
Definition of the system core clock variable holding frequency of the CPU in Hz.
volatile uint32_t SystemCoreClock
System clock frequency in Hz.
Contains common inventory for platform implementation.
#define STK_ARCH_GET_CPU_ID()
Get CPU core id of the caller, e.g. if called while running on core 0 then returned value must be 0.
#define GetContext()
Get platform's context.
Hardware Abstraction Layer (HAL) declarations for the stk::hw namespace.
void STK_PANIC_HANDLER_DEFAULT(stk::EKernelPanicId id)
Default panic handler: disable interrupts, record the id, and spin in a tight loop — a defined,...
#define STK_KERNEL_PANIC(id)
Called when the kernel detects an unrecoverable internal fault.
Definition stk_arch.h:63
#define STK_UNUSED(X)
Explicitly marks a variable as unused to suppress compiler warnings.
Definition stk_defs.h:524
#define __stk_attr_used
Marks a symbol as used, preventing the linker from discarding it even if no references are visible (d...
Definition stk_defs.h:172
#define __stk_forceinline
Forces compiler to always inline the decorated function, regardless of optimisation level.
Definition stk_defs.h:104
#define STK_TICKLESS_IDLE
Enables tickless (dynamic-tick) low-power operation during idle periods.
Definition stk_defs.h:36
#define STK_ASSERT(e)
Runtime assertion. Halts execution if the expression e evaluates to false.
Definition stk_defs.h:330
#define __stk_attr_noinline
Prevents compiler from inlining the decorated function (function prefix).
Definition stk_defs.h:185
#define STK_CRITICAL_SECTION_NESTINGS_MAX
Maximum allowable recursion depth for critical section entry (default: 16).
Definition stk_defs.h:404
#define STK_ARCH_CPU_COUNT
Number of physical CPU cores available to the scheduler (default: 1).
Definition stk_defs.h:414
#define __stk_attr_naked
Suppresses compiler-generated function prologue and epilogue (function prefix).
Definition stk_defs.h:133
#define STK_STATIC_ASSERT_DESC(X, DESC)
Compile-time assertion with a custom error description. Produces a compilation error if X is false.
Definition stk_defs.h:350
#define STK_STACK_MEMORY_FILLER
Sentinel value written to the entire stack region at initialization (stack watermark pattern).
Definition stk_defs.h:377
#define __stk_attr_noreturn
Declares that function never returns to its caller (function prefix).
Definition stk_defs.h:146
Contains helper implementations which simplify user-side code.
Namespace of STK package.
uintptr_t Word
Native processor word type.
Definition stk_common.h:113
static constexpr TId TID_ISR_N
Bitmask sentinel for ISR-context task identifiers.
Definition stk_common.h:160
static __stk_forceinline Cycles ConvertTimeUsToClockCycles(Cycles clock_freq, Ticks time_us)
Convert time (microseconds) to core clock cycles.
static bool IsIsrTid(TId tid)
Test whether a task identifier represents an ISR context.
Definition stk_common.h:190
int64_t Ticks
Ticks value.
Definition stk_common.h:128
int32_t Timeout
Timeout time (ticks).
Definition stk_common.h:123
static constexpr T Max(T a, T b)
Compile-time maximum of two values.
Definition stk_defs.h:541
EStackType
Stack type.
Definition stk_common.h:70
@ STACK_SLEEP_TRAP
Stack of the Sleep trap.
Definition stk_common.h:72
@ STACK_USER_TASK
Stack of the user task.
Definition stk_common.h:71
@ STACK_EXIT_TRAP
Stack of the Exit trap.
Definition stk_common.h:73
uint64_t Cycles
Cycles value.
Definition stk_common.h:133
Word TId
Definition stk_common.h:118
@ ACCESS_PRIVILEGED
Privileged access mode (access to hardware is fully unrestricted).
Definition stk_common.h:33
EKernelPanicId
Identifies the source of a kernel panic.
Definition stk_common.h:52
@ KERNEL_PANIC_HRT_HARD_FAULT
Kernel running in KERNEL_HRT mode reported deadline failure of the task.
Definition stk_common.h:57
@ KERNEL_PANIC_NONE
Panic is absent (no fault).
Definition stk_common.h:53
@ KERNEL_PANIC_CPU_EXCEPTION
CPU reported an exception and halted execution.
Definition stk_common.h:58
@ KERNEL_PANIC_SPINLOCK_DEADLOCK
Spin-lock timeout expired: lock owner never released.
Definition stk_common.h:54
__stk_forceinline Word PtrToWord(T *ptr) noexcept
Cast a pointer to a CPU register-width integer.
Definition stk_arch.h:94
bool IsInsideISR()
Check whether the CPU is currently executing inside a hardware interrupt service routine (ISR).
uint32_t GetSysTimerFrequency() const
Get system timer frequency.
void SetEventOverrider(IEventOverrider *overrider)
Set platform event overrider.
void Initialize(IEventHandler *event_handler, IKernelService *service, uint32_t resolution_us, Stack *exit_trap)
Initialize scheduler's context.
void SleepUntil(Ticks timestamp)
Put calling process into a sleep state until the specified timestamp.
Timeout Suspend()
Suspend scheduling.
void SetSpecificEventHandler(ISpecificEventHandler *handler)
void Stop()
Stop scheduling.
void Sleep(Timeout ticks)
Put calling process into a sleep state.
void SwitchToNext()
Switch to a next task.
void ProcessTick()
Process one tick.
uint32_t GetTickResolution() const
Get resolution of the system tick timer in microseconds. Resolution means a number of microseconds be...
bool InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
Initialize stack memory of the user task.
void ProcessHardFault()
Cause a hard fault of the system.
void Resume(Timeout elapsed_ticks)
Resume scheduling after a prior Suspend() call.
IWaitObject * Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
Word GetCallerSP() const
Get caller's Stack Pointer (SP).
TId GetTid() const
Get thread Id.
void Start()
Start scheduling.
Cycles GetSysTimerCount() const
Get system timer count value.
Base platform context for all platform implementations.
static Word * InitStackMemory(IStackMemory *memory)
Initialize stack memory by filling it with STK_STACK_MEMORY_FILLER.
static void Enter()
Enter a critical section.
static void Exit()
Exit a critical section.
bool TryLock()
Attempt to acquire SpinLock in a single non-blocking attempt.
void Lock()
Acquire SpinLock, blocking until it is available.
void Unlock()
Release SpinLock, allowing another thread or core to acquire it.
static uint32_t GetFrequency()
Get clock frequency.
static Cycles GetCycles()
Get number of clock cycles elapsed.
Stack descriptor.
Definition stk_common.h:219
Word SP
Stack Pointer (SP) register (note: must be the first entry in this struct).
Definition stk_common.h:220
Interface for a stack memory region.
Definition stk_common.h:231
virtual size_t GetStackSize() const =0
Get number of elements of the stack memory array.
Wait object.
Definition stk_common.h:270
Synchronization object.
Definition stk_common.h:355
Interface for mutex synchronization primitive.
Definition stk_common.h:439
Interface for a user task.
Definition stk_common.h:491
virtual void Run()=0
Entry point of the user task.
Interface for the kernel services exposed to the user processes during run-time when Kernel started s...
static IKernelService * GetInstance()
Get CPU-local instance of the kernel service.
RISC-V specific event handler.