From 279364e274f55e98126938fd5332fff03cb2857e Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 17 Sep 2025 15:30:25 +0800
Subject: [PATCH 01/25] Add sched_t to kcb for O(1) scheduler support

Previously, the scheduler performed a linear search through the global
task list (kcb->tasks) to find the next TASK_READY task. This approach
limited scalability, as the number of search iterations grew with the
number of tasks, resulting in higher scheduling latency.

To support an O(1) scheduler and improve extensibility, a sched_t
structure is introduced and integrated into kcb. The new structure
contains:

- ready_queues: Holds all runnable tasks, including TASK_RUNNING and
  TASK_READY. The scheduler selects tasks directly from these queues.

- ready_bitmap: Records the state of each ready queue. Using the bitmap,
  the scheduler can locate the highest-priority runnable task in O(1)
  time complexity.

- rr_cursors: Round-robin cursors that track the next task node in each
  ready queue. Each priority level maintains its own RR cursor. The top
  priority cursor is assigned to kcb->task_current, which is advanced
  circularly after each scheduling cycle.

- hart_id: Identifies the scheduler instance per hart (0 for single-hart
  configurations).

- task_idle: The system idle task, executed when no runnable tasks exist.

In the current design, kcb binds only one sched_t instance (hart0) for
single-hart systems, but this structure can be extended for multi-hart
scheduling in the future.
---
 include/sys/task.h | 18 ++++++++++++++++++
 kernel/task.c      |  9 +++++++++
 2 files changed, 27 insertions(+)

diff --git a/include/sys/task.h b/include/sys/task.h
index 33d0b60..4199add 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -84,6 +84,21 @@ typedef struct tcb {
     void *rt_prio; /* Opaque pointer for custom real-time scheduler hook */
 } tcb_t;
 
+/* Per-hart scheduler state */
+typedef struct sched {
+    uint8_t ready_bitmap; /* 8-bit priority bitmap */
+    list_t
+        *ready_queues[TASK_PRIORITY_LEVELS]; /* Separate queue per priority */
+    uint16_t queue_counts[TASK_PRIORITY_LEVELS]; /* O(1) size tracking */
+
+    /* Weighted Round-Robin State per Priority Level */
+    list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */
+
+    /* Hart-Specific Data */
+    uint8_t hart_id; /* RISC-V hart identifier */
+
+} sched_t;
+
 /* Kernel Control Block (KCB)
  *
  * Singleton structure holding global kernel state, including task lists,
@@ -104,6 +119,9 @@ typedef struct {
     /* Timer Management */
     list_t *timer_list;      /* List of active software timers */
     volatile uint32_t ticks; /* Global system tick, incremented by timer */
+
+    /* Per-hart scheduler management */
+    sched_t *harts;
 } kcb_t;
 
 /* Global pointer to the singleton Kernel Control Block */

diff --git a/kernel/task.c b/kernel/task.c
index 59ffdae..e007cb2 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -15,6 +15,14 @@ static int32_t noop_rtsched(void);
 
 void _timer_tick_handler(void);
 
+/* Hart scheduler */
+static sched_t hart0 = {
+    .ready_bitmap = 0,
+    .ready_queues = {NULL},
+    .rr_cursors = {NULL},
+    .hart_id = 0,
+};
+
 /* Kernel-wide control block (KCB) */
 static kcb_t kernel_state = {
     .tasks = NULL,
@@ -25,6 +33,7 @@ static kcb_t kernel_state = {
     .task_count = 0,
     .ticks = 0,
     .preemptive = true, /* Default to preemptive mode */
+    .harts = &hart0,    /* Initial hart */
 };
 
 kcb_t *kcb = &kernel_state;

From 56c1926202216318cd4b83ed6612f1ae7e22963c Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 22 Oct 2025 21:52:13 +0800
Subject: [PATCH 02/25] Add list_unlink() for safe node
 removal from ready queue

Previously, the list operation for removal was limited to list_remove(),
which immediately freed the list node during the function call. When
removing a running task (TASK_RUNNING), the list node in the ready queue
must not be freed, because kcb->task_current shares the same node.

This change introduces list_unlink(), which detaches the node from the
list without freeing it. The unlinked node is left intact for the caller,
allowing safe reuse and improving flexibility in dequeue operations.

This API will be applied in sched_dequeue_task() for safely removing
tasks from ready queues.
---
 include/lib/list.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/include/lib/list.h b/include/lib/list.h
index 298e6c8..9f41866 100644
--- a/include/lib/list.h
+++ b/include/lib/list.h
@@ -134,6 +134,25 @@ static inline void *list_remove(list_t *list, list_node_t *target)
     return data;
 }
 
+/* Unlink a node from the list without freeing the node */
+static inline void list_unlink(list_t *list, list_node_t *target)
+{
+    if (unlikely(!list || !target || list_is_empty(list)))
+        return;
+
+    list_node_t *prev = list->head;
+    while (prev->next != list->tail && prev->next != target)
+        prev = prev->next;
+
+    if (unlikely(prev->next != target))
+        return; /* node not found */
+
+    prev->next = target->next;
+    target->next = NULL;
+    list->length--;
+    return;
+}
+
 /* Iteration */
 
 /* Callback should return non-NULL to stop early, NULL to continue */

From 6bd43f11dd5e8067a5d9916a45e16422e8bf1689 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 12:21:31 +0800
Subject: [PATCH 03/25] Add three macros for ready queue bitmap operations

When a task is enqueued into or dequeued from the ready queue, the bitmap
that indicates the ready queue state should be updated. These three
macros are used in the sched_enqueue_task() and sched_dequeue_task()
paths to improve readability and maintain consistency.
---
 kernel/task.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index e007cb2..351238b 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -37,6 +37,11 @@ static kcb_t kernel_state = {
 };
 kcb_t *kcb = &kernel_state;
 
+/* Bitmap operations */
+#define BITMAP_CHECK(prio) (kcb->harts->ready_bitmap & 1U << prio)
+#define BITMAP_SET(prio) (kcb->harts->ready_bitmap |= 1U << prio)
+#define BITMAP_CLEAN(prio) (kcb->harts->ready_bitmap &= ~(1U << prio))
+
 /* timer work management for reduced latency */
 static volatile uint32_t timer_work_pending = 0; /* timer work types */
 static volatile uint32_t timer_work_generation = 0; /* counter for coalescing */

From f5934fb7d4a6ec91fc6f8f5a74df30f0310cb0ea Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 12:39:29 +0800
Subject: [PATCH 04/25] Refactor sched_enqueue_task() for O(1) scheduler
 support

Previously, sched_enqueue_task() only changed the task state without
inserting the task into a ready queue. As a result, the scheduler could
not select the enqueued task for execution.

This change pushes the task into the appropriate ready queue using
list_pushback() and initializes related attributes such as the ready
bitmap and RR cursor. The ready queue for the corresponding task priority
is initialized on this enqueue path and never released afterward.

With this updated API, tasks can be enqueued into the ready queue and
selected by the cursor-based O(1) scheduler.
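As an illustration (a sketch, not part of the patch; it assumes a hosted
assert() is available and uses the sched_t fields and BITMAP_CHECK macro
introduced earlier in this series), the invariant this enqueue path
establishes can be stated as:

    #include <assert.h>

    /* Expected to hold after sched_enqueue_task(task) returns,
     * with p = task->prio_level on the enqueuing hart. */
    static void check_enqueue_invariant(uint8_t p)
    {
        sched_t *s = kcb->harts;
        assert(BITMAP_CHECK(p));            /* priority bit is set */
        assert(s->ready_queues[p] != NULL); /* queue was allocated */
        assert(s->queue_counts[p] > 0);     /* the task is counted */
        assert(s->rr_cursors[p] != NULL);   /* cursor points at a node */
    }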
--- kernel/task.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 351238b..12e4a31 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -82,7 +82,7 @@ static const uint8_t priority_timeslices[TASK_PRIORITY_LEVELS] = { TASK_TIMESLICE_IDLE /* Priority 7: Idle */ }; -/* Mark task as ready (state-based) */ +/* Enqueue task into ready queue */ static void sched_enqueue_task(tcb_t *task); /* Utility and Validation Functions */ @@ -349,17 +349,36 @@ void _yield(void) __attribute__((weak, alias("yield"))); * practical performance with strong guarantees for fairness and reliability. */ -/* Add task to ready state - simple state-based approach */ +/* Enqueue task into ready queue */ static void sched_enqueue_task(tcb_t *task) { if (unlikely(!task)) return; + uint8_t prio_level = task->prio_level; + /* Ensure task has appropriate time slice for its priority */ - task->time_slice = get_priority_timeslice(task->prio_level); + task->time_slice = get_priority_timeslice(prio_level); task->state = TASK_READY; - /* Task selection is handled directly through the master task list */ + list_t **rq = &kcb->harts->ready_queues[prio_level]; + list_node_t **cursor = &kcb->harts->rr_cursors[prio_level]; + + if (!*rq) + *rq = list_create(); + + list_node_t *rq_node = list_pushback(*rq, task); + if (unlikely(!rq_node)) + return; + + /* Update task count in ready queue */ + kcb->harts->queue_counts[prio_level]++; + + /* Setup first rr_cursor */ + if (!*cursor) + *cursor = rq_node; + BITMAP_SET(task->prio_level); + return; } /* Remove task from ready queues - state-based approach for compatibility */ From e5f828cc81083d9edb958aa9181025f3280ea1bc Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 13:02:44 +0800 Subject: [PATCH 05/25] Implement sched_dequeue_task() to dequeue task from ready queue Previously, mo_task_dequeue() was only a stub and returned immediately without performing any operation. As a result, tasks remained in the ready queue after being dequeued, leading to potential scheduler inconsistencies. This change implements the full dequeue process: - Searches for the task node in the ready queue by task ID. - Maintains RR cursor consistency: the RR cursor should always point to a valid task node in the ready queue. When removing a task node, the cursor is advanced circularly to the next node. - Unlinks the task node using list_unlink(), which removes the node from the ready queue without freeing it. list_unlink() is used instead of list_remove() to avoid accidentally freeing kcb->task_current when the current running task is dequeued. - Updates and checks queue_counts: if the ready queue becomes empty, the RR cursor is set to NULL and the bitmap is cleared until a new task is enqueued. --- kernel/task.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 12e4a31..8f4c4d2 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -381,16 +381,34 @@ static void sched_enqueue_task(tcb_t *task) return; } -/* Remove task from ready queues - state-based approach for compatibility */ -void sched_dequeue_task(tcb_t *task) +/* Remove task from ready queue; return removed ready queue node */ +static list_node_t *sched_dequeue_task(tcb_t *task) { if (unlikely(!task)) - return; + return NULL; - /* For tasks that need to be removed from ready state (suspended/cancelled), - * we rely on the state change. 
The scheduler will skip non-ready tasks
-     * when it encounters them during the round-robin traversal.
-     */
+    uint8_t prio_level = task->prio_level;
+
+    /* A task leaving the ready or running state must also be removed
+     * from its corresponding ready queue. */
+    list_t *rq = kcb->harts->ready_queues[prio_level];
+    list_node_t *rq_node = list_foreach(rq, idcmp, (void *) (size_t) task->id);
+    list_node_t **cursor = &kcb->harts->rr_cursors[prio_level];
+    if (!rq_node)
+        return NULL;
+
+    /* Safely advance the cursor to the next task node. */
+    if (rq_node == *cursor)
+        *cursor = list_cnext(rq, *cursor);
+
+    list_unlink(rq, rq_node);
+
+    /* Update the task count of the ready queue */
+    if (!--kcb->harts->queue_counts[prio_level]) {
+        *cursor = NULL;
+        BITMAP_CLEAN(task->prio_level);
+    }
+    return rq_node;
 }

From 6a7149dc30b75cf661c79533eaa3f9898af50ae8 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 22 Oct 2025 22:07:58 +0800
Subject: [PATCH 06/25] Refactor mo_task_spawn() for O(1) scheduler support

Previously, mo_task_spawn() only created a task and appended it to the
global task list (kcb->tasks); the first task was assigned directly from
the global list node.

This change adds a call to sched_enqueue_task() within the critical
section to enqueue the task into the ready queue and safely initialize
its scheduling attributes. The first task assignment is now aligned with
the RR cursor mechanism to ensure consistency with the O(1) scheduler.
---
 kernel/task.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index 8f4c4d2..cbd9310 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -666,8 +666,12 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->id = kcb->next_tid++;
     kcb->task_count++; /* Cached count of active tasks for quick access */
 
-    if (!kcb->task_current)
-        kcb->task_current = node;
+    /* Push node to ready queue */
+    sched_enqueue_task(tcb);
+    if (!kcb->task_current) {
+        kcb->task_current = kcb->harts->rr_cursors[tcb->prio_level];
+        tcb->state = TASK_RUNNING;
+    }
 
     CRITICAL_LEAVE();
 
@@ -681,7 +685,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
 
     /* Add to cache and mark ready */
     cache_task(tcb->id, tcb);
-    sched_enqueue_task(tcb);
 
     return tcb->id;
 }

From e986cd5ef043d613f2aaf7096d4db9f689364b05 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 22 Oct 2025 22:08:38 +0800
Subject: [PATCH 07/25] Refactor scheduler to RR cursor-based O(1) design

Previously, the scheduler iterated through the global task list
(kcb->tasks) to find the next TASK_READY task, resulting in O(N)
selection time. This approach limited scalability and caused inconsistent
task rotation under heavy load.

The new scheduling process:
1. Check the ready bitmap and find the highest priority level.
2. Select the RR cursor node from the corresponding ready queue.
3. Advance the selected cursor node circularly.

Why an RR cursor instead of pop/enqueue rotation (see the sketch below):
- Fewer operations on the ready queue: the pop/enqueue approach requires
  two function calls per switch, while the RR cursor method only advances
  one pointer per scheduling cycle.
- Cache friendly: always accesses the same cursor node, improving cache
  locality on hot paths.
- Cycle deterministic: the RR cursor design allows deterministic task
  rotation and enables potential future extensions such as cycle
  accounting or fairness-based algorithms.
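A minimal sketch of that rotation (illustrative only; it assumes the
list_cnext() circular-next helper from lib/list and the per-priority
fields added in this series):

    /* Illustrative only: one cursor-rotation step at priority level p. */
    static tcb_t *rr_pick_next(uint8_t p)
    {
        list_t *rq = kcb->harts->ready_queues[p];
        list_node_t **cursor = &kcb->harts->rr_cursors[p];
        tcb_t *next = (*cursor)->data;     /* task selected this cycle */
        *cursor = list_cnext(rq, *cursor); /* O(1): no dequeue/enqueue pair */
        return next;
    }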
This change introduces a fully O(1) scheduler design based on per-priority ready queues and round-robin (RR) cursors. Each ready queue maintains its own cursor, allowing the scheduler to select the next runnable task in constant time. --- include/sys/task.h | 2 -- kernel/task.c | 61 ++++++++++++++++++++++++---------------------- 2 files changed, 32 insertions(+), 31 deletions(-) diff --git a/include/sys/task.h b/include/sys/task.h index 4199add..9bb676e 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -128,8 +128,6 @@ typedef struct { extern kcb_t *kcb; /* System Configuration Constants */ -#define SCHED_IMAX \ - 500 /* Safety limit for scheduler iterations to prevent livelock */ #define MIN_TASK_STACK_SIZE \ 256 /* Minimum stack size to prevent stack overflow */ #define TASK_CACHE_SIZE \ diff --git a/kernel/task.c b/kernel/task.c index cbd9310..f019320 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -445,20 +445,20 @@ void sched_wakeup_task(tcb_t *task) } } -/* Efficient Round-Robin Task Selection with O(n) Complexity +/* Efficient Round-Robin Task Selection (Cursor-Based, O(1) Complexity) * - * Selects the next ready task using circular traversal of the master task list. + * Selects the next ready task by advancing the per-priority round-robin + * cursor (rr_cursor) circularly using list API list_cnext(). * - * Complexity: O(n) where n = number of tasks - * - Best case: O(1) when next task in sequence is ready - * - Worst case: O(n) when only one task is ready and it's the last checked - * - Typical case: O(k) where k << n (number of non-ready tasks to skip) + * Complexity: O(1) + * - Always constant-time selection, regardless of total task count. + * - No need to traverse the task list. * * Performance characteristics: - * - Excellent for small-to-medium task counts (< 50 tasks) - * - Simple and reliable implementation - * - Good cache locality due to sequential list traversal - * - Priority-aware time slice allocation + * - Ideal for systems with frequent context switches or many tasks. + * - Excellent cache locality: only touches nodes in the active ready queue. + * - Priority-aware: highest non-empty ready queue is chosen via bitmap lookup. + * - Each priority level maintains its own rr_cursor to ensure fair rotation. 
*/ uint16_t sched_select_next_task(void) { @@ -471,31 +471,34 @@ uint16_t sched_select_next_task(void) if (current_task->state == TASK_RUNNING) current_task->state = TASK_READY; - /* Round-robin search: find next ready task in the master task list */ - list_node_t *start_node = kcb->task_current; - list_node_t *node = start_node; - int iterations = 0; /* Safety counter to prevent infinite loops */ + /* Check out bitmap */ + uint32_t bitmap = kcb->harts->ready_bitmap; + if (unlikely(!bitmap)) + panic(ERR_NO_TASKS); - do { - /* Move to next task (circular) */ - node = list_cnext(kcb->tasks, node); - if (!node || !node->data) - continue; + /* Find top priority ready queue */ + int top_prio_level = 0; + for (; !(bitmap & 1U); top_prio_level++, bitmap >>= 1) + ; - tcb_t *task = node->data; + list_node_t **cursor = &kcb->harts->rr_cursors[top_prio_level]; + list_t *rq = kcb->harts->ready_queues[top_prio_level]; + if (unlikely(!rq || !*cursor)) + panic(ERR_NO_TASKS); - /* Skip non-ready tasks */ - if (task->state != TASK_READY) - continue; + /* Update next task with top priority cursor */ + kcb->task_current = *cursor; - /* Found a ready task */ - kcb->task_current = node; - task->state = TASK_RUNNING; - task->time_slice = get_priority_timeslice(task->prio_level); + /* Advance top priority cursor to next task node */ + *cursor = list_cnext(rq, *cursor); - return task->id; + /* Update new task properties */ + tcb_t *new_task = kcb->task_current->data; + new_task->time_slice = get_priority_timeslice(new_task->prio_level); + new_task->state = TASK_RUNNING; - } while (node != start_node && ++iterations < SCHED_IMAX); + if (kcb->task_current) + return new_task->id; /* No ready tasks found - this should not happen in normal operation */ panic(ERR_NO_TASKS); From debd486889415d871c4d088c0690866c33bf5de5 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Sun, 19 Oct 2025 13:59:40 +0800 Subject: [PATCH 08/25] Add ready queue dequeue path in mo_task_suspend() Previously, mo_task_suspend() only changed the task state to TASK_SUSPENDED without removing the task from the ready queue. As a result, suspended tasks could still be selected by the scheduler, leading to incorrect task switching and inconsistent queue states. This change adds a dequeue operation to remove the corresponding task node from its ready queue before marking it as suspended. Additionally, the condition to detect the currently running task has been updated: the scheduler now compares the TCB pointer (kcb->task_current->data == task) instead of the list node (kcb->task_current == node), since kcb->task_current now stores a ready queue node rather than a global task list node. If the suspended task is currently running, the CPU will yield after the task is suspended to allow the scheduler to select the next runnable task. This ensures that suspended tasks are no longer visible to the scheduler until they are resumed. 
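To illustrate the resulting lifecycle (a sketch, not part of the patch;
tid stands for any previously spawned task ID):

    mo_task_suspend(tid); /* unlinked from its ready queue; node freed */
    /* From here, sched_select_next_task() can no longer pick the task. */
    mo_task_resume(tid);  /* re-enqueued via sched_enqueue_task() (patch 10) */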
---
 kernel/task.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/kernel/task.c b/kernel/task.c
index f019320..fb9d8f8 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -779,8 +779,15 @@ int32_t mo_task_suspend(uint16_t id)
         return ERR_TASK_CANT_SUSPEND;
     }
 
+    /* Remove the task's node from its ready queue if it is enqueued there
+     * (TASK_RUNNING/TASK_READY). */
+    if (task->state == TASK_READY || task->state == TASK_RUNNING) {
+        list_node_t *rq_node = sched_dequeue_task(task);
+        free(rq_node);
+    }
+
     task->state = TASK_SUSPENDED;
-    bool is_current = (kcb->task_current == node);
+    bool is_current = (kcb->task_current->data == task);
 
     CRITICAL_LEAVE();

From 5c0fe59a542fdbeb331af8b2ab8ddd094e74ca18 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 11:35:00 +0800
Subject: [PATCH 09/25] Add ready queue dequeue path in mo_task_cancel()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously, mo_task_cancel() only removed the task node from the global
task list (kcb->tasks) but did not remove it from the ready queue. As a
result, the scheduler could still select a canceled task that remained in
the ready queue. Additionally, a double free could occur, because the
same node had already been freed by list_remove().

This change adds a call to sched_dequeue_task() to remove the task from
the ready queue, ensuring that once a task is canceled, it will no longer
appear in the scheduler’s selection path. This also prevents memory
corruption caused by double-freeing list nodes.
---
 kernel/task.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/kernel/task.c b/kernel/task.c
index fb9d8f8..8c0b087 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -722,12 +722,17 @@ int32_t mo_task_cancel(uint16_t id)
         }
     }
 
+    /* Remove from ready queue */
+    if (tcb->state == TASK_READY) {
+        list_node_t *rq_node = sched_dequeue_task(tcb);
+        free(rq_node);
+    }
+
     CRITICAL_LEAVE();
 
     /* Free memory outside critical section */
     free(tcb->stack);
     free(tcb);
-    free(node);
 
     return ERR_OK;
 }

From 88c9619bce38c92578e2781dd6d2a23a55b72026 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 11:47:29 +0800
Subject: [PATCH 10/25] Add sched_enqueue_task() in mo_task_resume()

Previously, mo_task_resume() only changed the resumed task's state to
TASK_READY but did not enqueue it into a ready queue. As a result, the
scheduler could not select the resumed task for execution.

This change adds sched_enqueue_task() to insert the resumed task into the
appropriate ready queue and update the ready bitmap, ensuring the resumed
task becomes schedulable again.
---
 kernel/task.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index 8c0b087..a9721ba 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -819,9 +819,8 @@ int32_t mo_task_resume(uint16_t id)
         CRITICAL_LEAVE();
         return ERR_TASK_CANT_RESUME;
     }
-
-    /* mark as ready - scheduler will find it */
-    task->state = TASK_READY;
+    /* Enqueue resumed task into ready queue */
+    sched_enqueue_task(task);
 
     CRITICAL_LEAVE();
     return ERR_OK;

From dae030ad96d7a56a01a2e92eb1a3c5f3fc8e0618 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sun, 19 Oct 2025 16:06:24 +0800
Subject: [PATCH 11/25] Add ready queue enqueue path in mo_task_wakeup()

Previously, mo_task_wakeup() only changed the task state to TASK_READY
without enqueuing the task back into the ready queue.
As a result, a woken-up task could remain invisible to the scheduler and
never be selected for execution.

This change adds a call to sched_enqueue_task() to insert the task into
the appropriate ready queue based on its priority level. The ready
bitmap, the task count of each ready queue, and the RR cursor are updated
accordingly to maintain scheduler consistency.

With this update, tasks that transition out of a blocked or suspended
state can be properly scheduled for execution once they are woken up.
---
 kernel/task.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index a9721ba..1c5ac72 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -429,20 +429,16 @@ void sched_tick_current_task(void)
     }
 }
 
-/* Task wakeup - simple state transition approach */
+/* Task wakeup and enqueue into ready queue */
 void sched_wakeup_task(tcb_t *task)
 {
     if (unlikely(!task))
         return;
 
-    /* Mark task as ready - scheduler will find it during round-robin traversal
+    /* Enqueue task into ready queue for scheduler selection by rr_cursor.
      */
-    if (task->state != TASK_READY) {
-        task->state = TASK_READY;
-        /* Ensure task has time slice */
-        if (task->time_slice == 0)
-            task->time_slice = get_priority_timeslice(task->prio_level);
-    }
+    if (task->state != TASK_READY && task->state != TASK_RUNNING)
+        sched_enqueue_task(task);
 }

From 90475324837b3b8119f98e3cee56ce0f7cf25121 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Tue, 21 Oct 2025 16:50:12 +0800
Subject: [PATCH 12/25] Add sched_migrate_task() helper

This commit introduces a new API, sched_migrate_task(), which enables
migration of a task between ready queues of different priority levels.
The function safely removes the task from its current ready queue and
enqueues it into the target queue, updating the corresponding RR cursor
and ready bitmap to maintain scheduler consistency.

This helper will be used in mo_task_priority() and other task management
routines that adjust task priority dynamically.

Future improvement: the current enqueue path allocates a new list node
for each task insertion based on its TCB pointer. In the future, this can
be optimized by directly transferring or reusing the existing list node
between ready queues, eliminating the additional malloc() and free()
operations during priority migrations.
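For illustration, the intended call pattern on a priority change (a
sketch, not part of the patch; "task" is a TCB pointer and "new_priority"
stands for any valid priority value):

    /* Only tasks sitting in a ready queue need to be migrated. */
    if (task->state == TASK_READY || task->state == TASK_RUNNING)
        sched_migrate_task(task, new_priority);
    /* Blocked or suspended tasks need no migration: they are re-enqueued
     * under their updated prio_level when they next wake up. */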
---
 kernel/task.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 1c5ac72..643b61b 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -411,6 +411,28 @@ static list_node_t *sched_dequeue_task(tcb_t *task)
     return rq_node;
 }
 
+/* Migrate a task from its original ready queue to the new priority's queue */
+static void sched_migrate_task(tcb_t *task, int16_t priority)
+{
+    if (unlikely(!task || !is_valid_priority(priority)))
+        return;
+
+    if (task->prio == priority)
+        return;
+
+    /* Unlink the task node from its original ready queue */
+    list_node_t *rq_node = sched_dequeue_task(task);
+    free(rq_node);
+
+    /* Update new properties */
+    task->prio = priority;
+    task->prio_level = extract_priority_level(priority);
+
+    /* Enqueue into the new priority's ready queue */
+    sched_enqueue_task(task);
+    return;
+}
+
 /* Handle time slice expiration for current task */
 void sched_tick_current_task(void)
 {

From 40cbc78099f6f4dfdec6c3331d0f7f1f5dd27ffa Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Wed, 22 Oct 2025 13:23:40 +0800
Subject: [PATCH 13/25] Use sched_migrate_task() in mo_task_priority()

This change refactors the priority update process in mo_task_priority()
to include early-return checks and proper task migration handling.

- Early-return conditions:
  * Prevent modification of the idle task.
  * Disallow assigning TASK_PRIO_IDLE to non-idle tasks.
  The idle task is created by idle_task_init() during system startup and
  must retain its fixed priority.

- Task migration: if the priority-changed task resides in a ready queue
  (TASK_READY or TASK_RUNNING), sched_migrate_task() is called to move it
  to the queue corresponding to the new priority.

- Running task behavior: when the current running task changes its own
  priority, it yields the CPU so the scheduler can dispatch the next
  highest-priority task.
---
 kernel/task.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 643b61b..89ea7be 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -862,12 +862,22 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
         return ERR_TASK_NOT_FOUND;
     }
 
+    bool is_current = (kcb->task_current->data == task);
+
+    /* Migrate the task to the ready queue of its new priority */
+    if (task->state == TASK_RUNNING || task->state == TASK_READY)
+        sched_migrate_task(task, priority);
+
     /* Update priority and level */
     task->prio = priority;
     task->prio_level = extract_priority_level(priority);
     task->time_slice = get_priority_timeslice(task->prio_level);
 
     CRITICAL_LEAVE();
+
+    if (is_current)
+        mo_task_yield();
+
     return ERR_OK;
 }

From df3b2d7523258cb74c89ca1432e1662af7c81905 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Tue, 21 Oct 2025 16:33:35 +0800
Subject: [PATCH 14/25] Add idle task and initialization API
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit introduces the system idle task and its initialization API
(idle_task_init()). The idle task serves as the default execution context
when no other runnable tasks exist in the system. The sched_idle()
function supports both preemptive and cooperative modes.

In sched_t, a list node named task_idle is added to record the idle task
sentinel. The idle task never enters any ready queue and its priority
level cannot be changed.

When idle_task_init() is called, the idle task is initialized as the
first execution context. This eliminates the need for additional APIs in
main() to set up the initial high-priority task during system launch.
This design allows task priorities to be adjusted safely during app_main(), while keeping the scheduler’s entry point consistent. --- include/sys/task.h | 18 ++++++++++-- kernel/task.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 2 deletions(-) diff --git a/include/sys/task.h b/include/sys/task.h index 9bb676e..76f2fd6 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -95,8 +95,8 @@ typedef struct sched { list_node_t *rr_cursors[TASK_PRIORITY_LEVELS]; /* Round-robin position */ /* Hart-Specific Data */ - uint8_t hart_id; /* RISC-V hart identifier */ - + uint8_t hart_id; /* RISC-V hart identifier */ + list_node_t *task_idle; /* Idle task */ } sched_t; /* Kernel Control Block (KCB) @@ -303,3 +303,17 @@ void _sched_block(queue_t *wait_q); * Returns 'true' to enable preemptive scheduling, or 'false' for cooperative */ int32_t app_main(void); + +/* Initialize the idle task + * + * This function statically creates and initializes the idle task structure. + * It should be called once during system startup. + * + * The idle task is a permanent system task that runs when no other + * ready tasks exist. It is never enqueued into any ready queue and + * cannot be suspended, canceled, or priority modified. + * + * Only one idle task exists per hart. Its priority is fixed to the + * lowest level and its time slice is zero. + */ +void idle_task_init(void); \ No newline at end of file diff --git a/kernel/task.c b/kernel/task.c index 89ea7be..5a7c00b 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -21,6 +21,7 @@ static sched_t hart0 = { .ready_queues = {NULL}, .rr_cursors = {NULL}, .hart_id = 0, + .task_idle = NULL, }; /* Kernel-wide control block (KCB) */ @@ -463,6 +464,20 @@ void sched_wakeup_task(tcb_t *task) sched_enqueue_task(task); } +/* System idle task, it will be executed when no ready tasks in ready queue */ +static void sched_idle(void) +{ + if (!kcb->preemptive) + /* Cooperative mode idle */ + while (1) + mo_task_yield(); + + /* Preemptive mode idle */ + while (1) + mo_task_wfi(); +} + + /* Efficient Round-Robin Task Selection (Cursor-Based, O(1) Complexity) * * Selects the next ready task by advancing the per-priority round-robin @@ -627,6 +642,63 @@ static bool init_task_stack(tcb_t *tcb, size_t stack_size) return true; } +/* Initialize idle task */ +void idle_task_init(void) +{ + /* Ensure proper alignment */ + size_t stack_size = DEFAULT_STACK_SIZE; + stack_size = (stack_size + 0xF) & ~0xFU; + + /* Allocate and initialize TCB */ + tcb_t *idle = malloc(sizeof(tcb_t)); + if (!idle) + panic(ERR_TCB_ALLOC); + + idle->entry = &sched_idle; + idle->delay = 0; + idle->rt_prio = NULL; + idle->state = TASK_READY; + idle->flags = 0; + + /* Set idle task priority */ + idle->prio = TASK_PRIO_IDLE; + idle->prio_level = 0; + idle->time_slice = 0; + + /* Set idle task id and task count */ + idle->id = kcb->next_tid++; + kcb->task_count++; + + /* Initialize stack */ + if (!init_task_stack(idle, stack_size)) { + free(idle); + panic(ERR_STACK_ALLOC); + } + + /* Allocate and initialize idle task node */ + list_node_t *idle_task_node = malloc(sizeof(list_node_t)); + if (!idle_task_node) { + free(idle->stack); + free(idle); + panic(ERR_STACK_ALLOC); + } + idle_task_node->data = idle; + idle_task_node->next = NULL; + kcb->harts->task_idle = idle_task_node; + + /* Initialize idle task execution context */ + hal_context_init(&idle->context, (size_t) idle->stack, stack_size, + (size_t) &sched_idle); + + printf("idle id %u: entry=%p stack=%p size=%u\n", idle->id, 
&sched_idle, idle->stack, (unsigned int) stack_size);
+
+    if (!kcb->task_current)
+        kcb->task_current = kcb->harts->task_idle;
+
+    return;
+}
+
 /* Task Management API */
 
 int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)

From f80f535b88284c8feddb1fc7cf84f76d6e8af7fc Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 00:42:35 +0800
Subject: [PATCH 15/25] Add sched_switch_to_idle() helper

When all ready queues are empty, the scheduler should switch to idle mode
and wait for incoming interrupts. This commit introduces a dedicated
helper to handle that transition, centralizing the logic and improving
readability of the scheduler path to idle.
---
 kernel/task.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 5a7c00b..67a1672 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -477,6 +477,14 @@ static void sched_idle(void)
         mo_task_wfi();
 }
 
+/* Switch to the idle task and return its TCB */
+static inline tcb_t *sched_switch_to_idle(void)
+{
+    kcb->task_current = kcb->harts->task_idle;
+    tcb_t *idle = kcb->harts->task_idle->data;
+    idle->state = TASK_RUNNING;
+    return idle;
+}
 
 /* Efficient Round-Robin Task Selection (Cursor-Based, O(1) Complexity)
  *

From b0ea73031682d8bde4549cfe3c563d55053b026c Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 01:00:02 +0800
Subject: [PATCH 16/25] Add sched_switch_to_idle() helper in the scheduler

Previously, when all ready queues were empty, the scheduler would trigger
a kernel panic. This condition should instead transition into the idle
task rather than panic. The new sched_switch_to_idle() helper centralizes
this logic, making the path to idle clearer and more readable.
---
 kernel/task.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/task.c b/kernel/task.c
index 67a1672..f44fec3 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -515,7 +515,7 @@ uint16_t sched_select_next_task(void)
     /* Check out bitmap */
     uint32_t bitmap = kcb->harts->ready_bitmap;
     if (unlikely(!bitmap))
-        panic(ERR_NO_TASKS);
+        return sched_switch_to_idle()->id;
 
     /* Find top priority ready queue */
     int top_prio_level = 0;

From b38a1d0f2079cb7a53fa6ad7465ae1c1d053d30a Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Fri, 24 Oct 2025 02:06:08 +0800
Subject: [PATCH 17/25] Add idle_task_init() call in main()

The idle task is now initialized in main() during system startup. This
ensures that the scheduler always has a valid execution context before
any user or application tasks are created. Initializing the idle task
early guarantees a safe fallback path when no runnable tasks exist and
keeps the scheduler entry point consistent.
---
 kernel/main.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/kernel/main.c b/kernel/main.c
index efa46ff..7b63df3 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -23,6 +23,9 @@ int32_t main(void)
     printf("Heap initialized, %u bytes available\n",
            (unsigned int) (size_t) &_heap_size);
 
+    /* Initialize idle task */
+    idle_task_init();
+
     /* Call the application's main entry point to create initial tasks.
*/ kcb->preemptive = (bool) app_main(); printf("Scheduler mode: %s\n", From 7257b0780648774faacc9d7942d2f0d704676168 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 02:04:47 +0800 Subject: [PATCH 18/25] Refactor launch sequence in main() for scheduler initialization This change sets up the scheduler state during system startup by assigning kcb->task_current to kcb->harts->task_idle and dispatching to the idle task as the first execution context. This commit also keeps the scheduling entry path consistent between startup and runtime. --- kernel/main.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/kernel/main.c b/kernel/main.c index 7b63df3..3baccd5 100644 --- a/kernel/main.c +++ b/kernel/main.c @@ -23,35 +23,29 @@ int32_t main(void) printf("Heap initialized, %u bytes available\n", (unsigned int) (size_t) &_heap_size); - /* Initialize idle task */ + /* Initialize the first current task as idle sentinel node. + * This ensures a valid entry point before any real task runs. + */ idle_task_init(); + kcb->task_current = kcb->harts->task_idle; /* Call the application's main entry point to create initial tasks. */ kcb->preemptive = (bool) app_main(); printf("Scheduler mode: %s\n", kcb->preemptive ? "Preemptive" : "Cooperative"); - /* Verify that the application created at least one task. - * If 'kcb->task_current' is still NULL, it means mo_task_spawn was never - * successfully called. - */ - if (!kcb->task_current) - panic(ERR_NO_TASKS); - /* Save the kernel's context. This is a formality to establish a base * execution context before launching the first real task. */ setjmp(kcb->context); - /* Launch the first task. - * 'kcb->task_current' was set by the first call to mo_task_spawn. - * This function transfers control and does not return. + /* Launch the first task (idle task), then scheduler will select highest + * priority task. This function transfers control and does not return. */ - tcb_t *first_task = kcb->task_current->data; - if (!first_task) - panic(ERR_NO_TASKS); + tcb_t *idle = kcb->task_current->data; + idle->state = TASK_RUNNING; - hal_dispatch_init(first_task->context); + hal_dispatch_init(idle->context); /* This line should be unreachable. */ panic(ERR_UNKNOWN); From e31e0799db4cec8263380f1624c9cf1d011f3c06 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Fri, 24 Oct 2025 02:09:33 +0800 Subject: [PATCH 19/25] Remove first-task binding from task initialization Previously, both mo_task_spawn() and idle_task_init() implicitly bound their created tasks to kcb->task_current as the first execution context. This behavior caused ambiguity with the scheduler, which is now responsible for determining the active task during system startup. This change removes the initial binding logic from both functions, allowing the startup process (main()) to explicitly assign kcb->task_current (typically to the idle task) during launch. This ensures a single, centralized initialization flow and improves the separation between task creation and scheduling control. 
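Taken together with patches 17 and 18, the startup flow in main() now
reads (abridged sketch of kernel/main.c as of this series; error handling
and prints omitted):

    idle_task_init();                          /* create the idle task */
    kcb->task_current = kcb->harts->task_idle; /* explicit initial binding */
    kcb->preemptive = (bool) app_main();       /* application creates tasks */
    setjmp(kcb->context);                      /* base kernel context */
    tcb_t *idle = kcb->task_current->data;
    idle->state = TASK_RUNNING;
    hal_dispatch_init(idle->context);          /* launch; never returns */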
---
 kernel/task.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/kernel/task.c b/kernel/task.c
index f44fec3..333bf19 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -701,9 +701,6 @@ void idle_task_init(void)
     printf("idle id %u: entry=%p stack=%p size=%u\n", idle->id, &sched_idle,
            idle->stack, (unsigned int) stack_size);
 
-    if (!kcb->task_current)
-        kcb->task_current = kcb->harts->task_idle;
-
     return;
 }
 
@@ -769,10 +766,6 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
 
     /* Push node to ready queue */
     sched_enqueue_task(tcb);
-    if (!kcb->task_current) {
-        kcb->task_current = kcb->harts->rr_cursors[tcb->prio_level];
-        tcb->state = TASK_RUNNING;
-    }
 
     CRITICAL_LEAVE();

From a0c40a05c0b9d2552686a820319f24f4dc545e46 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sun, 26 Oct 2025 15:04:46 +0800
Subject: [PATCH 20/25] Add De Bruijn LUT for future O(1) priority selection

Prepare for O(1) bitmap index lookup by adding a 32-entry De Bruijn
sequence table. The table will be used in later commits to replace
iterative bit scanning.

No functional change in this patch.
---
 kernel/task.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 333bf19..132fa82 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -337,6 +337,11 @@ void panic(int32_t ecode)
     hal_panic();
 }
 
+/* RISC-V optimized priority finding using De Bruijn sequence */
+static const uint8_t debruijn_lut[32] = {
+    0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
+    31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9};
+
 /* Weak aliases for context switching functions. */
 void dispatch(void);
 void yield(void);

From 9e21688bd14d24f56ee8818ca908b08c4a7f7ff2 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sun, 26 Oct 2025 15:17:41 +0800
Subject: [PATCH 21/25] Implement De Bruijn-based top priority helper

Implement the helper function that uses a De Bruijn multiply-and-LUT
approach to compute the index of the least-significant set bit in O(1)
time complexity.

This helper is not yet wired into the scheduler logic; integration will
follow in a later commit.

No functional change in this patch.
---
 kernel/task.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index 132fa82..659e577 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -342,6 +342,18 @@ static const uint8_t debruijn_lut[32] = {
     0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
     31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9};
 
+/* O(1) priority selection optimized for RISC-V */
+static inline uint8_t find_highest_ready_priority(uint32_t bitmap)
+{
+    /* Isolate rightmost set bit (highest priority) */
+    uint32_t isolated = bitmap & (-bitmap);
+
+    /* De Bruijn multiplication for O(1) bit position finding */
+    uint32_t hash = (isolated * 0x077CB531U) >> 27;
+
+    return debruijn_lut[hash & 0x1F];
+}
+
 /* Weak aliases for context switching functions. */
 void dispatch(void);
 void yield(void);

From 0b002a8721b707d02a4b9dbde9cebe0a5b90f583 Mon Sep 17 00:00:00 2001
From: vicLin8712
Date: Sun, 26 Oct 2025 15:22:56 +0800
Subject: [PATCH 22/25] Use De Bruijn-based top priority helper in scheduler

Replace the iterative bitmap scanning with the De Bruijn multiply+LUT
method via the new helper. This change makes top-priority selection
constant-time and deterministic.
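As a quick worked example of the helper (illustrative arithmetic, using
the constant and table from patches 20-21):

    uint32_t bitmap = (1U << 4) | (1U << 2); /* levels 2 and 4 are ready */
    uint32_t isolated = bitmap & (-bitmap);  /* 0x04: keep lowest set bit */
    uint32_t hash = (isolated * 0x077CB531U) >> 27; /* 0x1DF2D4C4 >> 27 = 3 */
    uint8_t level = debruijn_lut[hash & 0x1F];      /* == 2: level 2 wins */

Because lower level indices encode higher priorities in the ready bitmap,
isolating the least-significant set bit yields the top-priority non-empty
queue.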
--- kernel/task.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/kernel/task.c b/kernel/task.c index 659e577..2cd66f8 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -535,9 +535,7 @@ uint16_t sched_select_next_task(void) return sched_switch_to_idle()->id; /* Find top priority ready queue */ - int top_prio_level = 0; - for (; !(bitmap & 1U); top_prio_level++, bitmap >>= 1) - ; + uint8_t top_prio_level = find_highest_ready_priority(bitmap); list_node_t **cursor = &kcb->harts->rr_cursors[top_prio_level]; list_t *rq = kcb->harts->ready_queues[top_prio_level]; From 3572bea0d0efa0542b2a0977bd3b69bcd9f71e1a Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 5 Nov 2025 09:29:23 +0800 Subject: [PATCH 23/25] Add dequeuing ready queue path in _sched_block() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, _sched_block() only enqueued the task into the wait queue and set its state to TASK_BLOCKED. In the new scheduler design (ready-queue–based), a blocked task must also be removed from its priority's ready queue to prevent it from being selected by the scheduler. This change adds the missing dequeue path for the corresponding ready queue, ensuring behavior consistency. --- kernel/task.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/task.c b/kernel/task.c index 2cd66f8..13ffc1e 100644 --- a/kernel/task.c +++ b/kernel/task.c @@ -1050,6 +1050,8 @@ void _sched_block(queue_t *wait_q) tcb_t *self = kcb->task_current->data; + sched_dequeue_task(self); + if (queue_enqueue(wait_q, self) != 0) panic(ERR_SEM_OPERATION); From 29b76cf364264733f6cfd38b6105628f90b391a0 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 5 Nov 2025 10:20:09 +0800 Subject: [PATCH 24/25] Make sched_wakeup_task() globally visible Previously, sched_wakeup_task() was limited to internal use within the scheduler module. This change makes it globally visible so that it can be reused in semaphore.c for task wake-up operations. --- include/sys/task.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/sys/task.h b/include/sys/task.h index 76f2fd6..63c2786 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -316,4 +316,7 @@ int32_t app_main(void); * Only one idle task exists per hart. Its priority is fixed to the * lowest level and its time slice is zero. */ -void idle_task_init(void); \ No newline at end of file +void idle_task_init(void); + +/* Wake up and enqueue task into ready queue */ +void sched_wakeup_task(tcb_t *); \ No newline at end of file From f6b0f58555b900f5fab4f168c2251792e78aefe8 Mon Sep 17 00:00:00 2001 From: vicLin8712 Date: Wed, 5 Nov 2025 10:25:06 +0800 Subject: [PATCH 25/25] Add sched_wakeup_task() in mo_sem_signal() Previously, mo_sem_signal() only changed the awakened task state to TASK_READY when a semaphore signal was triggered. In the new scheduler design, which selects runnable tasks from ready queues, the awakened task must also be enqueued for scheduling. This change invokes sched_wakeup_task() to perform the enqueue operation, ensuring the awakened task is properly inserted into the ready queue. 
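For context, the resulting wake-up path in mo_sem_signal() (condensed
view of the changed hunk; surrounding validation and locking elided):

    if (likely(awakened_task->state == TASK_BLOCKED)) {
        sched_wakeup_task(awakened_task); /* set TASK_READY and enqueue */
        should_yield = true;
    }

With this last piece, every ready-queue transition in the series flows
through the scheduler helpers: enqueues via mo_task_spawn(),
mo_task_resume(), mo_task_wakeup(), and mo_sem_signal(); dequeues via
mo_task_suspend(), mo_task_cancel(), and _sched_block(); and both via
sched_migrate_task() on priority changes.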
--- kernel/semaphore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/semaphore.c b/kernel/semaphore.c index 640e372..fbc3271 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c @@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s) if (likely(awakened_task)) { /* Validate awakened task state consistency */ if (likely(awakened_task->state == TASK_BLOCKED)) { - awakened_task->state = TASK_READY; + sched_wakeup_task(awakened_task); should_yield = true; } else { /* Task state inconsistency - this should not happen */