nuttx-commits mailing list archives

From: acas...@apache.org
Subject: [incubator-nuttx] 01/02: sched/sched/sched.h: Make naming of all internal names consistent:
Date: Sat, 09 May 2020 19:58:51 GMT
This is an automated email from the ASF dual-hosted git repository.

acassis pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit f92dba212d1551f541dfedd61a6ad1401d8aaff4
Author: Gregory Nutt <gnutt@nuttx.org>
AuthorDate: Sat May 9 12:40:14 2020 -0600

    sched/sched/sched.h:  Make naming of all internal names consistent:
    
    1. All internal scheduler functions should begin with nxsched_, not sched_
    2. Follow the consistent naming pattern of https://cwiki.apache.org/confluence/display/NUTTX/Naming+of+OS+Internal+Functions
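
    As a minimal sketch of the convention in item 1 (prototypes are
    paraphrased from the call sites in this patch, not copied from
    sched.h):

        /* Before: internal helpers shared the sched_ prefix with
         * application-facing interfaces such as sched_lock() and
         * sched_yield():
         *
         *   bool sched_removereadytorun(FAR struct tcb_s *tcb);
         *   bool sched_addreadytorun(FAR struct tcb_s *tcb);
         *
         * After: the nxsched_ prefix marks them as OS-internal:
         */

        bool nxsched_remove_readytorun(FAR struct tcb_s *tcb);
        bool nxsched_add_readytorun(FAR struct tcb_s *tcb);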
---
 TODO                                             |  6 +-
 arch/arm/src/arm/arm_blocktask.c                 |  6 +-
 arch/arm/src/arm/arm_releasepending.c            |  2 +-
 arch/arm/src/arm/arm_reprioritizertr.c           | 10 +--
 arch/arm/src/arm/arm_unblocktask.c               |  4 +-
 arch/arm/src/armv6-m/arm_blocktask.c             |  6 +-
 arch/arm/src/armv6-m/arm_releasepending.c        |  2 +-
 arch/arm/src/armv6-m/arm_reprioritizertr.c       | 10 +--
 arch/arm/src/armv6-m/arm_unblocktask.c           |  4 +-
 arch/arm/src/armv7-a/arm_blocktask.c             |  6 +-
 arch/arm/src/armv7-a/arm_releasepending.c        |  2 +-
 arch/arm/src/armv7-a/arm_reprioritizertr.c       | 10 +--
 arch/arm/src/armv7-a/arm_unblocktask.c           |  4 +-
 arch/arm/src/armv7-m/arm_blocktask.c             |  6 +-
 arch/arm/src/armv7-m/arm_releasepending.c        |  2 +-
 arch/arm/src/armv7-m/arm_reprioritizertr.c       | 10 +--
 arch/arm/src/armv7-m/arm_unblocktask.c           |  4 +-
 arch/arm/src/armv7-r/arm_blocktask.c             |  6 +-
 arch/arm/src/armv7-r/arm_releasepending.c        |  2 +-
 arch/arm/src/armv7-r/arm_reprioritizertr.c       | 10 +--
 arch/arm/src/armv7-r/arm_unblocktask.c           |  4 +-
 arch/arm/src/armv8-m/arm_blocktask.c             |  6 +-
 arch/arm/src/armv8-m/arm_releasepending.c        |  2 +-
 arch/arm/src/armv8-m/arm_reprioritizertr.c       | 10 +--
 arch/arm/src/armv8-m/arm_unblocktask.c           |  4 +-
 arch/avr/src/avr/up_blocktask.c                  |  6 +-
 arch/avr/src/avr/up_releasepending.c             |  2 +-
 arch/avr/src/avr/up_reprioritizertr.c            | 10 +--
 arch/avr/src/avr/up_unblocktask.c                |  4 +-
 arch/avr/src/avr32/up_blocktask.c                |  6 +-
 arch/avr/src/avr32/up_releasepending.c           |  2 +-
 arch/avr/src/avr32/up_reprioritizertr.c          | 10 +--
 arch/avr/src/avr32/up_unblocktask.c              |  4 +-
 arch/hc/src/common/up_blocktask.c                |  6 +-
 arch/hc/src/common/up_releasepending.c           |  2 +-
 arch/hc/src/common/up_reprioritizertr.c          | 10 +--
 arch/hc/src/common/up_unblocktask.c              |  4 +-
 arch/mips/src/mips32/mips_blocktask.c            |  6 +-
 arch/mips/src/mips32/mips_releasepending.c       |  2 +-
 arch/mips/src/mips32/mips_reprioritizertr.c      | 10 +--
 arch/mips/src/mips32/mips_unblocktask.c          |  4 +-
 arch/misoc/src/lm32/lm32_blocktask.c             |  6 +-
 arch/misoc/src/lm32/lm32_releasepending.c        |  2 +-
 arch/misoc/src/lm32/lm32_reprioritizertr.c       | 10 +--
 arch/misoc/src/lm32/lm32_unblocktask.c           |  4 +-
 arch/misoc/src/minerva/minerva_blocktask.c       |  6 +-
 arch/misoc/src/minerva/minerva_releasepending.c  |  2 +-
 arch/misoc/src/minerva/minerva_reprioritizertr.c | 10 +--
 arch/misoc/src/minerva/minerva_unblocktask.c     |  4 +-
 arch/or1k/src/common/up_blocktask.c              |  6 +-
 arch/or1k/src/common/up_releasepending.c         |  2 +-
 arch/or1k/src/common/up_reprioritizertr.c        | 10 +--
 arch/or1k/src/common/up_unblocktask.c            |  4 +-
 arch/renesas/src/common/up_blocktask.c           |  6 +-
 arch/renesas/src/common/up_releasepending.c      |  2 +-
 arch/renesas/src/common/up_reprioritizertr.c     | 10 +--
 arch/renesas/src/common/up_unblocktask.c         |  4 +-
 arch/risc-v/src/rv32im/riscv_blocktask.c         |  6 +-
 arch/risc-v/src/rv32im/riscv_releasepending.c    |  2 +-
 arch/risc-v/src/rv32im/riscv_reprioritizertr.c   | 10 +--
 arch/risc-v/src/rv32im/riscv_unblocktask.c       |  4 +-
 arch/risc-v/src/rv64gc/riscv_blocktask.c         |  6 +-
 arch/risc-v/src/rv64gc/riscv_releasepending.c    |  2 +-
 arch/risc-v/src/rv64gc/riscv_reprioritizertr.c   | 10 +--
 arch/risc-v/src/rv64gc/riscv_unblocktask.c       |  4 +-
 arch/sim/src/sim/up_blocktask.c                  |  6 +-
 arch/sim/src/sim/up_releasepending.c             |  2 +-
 arch/sim/src/sim/up_reprioritizertr.c            | 10 +--
 arch/sim/src/sim/up_unblocktask.c                |  4 +-
 arch/x86/src/common/up_blocktask.c               |  6 +-
 arch/x86/src/common/up_releasepending.c          |  2 +-
 arch/x86/src/common/up_reprioritizertr.c         | 10 +--
 arch/x86/src/common/up_unblocktask.c             |  4 +-
 arch/x86_64/src/common/up_blocktask.c            |  6 +-
 arch/x86_64/src/common/up_releasepending.c       |  2 +-
 arch/x86_64/src/common/up_reprioritizertr.c      | 10 +--
 arch/x86_64/src/common/up_unblocktask.c          |  4 +-
 arch/xtensa/src/common/xtensa_blocktask.c        |  6 +-
 arch/xtensa/src/common/xtensa_releasepending.c   |  2 +-
 arch/xtensa/src/common/xtensa_reprioritizertr.c  | 10 +--
 arch/xtensa/src/common/xtensa_unblocktask.c      |  4 +-
 arch/z16/src/common/z16_blocktask.c              |  6 +-
 arch/z16/src/common/z16_releasepending.c         |  2 +-
 arch/z16/src/common/z16_reprioritizertr.c        | 10 +--
 arch/z16/src/common/z16_unblocktask.c            |  4 +-
 arch/z80/src/common/z80_blocktask.c              |  6 +-
 arch/z80/src/common/z80_releasepending.c         |  2 +-
 arch/z80/src/common/z80_reprioritizertr.c        | 10 +--
 arch/z80/src/common/z80_unblocktask.c            |  4 +-
 sched/group/group_continue.c                     |  2 +-
 sched/group/group_suspendchildren.c              |  2 +-
 sched/irq/irq_csection.c                         | 10 +--
 sched/paging/pg_miss.c                           |  2 +-
 sched/paging/pg_worker.c                         |  6 +-
 sched/pthread/pthread_create.c                   |  8 +--
 sched/sched/sched.h                              | 82 ++++++++++++------------
 sched/sched/sched_addblocked.c                   | 10 +--
 sched/sched/sched_addprioritized.c               |  4 +-
 sched/sched/sched_addreadytorun.c                | 36 +++++------
 sched/sched/sched_continue.c                     |  4 +-
 sched/sched/sched_cpupause.c                     |  4 +-
 sched/sched/sched_cpuselect.c                    |  4 +-
 sched/sched/sched_critmonitor.c                  | 16 ++---
 sched/sched/sched_lock.c                         | 51 ++++++---------
 sched/sched/sched_mergepending.c                 | 36 +++++------
 sched/sched/sched_mergeprioritized.c             |  8 +--
 sched/sched/sched_processtimer.c                 |  4 +-
 sched/sched/sched_removeblocked.c                |  4 +-
 sched/sched/sched_removereadytorun.c             | 18 +++---
 sched/sched/sched_reprioritize.c                 |  4 +-
 sched/sched/sched_resumescheduler.c              | 10 +--
 sched/sched/sched_roundrobin.c                   |  8 +--
 sched/sched/sched_setaffinity.c                  |  4 +-
 sched/sched/sched_setparam.c                     |  4 +-
 sched/sched/sched_setpriority.c                  | 16 ++---
 sched/sched/sched_setscheduler.c                 | 10 +--
 sched/sched/sched_sporadic.c                     | 50 +++++++--------
 sched/sched/sched_suspend.c                      |  8 +--
 sched/sched/sched_suspendscheduler.c             |  4 +-
 sched/sched/sched_tasklistlock.c                 | 14 ++--
 sched/sched/sched_timerexpiration.c              | 28 ++++----
 sched/sched/sched_unlock.c                       | 20 +++---
 sched/sched/sched_verifytcb.c                    |  4 +-
 sched/sched/sched_yield.c                        |  2 +-
 sched/semaphore/sem_holder.c                     | 10 +--
 sched/signal/sig_default.c                       |  4 +-
 sched/signal/sig_dispatch.c                      |  2 +-
 sched/task/task_exit.c                           | 10 +--
 sched/task/task_recover.c                        |  2 +-
 sched/task/task_restart.c                        |  2 +-
 sched/task/task_terminate.c                      |  2 +-
 sched/wdog/wd_cancel.c                           |  2 +-
 sched/wdog/wd_start.c                            |  4 +-
 sched/wqueue/kwork_inherit.c                     |  6 +-
 134 files changed, 508 insertions(+), 523 deletions(-)

diff --git a/TODO b/TODO
index 0965b44..2b50886 100644
--- a/TODO
+++ b/TODO
@@ -591,10 +591,10 @@ o SMP
 
               The log below was reported with NuttX running on a two-core
               Cortex-A7 architecture in SMP mode.  You can see that
-               when sched_addreadytorun() was called, the g_cpu_irqset is 3.
+               when nxsched_add_readytorun() was called, the g_cpu_irqset is 3.
 
-                 sched_addreadytorun: irqset cpu 1, me 0 btcbname init, irqset 1 irqcount 2.
-                 sched_addreadytorun: sched_addreadytorun line 338 g_cpu_irqset = 3.
+                 nxsched_add_readytorun: irqset cpu 1, me 0 btcbname init, irqset 1 irqcount 2.
+                 nxsched_add_readytorun: nxsched_add_readytorun line 338 g_cpu_irqset = 3.
 
                This can happen, but only under a very certain condition.
                g_cpu_irqset only exists to support this certain condition:
diff --git a/arch/arm/src/arm/arm_blocktask.c b/arch/arm/src/arm/arm_blocktask.c
index 85e0c46..d146927 100644
--- a/arch/arm/src/arm/arm_blocktask.c
+++ b/arch/arm/src/arm/arm_blocktask.c
@@ -89,11 +89,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -101,7 +101,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
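
The same two-step decision recurs in up_block_task() for every architecture
below: a context switch is needed if removing the task unseated the head of
the ready-to-run list, or if merging the pending list installed a new head,
hence the |= accumulation. A self-contained model of that rule (the stub
functions are hypothetical stand-ins; the real nxsched_ calls operate on the
kernel task lists):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the two scheduler calls above */

    static bool remove_readytorun_stub(void)
    {
      return true;   /* pretend the blocked task was the running head */
    }

    static bool merge_pending_stub(void)
    {
      return false;  /* pretend no pending task displaced the new head */
    }

    int main(void)
    {
      /* Same accumulation as up_block_task(): either call replacing
       * the head of the ready-to-run list forces a context switch.
       */

      bool switch_needed = remove_readytorun_stub();
      switch_needed |= merge_pending_stub();

      printf("context switch needed: %d\n", switch_needed);
      return 0;
    }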
diff --git a/arch/arm/src/arm/arm_releasepending.c b/arch/arm/src/arm/arm_releasepending.c
index 2ff74c7..cb36eaa 100644
--- a/arch/arm/src/arm/arm_releasepending.c
+++ b/arch/arm/src/arm/arm_releasepending.c
@@ -56,7 +56,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/arm/src/arm/arm_reprioritizertr.c b/arch/arm/src/arm/arm_reprioritizertr.c
index 3b3d853..6c2416b 100644
--- a/arch/arm/src/arm/arm_reprioritizertr.c
+++ b/arch/arm/src/arm/arm_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE or of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
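
Note that the reprioritization path combines its two results with an
exclusive OR rather than an inclusive OR: if the task is removed from the
head and then re-added at the head, the running task is unchanged and no
switch is needed. A runnable sketch of that truth table (illustrative only,
not NuttX code):

    #include <stdbool.h>
    #include <stdio.h>

    /* The XOR rule from up_reprioritize_rtr(): switch iff exactly one
     * of the remove/add operations changed the head of the
     * ready-to-run list.
     */

    static bool switch_needed(bool removed_head, bool added_at_head)
    {
      return removed_head ^ added_at_head;
    }

    int main(void)
    {
      printf("removed from head, re-added at head -> %d (no switch)\n",
             switch_needed(true, true));
      printf("removed from head, re-added lower   -> %d (switch)\n",
             switch_needed(true, false));
      printf("removed lower, added at head        -> %d (switch)\n",
             switch_needed(false, true));
      printf("head never involved                 -> %d (no switch)\n",
             switch_needed(false, false));
      return 0;
    }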
diff --git a/arch/arm/src/arm/arm_unblocktask.c b/arch/arm/src/arm/arm_unblocktask.c
index c0dd378..2642253 100644
--- a/arch/arm/src/arm/arm_unblocktask.c
+++ b/arch/arm/src/arm/arm_unblocktask.c
@@ -65,13 +65,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/arm/src/armv6-m/arm_blocktask.c b/arch/arm/src/armv6-m/arm_blocktask.c
index 5af2d45..90562fa 100644
--- a/arch/arm/src/armv6-m/arm_blocktask.c
+++ b/arch/arm/src/armv6-m/arm_blocktask.c
@@ -73,11 +73,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -85,7 +85,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/arm/src/armv6-m/arm_releasepending.c b/arch/arm/src/armv6-m/arm_releasepending.c
index cf5b6c8..07269e7 100644
--- a/arch/arm/src/armv6-m/arm_releasepending.c
+++ b/arch/arm/src/armv6-m/arm_releasepending.c
@@ -55,7 +55,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to
        * switch contexts.
diff --git a/arch/arm/src/armv6-m/arm_reprioritizertr.c b/arch/arm/src/armv6-m/arm_reprioritizertr.c
index 71ff1d6..993dd02 100644
--- a/arch/arm/src/armv6-m/arm_reprioritizertr.c
+++ b/arch/arm/src/armv6-m/arm_reprioritizertr.c
@@ -82,24 +82,24 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just removed the head
+       * nxsched_remove_readytorun will return true if we just removed the head
        * of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
-      /* Return the task to the ready-to-run task list. sched_addreadytorun
+      /* Return the task to the ready-to-run task list. nxsched_add_readytorun
        * will return true if the task was added to the head of ready-to-run
        * list.  We will need to perform a context switch only if the
        * EXCLUSIVE or of the two calls is non-zero (i.e., one and only one
        * of the calls changes the head of the ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed (i.e. if the head
        * of the ready-to-run list is no longer the same).
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/arm/src/armv6-m/arm_unblocktask.c b/arch/arm/src/armv6-m/arm_unblocktask.c
index d6558d8..ff4845a 100644
--- a/arch/arm/src/armv6-m/arm_unblocktask.c
+++ b/arch/arm/src/armv6-m/arm_unblocktask.c
@@ -63,13 +63,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/arm/src/armv7-a/arm_blocktask.c b/arch/arm/src/armv7-a/arm_blocktask.c
index e82fe61..4f554ff 100644
--- a/arch/arm/src/armv7-a/arm_blocktask.c
+++ b/arch/arm/src/armv7-a/arm_blocktask.c
@@ -89,11 +89,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -101,7 +101,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/arm/src/armv7-a/arm_releasepending.c b/arch/arm/src/armv7-a/arm_releasepending.c
index 9e954df..5cfffc9 100644
--- a/arch/arm/src/armv7-a/arm_releasepending.c
+++ b/arch/arm/src/armv7-a/arm_releasepending.c
@@ -55,7 +55,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to
        * switch contexts.
diff --git a/arch/arm/src/armv7-a/arm_reprioritizertr.c b/arch/arm/src/armv7-a/arm_reprioritizertr.c
index 4fbc45a..683d0ec 100644
--- a/arch/arm/src/armv7-a/arm_reprioritizertr.c
+++ b/arch/arm/src/armv7-a/arm_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE or of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/arm/src/armv7-a/arm_unblocktask.c b/arch/arm/src/armv7-a/arm_unblocktask.c
index 106d088..6bee68d 100644
--- a/arch/arm/src/armv7-a/arm_unblocktask.c
+++ b/arch/arm/src/armv7-a/arm_unblocktask.c
@@ -78,13 +78,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/arm/src/armv7-m/arm_blocktask.c b/arch/arm/src/armv7-m/arm_blocktask.c
index af1b0cd..3f6b8ba 100644
--- a/arch/arm/src/armv7-m/arm_blocktask.c
+++ b/arch/arm/src/armv7-m/arm_blocktask.c
@@ -73,11 +73,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -85,7 +85,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/arm/src/armv7-m/arm_releasepending.c b/arch/arm/src/armv7-m/arm_releasepending.c
index 1ffc48d..b228bbb 100644
--- a/arch/arm/src/armv7-m/arm_releasepending.c
+++ b/arch/arm/src/armv7-m/arm_releasepending.c
@@ -59,7 +59,7 @@ void up_release_pending(void)
   sched_lock();
 #endif
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/arm/src/armv7-m/arm_reprioritizertr.c b/arch/arm/src/armv7-m/arm_reprioritizertr.c
index d84d37a..3695be7 100644
--- a/arch/arm/src/armv7-m/arm_reprioritizertr.c
+++ b/arch/arm/src/armv7-m/arm_reprioritizertr.c
@@ -82,24 +82,24 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just removed the head
+       * nxsched_remove_readytorun will return true if we just removed the head
        * of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
-      /* Return the task to the ready-to-run task list. sched_addreadytorun
+      /* Return the task to the ready-to-run task list. nxsched_add_readytorun
        * will return true if the task was added to the head of ready-to-run
        * list.  We will need to perform a context switch only if the
        * EXCLUSIVE or of the two calls is non-zero (i.e., one and only one
        * of the calls changes the head of the ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed (i.e. if the head
        * of the ready-to-run list is no longer the same).
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/arm/src/armv7-m/arm_unblocktask.c b/arch/arm/src/armv7-m/arm_unblocktask.c
index 5b5071f..d4d4148 100644
--- a/arch/arm/src/armv7-m/arm_unblocktask.c
+++ b/arch/arm/src/armv7-m/arm_unblocktask.c
@@ -64,13 +64,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/arm/src/armv7-r/arm_blocktask.c b/arch/arm/src/armv7-r/arm_blocktask.c
index efefdd6..d6a2e8d 100644
--- a/arch/arm/src/armv7-r/arm_blocktask.c
+++ b/arch/arm/src/armv7-r/arm_blocktask.c
@@ -89,11 +89,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -101,7 +101,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/arm/src/armv7-r/arm_releasepending.c b/arch/arm/src/armv7-r/arm_releasepending.c
index c5ddf76..d134a72 100644
--- a/arch/arm/src/armv7-r/arm_releasepending.c
+++ b/arch/arm/src/armv7-r/arm_releasepending.c
@@ -56,7 +56,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to
        * switch contexts.
diff --git a/arch/arm/src/armv7-r/arm_reprioritizertr.c b/arch/arm/src/armv7-r/arm_reprioritizertr.c
index 5f13b20..ce9ef37 100644
--- a/arch/arm/src/armv7-r/arm_reprioritizertr.c
+++ b/arch/arm/src/armv7-r/arm_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE or of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/arm/src/armv7-r/arm_unblocktask.c b/arch/arm/src/armv7-r/arm_unblocktask.c
index baf2490..ce845df 100644
--- a/arch/arm/src/armv7-r/arm_unblocktask.c
+++ b/arch/arm/src/armv7-r/arm_unblocktask.c
@@ -77,13 +77,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/arm/src/armv8-m/arm_blocktask.c b/arch/arm/src/armv8-m/arm_blocktask.c
index 240b51a..ae2b46e 100755
--- a/arch/arm/src/armv8-m/arm_blocktask.c
+++ b/arch/arm/src/armv8-m/arm_blocktask.c
@@ -73,11 +73,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -85,7 +85,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/arm/src/armv8-m/arm_releasepending.c b/arch/arm/src/armv8-m/arm_releasepending.c
index 1a47074..c3755ec 100755
--- a/arch/arm/src/armv8-m/arm_releasepending.c
+++ b/arch/arm/src/armv8-m/arm_releasepending.c
@@ -59,7 +59,7 @@ void up_release_pending(void)
   sched_lock();
 #endif
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/arm/src/armv8-m/arm_reprioritizertr.c b/arch/arm/src/armv8-m/arm_reprioritizertr.c
index 2d855fb..2c1203a 100755
--- a/arch/arm/src/armv8-m/arm_reprioritizertr.c
+++ b/arch/arm/src/armv8-m/arm_reprioritizertr.c
@@ -82,24 +82,24 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just removed the head
+       * nxsched_remove_readytorun will return true if we just removed the head
        * of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
-      /* Return the task to the ready-to-run task list. sched_addreadytorun
+      /* Return the task to the ready-to-run task list. nxsched_add_readytorun
        * will return true if the task was added to the head of ready-to-run
        * list.  We will need to perform a context switch only if the
        * EXCLUSIVE or of the two calls is non-zero (i.e., one and only one
        * of the calls changes the head of the ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed (i.e. if the head
        * of the ready-to-run list is no longer the same).
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/arm/src/armv8-m/arm_unblocktask.c b/arch/arm/src/armv8-m/arm_unblocktask.c
index e4ec903..75f42bf 100755
--- a/arch/arm/src/armv8-m/arm_unblocktask.c
+++ b/arch/arm/src/armv8-m/arm_unblocktask.c
@@ -64,13 +64,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/avr/src/avr/up_blocktask.c b/arch/avr/src/avr/up_blocktask.c
index 8db6d1b..e0c13aa 100644
--- a/arch/avr/src/avr/up_blocktask.c
+++ b/arch/avr/src/avr/up_blocktask.c
@@ -88,11 +88,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -100,7 +100,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/avr/src/avr/up_releasepending.c b/arch/avr/src/avr/up_releasepending.c
index 9f7ee47..a8c24b1 100644
--- a/arch/avr/src/avr/up_releasepending.c
+++ b/arch/avr/src/avr/up_releasepending.c
@@ -55,7 +55,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/avr/src/avr/up_reprioritizertr.c b/arch/avr/src/avr/up_reprioritizertr.c
index a883bab..ce91625 100644
--- a/arch/avr/src/avr/up_reprioritizertr.c
+++ b/arch/avr/src/avr/up_reprioritizertr.c
@@ -82,25 +82,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE or of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -113,7 +113,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/avr/src/avr/up_unblocktask.c b/arch/avr/src/avr/up_unblocktask.c
index d369726..dafba0f 100644
--- a/arch/avr/src/avr/up_unblocktask.c
+++ b/arch/avr/src/avr/up_unblocktask.c
@@ -79,13 +79,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/avr/src/avr32/up_blocktask.c b/arch/avr/src/avr32/up_blocktask.c
index 759c8d3..38d8f0c 100644
--- a/arch/avr/src/avr32/up_blocktask.c
+++ b/arch/avr/src/avr32/up_blocktask.c
@@ -89,11 +89,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -101,7 +101,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/avr/src/avr32/up_releasepending.c b/arch/avr/src/avr32/up_releasepending.c
index f25b628..63448ad 100644
--- a/arch/avr/src/avr32/up_releasepending.c
+++ b/arch/avr/src/avr32/up_releasepending.c
@@ -56,7 +56,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/avr/src/avr32/up_reprioritizertr.c b/arch/avr/src/avr32/up_reprioritizertr.c
index c8293de..cb01037 100644
--- a/arch/avr/src/avr32/up_reprioritizertr.c
+++ b/arch/avr/src/avr32/up_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE or of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/avr/src/avr32/up_unblocktask.c b/arch/avr/src/avr32/up_unblocktask.c
index 1afbf66..1b52d35 100644
--- a/arch/avr/src/avr32/up_unblocktask.c
+++ b/arch/avr/src/avr32/up_unblocktask.c
@@ -80,13 +80,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/hc/src/common/up_blocktask.c b/arch/hc/src/common/up_blocktask.c
index dc94aaa..5ce5c6f 100644
--- a/arch/hc/src/common/up_blocktask.c
+++ b/arch/hc/src/common/up_blocktask.c
@@ -74,11 +74,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -86,7 +86,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/hc/src/common/up_releasepending.c b/arch/hc/src/common/up_releasepending.c
index 8e82797..d140da1 100644
--- a/arch/hc/src/common/up_releasepending.c
+++ b/arch/hc/src/common/up_releasepending.c
@@ -55,7 +55,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/hc/src/common/up_reprioritizertr.c b/arch/hc/src/common/up_reprioritizertr.c
index 74177a3..48002b1 100644
--- a/arch/hc/src/common/up_reprioritizertr.c
+++ b/arch/hc/src/common/up_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE or of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/hc/src/common/up_unblocktask.c b/arch/hc/src/common/up_unblocktask.c
index 7fe7b15..bc92418 100644
--- a/arch/hc/src/common/up_unblocktask.c
+++ b/arch/hc/src/common/up_unblocktask.c
@@ -65,13 +65,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/mips/src/mips32/mips_blocktask.c b/arch/mips/src/mips32/mips_blocktask.c
index 82a1cbd..78b6153 100644
--- a/arch/mips/src/mips32/mips_blocktask.c
+++ b/arch/mips/src/mips32/mips_blocktask.c
@@ -90,11 +90,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -102,7 +102,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/mips/src/mips32/mips_releasepending.c b/arch/mips/src/mips32/mips_releasepending.c
index bddf88e..3b0aff4 100644
--- a/arch/mips/src/mips32/mips_releasepending.c
+++ b/arch/mips/src/mips32/mips_releasepending.c
@@ -75,7 +75,7 @@ void up_release_pending(void)
 
   /* sched_lock(); */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/mips/src/mips32/mips_reprioritizertr.c b/arch/mips/src/mips32/mips_reprioritizertr.c
index 0547282..a9c2699 100644
--- a/arch/mips/src/mips32/mips_reprioritizertr.c
+++ b/arch/mips/src/mips32/mips_reprioritizertr.c
@@ -100,25 +100,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE or of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -131,7 +131,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/mips/src/mips32/mips_unblocktask.c b/arch/mips/src/mips32/mips_unblocktask.c
index 73b2168..9fc88d7 100644
--- a/arch/mips/src/mips32/mips_unblocktask.c
+++ b/arch/mips/src/mips32/mips_unblocktask.c
@@ -82,13 +82,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/misoc/src/lm32/lm32_blocktask.c b/arch/misoc/src/lm32/lm32_blocktask.c
index 7f4c60e..08c94a4 100644
--- a/arch/misoc/src/lm32/lm32_blocktask.c
+++ b/arch/misoc/src/lm32/lm32_blocktask.c
@@ -91,11 +91,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -103,7 +103,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/misoc/src/lm32/lm32_releasepending.c b/arch/misoc/src/lm32/lm32_releasepending.c
index be20f9a..8807e66 100644
--- a/arch/misoc/src/lm32/lm32_releasepending.c
+++ b/arch/misoc/src/lm32/lm32_releasepending.c
@@ -74,7 +74,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/misoc/src/lm32/lm32_reprioritizertr.c b/arch/misoc/src/lm32/lm32_reprioritizertr.c
index aebdee3..0063dca 100644
--- a/arch/misoc/src/lm32/lm32_reprioritizertr.c
+++ b/arch/misoc/src/lm32/lm32_reprioritizertr.c
@@ -85,25 +85,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE or of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -116,7 +116,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/misoc/src/lm32/lm32_unblocktask.c b/arch/misoc/src/lm32/lm32_unblocktask.c
index 90050de..dbe9334 100644
--- a/arch/misoc/src/lm32/lm32_unblocktask.c
+++ b/arch/misoc/src/lm32/lm32_unblocktask.c
@@ -83,13 +83,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/misoc/src/minerva/minerva_blocktask.c b/arch/misoc/src/minerva/minerva_blocktask.c
index 9233f70..772ef88 100644
--- a/arch/misoc/src/minerva/minerva_blocktask.c
+++ b/arch/misoc/src/minerva/minerva_blocktask.c
@@ -91,11 +91,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t) task_state);
+  nxsched_add_blocked(tcb, (tstate_t) task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run task
    * list now
@@ -103,7 +103,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/misoc/src/minerva/minerva_releasepending.c b/arch/misoc/src/minerva/minerva_releasepending.c
index e036d10..a30ab96 100644
--- a/arch/misoc/src/minerva/minerva_releasepending.c
+++ b/arch/misoc/src/minerva/minerva_releasepending.c
@@ -76,7 +76,7 @@ void up_release_pending(void)
 
   /* sched_lock(); */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed! We will need to switch
        * contexts. Update scheduler parameters.
diff --git a/arch/misoc/src/minerva/minerva_reprioritizertr.c b/arch/misoc/src/minerva/minerva_reprioritizertr.c
index a6f4daf..697e4e5 100644
--- a/arch/misoc/src/minerva/minerva_reprioritizertr.c
+++ b/arch/misoc/src/minerva/minerva_reprioritizertr.c
@@ -85,24 +85,24 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun() will return true if we just remove the
+       * nxsched_remove_readytorun() will return true if we just remove the
        * head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t) priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was added to the
+       * nxsched_add_readytorun will return true if the task was added to the
        * new list.  We will need to perform a context switch only if the
        * EXCLUSIVE or of the two calls is non-zero (i.e., one and only one
        * of the calls changes the head of the ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -115,7 +115,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/misoc/src/minerva/minerva_unblocktask.c b/arch/misoc/src/minerva/minerva_unblocktask.c
index 65704f3..495790f 100644
--- a/arch/misoc/src/minerva/minerva_unblocktask.c
+++ b/arch/misoc/src/minerva/minerva_unblocktask.c
@@ -83,13 +83,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized ready-to-run
    * task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do a context
        * switch to the new task.
diff --git a/arch/or1k/src/common/up_blocktask.c b/arch/or1k/src/common/up_blocktask.c
index 732c0b2..6104015 100644
--- a/arch/or1k/src/common/up_blocktask.c
+++ b/arch/or1k/src/common/up_blocktask.c
@@ -89,11 +89,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -101,7 +101,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
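
Note the contrast with the reprioritization hunks: when a task blocks, the
pending-task merge can only add reasons to switch, so the boolean results
are OR-ed rather than XOR-ed.  A sketch of that accumulation follows, with
the three scheduler calls stubbed out (the _stub names are hypothetical
stand-ins, not NuttX interfaces).

    #include <stdbool.h>
    #include <stdio.h>

    static bool nxsched_remove_readytorun_stub(void) { return true;  }
    static void nxsched_add_blocked_stub(void)       { }
    static bool nxsched_merge_pending_stub(void)     { return false; }

    static bool pending_head = true;  /* pretend g_pendingtasks is non-empty */

    int main(void)
    {
      /* Removing the running task always forces a switch */

      bool switch_needed = nxsched_remove_readytorun_stub();

      nxsched_add_blocked_stub();

      if (pending_head)
        {
          /* Merging pending tasks can only add a reason to switch */

          switch_needed |= nxsched_merge_pending_stub();
        }

      printf("context switch needed: %s\n", switch_needed ? "yes" : "no");
      return 0;
    }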
diff --git a/arch/or1k/src/common/up_releasepending.c b/arch/or1k/src/common/up_releasepending.c
index 1fe4ac6..a322a53 100644
--- a/arch/or1k/src/common/up_releasepending.c
+++ b/arch/or1k/src/common/up_releasepending.c
@@ -56,7 +56,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/or1k/src/common/up_reprioritizertr.c b/arch/or1k/src/common/up_reprioritizertr.c
index 1cea87c..6ea650b 100644
--- a/arch/or1k/src/common/up_reprioritizertr.c
+++ b/arch/or1k/src/common/up_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/or1k/src/common/up_unblocktask.c b/arch/or1k/src/common/up_unblocktask.c
index 95da315..1ef8ac2 100644
--- a/arch/or1k/src/common/up_unblocktask.c
+++ b/arch/or1k/src/common/up_unblocktask.c
@@ -65,13 +65,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/renesas/src/common/up_blocktask.c b/arch/renesas/src/common/up_blocktask.c
index 3a643fc..9aca436 100644
--- a/arch/renesas/src/common/up_blocktask.c
+++ b/arch/renesas/src/common/up_blocktask.c
@@ -73,11 +73,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -85,7 +85,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/renesas/src/common/up_releasepending.c b/arch/renesas/src/common/up_releasepending.c
index 29caeb1..6c25fde 100644
--- a/arch/renesas/src/common/up_releasepending.c
+++ b/arch/renesas/src/common/up_releasepending.c
@@ -56,7 +56,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/renesas/src/common/up_reprioritizertr.c b/arch/renesas/src/common/up_reprioritizertr.c
index 2092be8..12d3fb3 100644
--- a/arch/renesas/src/common/up_reprioritizertr.c
+++ b/arch/renesas/src/common/up_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/renesas/src/common/up_unblocktask.c b/arch/renesas/src/common/up_unblocktask.c
index f54844e..f1cd867 100644
--- a/arch/renesas/src/common/up_unblocktask.c
+++ b/arch/renesas/src/common/up_unblocktask.c
@@ -65,13 +65,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/risc-v/src/rv32im/riscv_blocktask.c b/arch/risc-v/src/rv32im/riscv_blocktask.c
index 7fa6a8b..2b14183 100644
--- a/arch/risc-v/src/rv32im/riscv_blocktask.c
+++ b/arch/risc-v/src/rv32im/riscv_blocktask.c
@@ -90,11 +90,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -102,7 +102,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/risc-v/src/rv32im/riscv_releasepending.c b/arch/risc-v/src/rv32im/riscv_releasepending.c
index f825745..d8c0de1 100644
--- a/arch/risc-v/src/rv32im/riscv_releasepending.c
+++ b/arch/risc-v/src/rv32im/riscv_releasepending.c
@@ -75,7 +75,7 @@ void up_release_pending(void)
 
   /* sched_lock(); */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/risc-v/src/rv32im/riscv_reprioritizertr.c b/arch/risc-v/src/rv32im/riscv_reprioritizertr.c
index 1bea1dd..4c42e58 100644
--- a/arch/risc-v/src/rv32im/riscv_reprioritizertr.c
+++ b/arch/risc-v/src/rv32im/riscv_reprioritizertr.c
@@ -100,25 +100,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -131,7 +131,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/risc-v/src/rv32im/riscv_unblocktask.c b/arch/risc-v/src/rv32im/riscv_unblocktask.c
index 739b8b7..3b67cf8 100644
--- a/arch/risc-v/src/rv32im/riscv_unblocktask.c
+++ b/arch/risc-v/src/rv32im/riscv_unblocktask.c
@@ -82,13 +82,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/risc-v/src/rv64gc/riscv_blocktask.c b/arch/risc-v/src/rv64gc/riscv_blocktask.c
index ce07e63..24b98ca 100644
--- a/arch/risc-v/src/rv64gc/riscv_blocktask.c
+++ b/arch/risc-v/src/rv64gc/riscv_blocktask.c
@@ -90,11 +90,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -102,7 +102,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/risc-v/src/rv64gc/riscv_releasepending.c b/arch/risc-v/src/rv64gc/riscv_releasepending.c
index 6009e5e..eff977c 100644
--- a/arch/risc-v/src/rv64gc/riscv_releasepending.c
+++ b/arch/risc-v/src/rv64gc/riscv_releasepending.c
@@ -75,7 +75,7 @@ void up_release_pending(void)
 
   /* sched_lock(); */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/risc-v/src/rv64gc/riscv_reprioritizertr.c b/arch/risc-v/src/rv64gc/riscv_reprioritizertr.c
index 13990b7..a415766 100644
--- a/arch/risc-v/src/rv64gc/riscv_reprioritizertr.c
+++ b/arch/risc-v/src/rv64gc/riscv_reprioritizertr.c
@@ -100,25 +100,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -131,7 +131,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/risc-v/src/rv64gc/riscv_unblocktask.c b/arch/risc-v/src/rv64gc/riscv_unblocktask.c
index 01b40a5..0dfd147 100644
--- a/arch/risc-v/src/rv64gc/riscv_unblocktask.c
+++ b/arch/risc-v/src/rv64gc/riscv_unblocktask.c
@@ -82,13 +82,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/sim/src/sim/up_blocktask.c b/arch/sim/src/sim/up_blocktask.c
index a8bc9b2..e81df88 100644
--- a/arch/sim/src/sim/up_blocktask.c
+++ b/arch/sim/src/sim/up_blocktask.c
@@ -90,11 +90,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -102,7 +102,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/sim/src/sim/up_releasepending.c b/arch/sim/src/sim/up_releasepending.c
index 113ec2a..b1c1fb9 100644
--- a/arch/sim/src/sim/up_releasepending.c
+++ b/arch/sim/src/sim/up_releasepending.c
@@ -72,7 +72,7 @@ void up_release_pending(void)
 
   /* sched_lock(); */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/sim/src/sim/up_reprioritizertr.c b/arch/sim/src/sim/up_reprioritizertr.c
index 6347a4e..c4ce317 100644
--- a/arch/sim/src/sim/up_reprioritizertr.c
+++ b/arch/sim/src/sim/up_reprioritizertr.c
@@ -98,25 +98,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -129,7 +129,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/sim/src/sim/up_unblocktask.c b/arch/sim/src/sim/up_unblocktask.c
index a2eb19f..5f739bc 100644
--- a/arch/sim/src/sim/up_unblocktask.c
+++ b/arch/sim/src/sim/up_unblocktask.c
@@ -82,13 +82,13 @@ void up_unblock_task(FAR struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! */
 
diff --git a/arch/x86/src/common/up_blocktask.c b/arch/x86/src/common/up_blocktask.c
index 318ce82..b3504bd 100644
--- a/arch/x86/src/common/up_blocktask.c
+++ b/arch/x86/src/common/up_blocktask.c
@@ -74,11 +74,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -86,7 +86,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/x86/src/common/up_releasepending.c b/arch/x86/src/common/up_releasepending.c
index 4e379e6..7cafbd1 100644
--- a/arch/x86/src/common/up_releasepending.c
+++ b/arch/x86/src/common/up_releasepending.c
@@ -56,7 +56,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/x86/src/common/up_reprioritizertr.c b/arch/x86/src/common/up_reprioritizertr.c
index f5638f5..25013bd 100644
--- a/arch/x86/src/common/up_reprioritizertr.c
+++ b/arch/x86/src/common/up_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/x86/src/common/up_unblocktask.c b/arch/x86/src/common/up_unblocktask.c
index aedc3ec..d62d00b 100644
--- a/arch/x86/src/common/up_unblocktask.c
+++ b/arch/x86/src/common/up_unblocktask.c
@@ -64,13 +64,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/x86_64/src/common/up_blocktask.c b/arch/x86_64/src/common/up_blocktask.c
index fb4fa67..7a9143b 100644
--- a/arch/x86_64/src/common/up_blocktask.c
+++ b/arch/x86_64/src/common/up_blocktask.c
@@ -74,11 +74,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -86,7 +86,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/x86_64/src/common/up_releasepending.c b/arch/x86_64/src/common/up_releasepending.c
index 98481e1..1d8298c 100644
--- a/arch/x86_64/src/common/up_releasepending.c
+++ b/arch/x86_64/src/common/up_releasepending.c
@@ -56,7 +56,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/x86_64/src/common/up_reprioritizertr.c b/arch/x86_64/src/common/up_reprioritizertr.c
index f95a595..843f38d 100644
--- a/arch/x86_64/src/common/up_reprioritizertr.c
+++ b/arch/x86_64/src/common/up_reprioritizertr.c
@@ -83,25 +83,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -114,7 +114,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/x86_64/src/common/up_unblocktask.c b/arch/x86_64/src/common/up_unblocktask.c
index 1f07cc3..1cac932 100644
--- a/arch/x86_64/src/common/up_unblocktask.c
+++ b/arch/x86_64/src/common/up_unblocktask.c
@@ -66,13 +66,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/xtensa/src/common/xtensa_blocktask.c b/arch/xtensa/src/common/xtensa_blocktask.c
index 197504d..934f460 100644
--- a/arch/xtensa/src/common/xtensa_blocktask.c
+++ b/arch/xtensa/src/common/xtensa_blocktask.c
@@ -90,11 +90,11 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -102,7 +102,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/xtensa/src/common/xtensa_releasepending.c b/arch/xtensa/src/common/xtensa_releasepending.c
index b63ba17..3f875c8 100644
--- a/arch/xtensa/src/common/xtensa_releasepending.c
+++ b/arch/xtensa/src/common/xtensa_releasepending.c
@@ -75,7 +75,7 @@ void up_release_pending(void)
 
   /* sched_lock(); */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to
        * switch contexts.
diff --git a/arch/xtensa/src/common/xtensa_reprioritizertr.c b/arch/xtensa/src/common/xtensa_reprioritizertr.c
index 27a9a9e..501964f 100644
--- a/arch/xtensa/src/common/xtensa_reprioritizertr.c
+++ b/arch/xtensa/src/common/xtensa_reprioritizertr.c
@@ -85,25 +85,25 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -116,7 +116,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/xtensa/src/common/xtensa_unblocktask.c b/arch/xtensa/src/common/xtensa_unblocktask.c
index 4727046..2c796ed 100644
--- a/arch/xtensa/src/common/xtensa_unblocktask.c
+++ b/arch/xtensa/src/common/xtensa_unblocktask.c
@@ -82,13 +82,13 @@ void up_unblock_task(struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/z16/src/common/z16_blocktask.c b/arch/z16/src/common/z16_blocktask.c
index b44d105..3de333b 100644
--- a/arch/z16/src/common/z16_blocktask.c
+++ b/arch/z16/src/common/z16_blocktask.c
@@ -75,11 +75,11 @@ void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -87,7 +87,7 @@ void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/z16/src/common/z16_releasepending.c b/arch/z16/src/common/z16_releasepending.c
index 9fd93ea..1921240 100644
--- a/arch/z16/src/common/z16_releasepending.c
+++ b/arch/z16/src/common/z16_releasepending.c
@@ -57,7 +57,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/z16/src/common/z16_reprioritizertr.c b/arch/z16/src/common/z16_reprioritizertr.c
index 499ae0b..b047dcc 100644
--- a/arch/z16/src/common/z16_reprioritizertr.c
+++ b/arch/z16/src/common/z16_reprioritizertr.c
@@ -84,25 +84,25 @@ void up_reprioritize_rtr(FAR struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -115,7 +115,7 @@ void up_reprioritize_rtr(FAR struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/z16/src/common/z16_unblocktask.c b/arch/z16/src/common/z16_unblocktask.c
index 0b55619..f113ca2 100644
--- a/arch/z16/src/common/z16_unblocktask.c
+++ b/arch/z16/src/common/z16_unblocktask.c
@@ -68,13 +68,13 @@ void up_unblock_task(FAR struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/arch/z80/src/common/z80_blocktask.c b/arch/z80/src/common/z80_blocktask.c
index ad962c8..5d52108 100644
--- a/arch/z80/src/common/z80_blocktask.c
+++ b/arch/z80/src/common/z80_blocktask.c
@@ -77,11 +77,11 @@ void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state)
    * it should also be true that rtcb == tcb.
    */
 
-  switch_needed = sched_removereadytorun(tcb);
+  switch_needed = nxsched_remove_readytorun(tcb);
 
   /* Add the task to the specified blocked task list */
 
-  sched_addblocked(tcb, (tstate_t)task_state);
+  nxsched_add_blocked(tcb, (tstate_t)task_state);
 
   /* If there are any pending tasks, then add them to the ready-to-run
    * task list now
@@ -89,7 +89,7 @@ void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state)
 
   if (g_pendingtasks.head)
     {
-      switch_needed |= sched_mergepending();
+      switch_needed |= nxsched_merge_pending();
     }
 
   /* Now, perform the context switch if one is needed */
diff --git a/arch/z80/src/common/z80_releasepending.c b/arch/z80/src/common/z80_releasepending.c
index c7214ff..1d591f5 100644
--- a/arch/z80/src/common/z80_releasepending.c
+++ b/arch/z80/src/common/z80_releasepending.c
@@ -59,7 +59,7 @@ void up_release_pending(void)
 
   /* Merge the g_pendingtasks list into the ready-to-run task list */
 
-  if (sched_mergepending())
+  if (nxsched_merge_pending())
     {
       /* The currently active task has changed!  We will need to switch
        * contexts.
diff --git a/arch/z80/src/common/z80_reprioritizertr.c b/arch/z80/src/common/z80_reprioritizertr.c
index 2ab1103..70a2832 100644
--- a/arch/z80/src/common/z80_reprioritizertr.c
+++ b/arch/z80/src/common/z80_reprioritizertr.c
@@ -86,25 +86,25 @@ void up_reprioritize_rtr(FAR struct tcb_s *tcb, uint8_t priority)
       sinfo("TCB=%p PRI=%d\n", tcb, priority);
 
       /* Remove the tcb task from the ready-to-run list.
-       * sched_removereadytorun will return true if we just
+       * nxsched_remove_readytorun will return true if we just
        * remove the head of the ready to run list.
        */
 
-      switch_needed = sched_removereadytorun(tcb);
+      switch_needed = nxsched_remove_readytorun(tcb);
 
       /* Set up the new task priority */
 
       tcb->sched_priority = (uint8_t)priority;
 
       /* Return the task to the ready-to-run task list.
-       * sched_addreadytorun will return true if the task was
+       * nxsched_add_readytorun will return true if the task was
        * added to the new list.  We will need to perform a context
        * switch only if the EXCLUSIVE OR of the two calls is non-zero
        * (i.e., one and only one of the calls changes the head of the
        * ready-to-run list).
        */
 
-      switch_needed ^= sched_addreadytorun(tcb);
+      switch_needed ^= nxsched_add_readytorun(tcb);
 
       /* Now, perform the context switch if one is needed */
 
@@ -117,7 +117,7 @@ void up_reprioritize_rtr(FAR struct tcb_s *tcb, uint8_t priority)
 
           if (g_pendingtasks.head)
             {
-              sched_mergepending();
+              nxsched_merge_pending();
             }
 
           /* Update scheduler parameters */
diff --git a/arch/z80/src/common/z80_unblocktask.c b/arch/z80/src/common/z80_unblocktask.c
index c49824d..d88672e 100644
--- a/arch/z80/src/common/z80_unblocktask.c
+++ b/arch/z80/src/common/z80_unblocktask.c
@@ -70,13 +70,13 @@ void up_unblock_task(FAR struct tcb_s *tcb)
 
   /* Remove the task from the blocked task list */
 
-  sched_removeblocked(tcb);
+  nxsched_remove_blocked(tcb);
 
   /* Add the task in the correct location in the prioritized
    * ready-to-run task list
    */
 
-  if (sched_addreadytorun(tcb))
+  if (nxsched_add_readytorun(tcb))
     {
       /* The currently active task has changed! We need to do
        * a context switch to the new task.
diff --git a/sched/group/group_continue.c b/sched/group/group_continue.c
index 9b06ec4..7efcd04 100644
--- a/sched/group/group_continue.c
+++ b/sched/group/group_continue.c
@@ -64,7 +64,7 @@ static int group_continue_handler(pid_t pid, FAR void *arg)
   rtcb = nxsched_get_tcb(pid);
   if (rtcb != NULL)
     {
-      sched_continue(rtcb);
+      nxsched_continue(rtcb);
     }
 
   /* Always return zero.  We need to visit each member of the group */
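
The zero return is what keeps the iteration alive across the whole group.
A sketch of that visitor pattern is below; the iterator harness and the
pid type are illustrative assumptions, not the NuttX group-iteration API.

    #include <stdio.h>

    /* Per-member handler: a non-zero return would stop the iteration */

    static int continue_handler(int pid, void *arg)
    {
      (void)arg;
      printf("resuming member %d\n", pid);
      return 0;   /* always zero: visit every member of the group */
    }

    int main(void)
    {
      int members[] = { 3, 5, 8 };   /* hypothetical member pids */
      int i;

      for (i = 0; i < 3; i++)
        {
          if (continue_handler(members[i], NULL) != 0)
            {
              break;   /* never taken here, by design */
            }
        }

      return 0;
    }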
diff --git a/sched/group/group_suspendchildren.c b/sched/group/group_suspendchildren.c
index 1c71fc4..5ce006a 100644
--- a/sched/group/group_suspendchildren.c
+++ b/sched/group/group_suspendchildren.c
@@ -83,7 +83,7 @@ static int group_suspendchildren_handler(pid_t pid, FAR void *arg)
       rtcb = nxsched_get_tcb(pid);
       if (rtcb != NULL)
         {
-          sched_suspend(rtcb);
+          nxsched_suspend(rtcb);
         }
     }
 
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index dbe97d3..dea9427 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -376,7 +376,7 @@ try_again:
               /* Note that we have entered the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-              sched_critmon_csection(rtcb, true);
+              nxsched_critmon_csection(rtcb, true);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
               sched_note_csection(rtcb, true);
@@ -419,7 +419,7 @@ irqstate_t enter_critical_section(void)
           /* Note that we have entered the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-          sched_critmon_csection(rtcb, true);
+          nxsched_critmon_csection(rtcb, true);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
           sched_note_csection(rtcb, true);
@@ -526,7 +526,7 @@ void leave_critical_section(irqstate_t flags)
               /* No.. Note that we have left the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-              sched_critmon_csection(rtcb, false);
+              nxsched_critmon_csection(rtcb, false);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
               sched_note_csection(rtcb, false);
@@ -550,7 +550,7 @@ void leave_critical_section(irqstate_t flags)
                    * because we were within a critical section then.
                    */
 
-                  if (g_pendingtasks.head != NULL && !sched_islocked_global())
+                  if (g_pendingtasks.head != NULL && !nxsched_islocked_global())
                     {
                       /* Release any ready-to-run tasks that have collected
                        * in g_pendingtasks.  NOTE: This operation has a very
@@ -604,7 +604,7 @@ void leave_critical_section(irqstate_t flags)
           /* Note that we have left the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-          sched_critmon_csection(rtcb, false);
+          nxsched_critmon_csection(rtcb, false);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
           sched_note_csection(rtcb, false);
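
Only the monitor hook's name changes in this file; the call pattern stays
symmetric: the hook receives true on entry to the critical section and
false on exit.  A minimal sketch of that pattern is below; the reduced
tcb_s and the hook body are assumptions (the real hook timestamps the
section rather than printing).

    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_SCHED_CRITMONITOR 1   /* assumed enabled for the sketch */

    struct tcb_s { int pid; };           /* reduced stand-in for the TCB */

    static void nxsched_critmon_csection(struct tcb_s *rtcb, bool state)
    {
      printf("task %d %s critical section\n",
             rtcb->pid, state ? "enters" : "leaves");
    }

    int main(void)
    {
      struct tcb_s rtcb = { 42 };

    #ifdef CONFIG_SCHED_CRITMONITOR
      nxsched_critmon_csection(&rtcb, true);    /* enter_critical_section() */
    #endif

      /* ... work done inside the critical section ... */

    #ifdef CONFIG_SCHED_CRITMONITOR
      nxsched_critmon_csection(&rtcb, false);   /* leave_critical_section() */
    #endif

      return 0;
    }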
diff --git a/sched/paging/pg_miss.c b/sched/paging/pg_miss.c
index 380014b..8960e6a 100644
--- a/sched/paging/pg_miss.c
+++ b/sched/paging/pg_miss.c
@@ -166,7 +166,7 @@ void pg_miss(void)
 
       pginfo("New worker priority. %d->%d\n",
              wtcb->sched_priority, ftcb->sched_priority);
-      nxsched_setpriority(wtcb, ftcb->sched_priority);
+      nxsched_set_priority(wtcb, ftcb->sched_priority);
     }
 
   /* Signal the page fill worker thread.
diff --git a/sched/paging/pg_worker.c b/sched/paging/pg_worker.c
index 49dcedc..b12dda5 100644
--- a/sched/paging/pg_worker.c
+++ b/sched/paging/pg_worker.c
@@ -177,7 +177,7 @@ static void pg_callback(FAR struct tcb_s *tcb, int result)
         {
           pginfo("New worker priority. %d->%d\n",
                  wtcb->sched_priority, priority);
-          nxsched_setpriority(wtcb, priority);
+          nxsched_set_priority(wtcb, priority);
         }
 
       /* Save the page fill result (don't permit the value -EBUSY) */
@@ -289,7 +289,7 @@ static inline bool pg_dequeue(void)
 
                   pginfo("New worker priority. %d->%d\n",
                          wtcb->sched_priority, priority);
-                  nxsched_setpriority(wtcb, priority);
+                  nxsched_set_priority(wtcb, priority);
                 }
 
               /* Return with g_pftcb holding the pointer to
@@ -456,7 +456,7 @@ static inline void pg_alldone(void)
   g_pftcb = NULL;
   pginfo("New worker priority. %d->%d\n",
          wtcb->sched_priority, CONFIG_PAGING_DEFPRIO);
-  nxsched_setpriority(wtcb, CONFIG_PAGING_DEFPRIO);
+  nxsched_set_priority(wtcb, CONFIG_PAGING_DEFPRIO);
 }
 
 /****************************************************************************
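
All four paging call sites follow one boost pattern: the page-fill worker
temporarily runs at the priority of the task it is servicing, then drops
back to CONFIG_PAGING_DEFPRIO when the fill completes.  A sketch under
assumed priority values follows; the re-queueing that the real
nxsched_set_priority() performs is elided.

    #include <stdint.h>
    #include <stdio.h>

    #define CONFIG_PAGING_DEFPRIO 50    /* assumed default worker priority */

    struct tcb_s { uint8_t sched_priority; };   /* reduced stand-in */

    static int nxsched_set_priority(struct tcb_s *tcb, int sched_priority)
    {
      tcb->sched_priority = (uint8_t)sched_priority;  /* re-queueing elided */
      return 0;
    }

    int main(void)
    {
      struct tcb_s wtcb = { CONFIG_PAGING_DEFPRIO };  /* page-fill worker */
      int fault_prio    = 120;                        /* faulting task */

      if (fault_prio > wtcb.sched_priority)   /* boost while servicing */
        {
          printf("New worker priority. %d->%d\n",
                 wtcb.sched_priority, fault_prio);
          nxsched_set_priority(&wtcb, fault_prio);
        }

      /* ... page fill completes ... */

      printf("New worker priority. %d->%d\n",  /* restore when all done */
             wtcb.sched_priority, CONFIG_PAGING_DEFPRIO);
      nxsched_set_priority(&wtcb, CONFIG_PAGING_DEFPRIO);
      return 0;
    }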
diff --git a/sched/pthread/pthread_create.c b/sched/pthread/pthread_create.c
index df3715a..2d635fe 100644
--- a/sched/pthread/pthread_create.c
+++ b/sched/pthread/pthread_create.c
@@ -196,7 +196,7 @@ static void pthread_start(void)
 
   if (ptcb->cmn.sched_priority > ptcb->cmn.init_priority)
     {
-      DEBUGVERIFY(nxsched_setpriority(&ptcb->cmn, ptcb->cmn.init_priority));
+      DEBUGVERIFY(nxsched_set_priority(&ptcb->cmn, ptcb->cmn.init_priority));
     }
 
   /* Pass control to the thread entry point. In the kernel build this has to
@@ -389,7 +389,7 @@ int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
 
       /* Initialize the sporadic policy */
 
-      ret = sched_sporadic_initialize(&ptcb->cmn);
+      ret = nxsched_initialize_sporadic(&ptcb->cmn);
       if (ret >= 0)
         {
           sporadic               = ptcb->cmn.sporadic;
@@ -405,7 +405,7 @@ int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
 
           /* And start the first replenishment interval */
 
-          ret = sched_sporadic_start(&ptcb->cmn);
+          ret = nxsched_start_sporadic(&ptcb->cmn);
         }
 
       /* Handle any failures */
@@ -556,7 +556,7 @@ int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
 
       if (ptcb->cmn.sched_priority < parent->sched_priority)
         {
-          ret = nxsched_setpriority(&ptcb->cmn, parent->sched_priority);
+          ret = nxsched_set_priority(&ptcb->cmn, parent->sched_priority);
           if (ret < 0)
             {
               ret = -ret;
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index a1dfe70..5490ff9 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -369,15 +369,15 @@ extern volatile int16_t g_global_lockcount;
 
 /* Task list manipulation functions */
 
-bool sched_addreadytorun(FAR struct tcb_s *rtrtcb);
-bool sched_removereadytorun(FAR struct tcb_s *rtrtcb);
-bool sched_addprioritized(FAR struct tcb_s *tcb, DSEG dq_queue_t *list);
-void sched_mergeprioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
+bool nxsched_add_readytorun(FAR struct tcb_s *rtrtcb);
+bool nxsched_remove_readytorun(FAR struct tcb_s *rtrtcb);
+bool nxsched_add_prioritized(FAR struct tcb_s *tcb, DSEG dq_queue_t *list);
+void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
                             uint8_t task_state);
-bool sched_mergepending(void);
-void sched_addblocked(FAR struct tcb_s *btcb, tstate_t task_state);
-void sched_removeblocked(FAR struct tcb_s *btcb);
-int  nxsched_setpriority(FAR struct tcb_s *tcb, int sched_priority);
+bool nxsched_merge_pending(void);
+void nxsched_add_blocked(FAR struct tcb_s *btcb, tstate_t task_state);
+void nxsched_remove_blocked(FAR struct tcb_s *btcb);
+int  nxsched_set_priority(FAR struct tcb_s *tcb, int sched_priority);
 
 /* Priority inheritance support */
 
@@ -385,43 +385,43 @@ int  nxsched_setpriority(FAR struct tcb_s *tcb, int sched_priority);
 int  nxsched_reprioritize(FAR struct tcb_s *tcb, int sched_priority);
 #else
 #  define nxsched_reprioritize(tcb,sched_priority) \
-     nxsched_setpriority(tcb,sched_priority)
+     nxsched_set_priority(tcb,sched_priority)
 #endif
 
 /* Support for tickless operation */
 
 #ifdef CONFIG_SCHED_TICKLESS
-unsigned int sched_timer_cancel(void);
-void sched_timer_resume(void);
-void sched_timer_reassess(void);
+unsigned int nxsched_cancel_timer(void);
+void nxsched_resume_timer(void);
+void nxsched_reassess_timer(void);
 #else
-#  define sched_timer_cancel() (0)
-#  define sched_timer_resume()
-#  define sched_timer_reassess()
+#  define nxsched_cancel_timer() (0)
+#  define nxsched_resume_timer()
+#  define nxsched_reassess_timer()
 #endif
 
 /* Scheduler policy support */
 
 #if CONFIG_RR_INTERVAL > 0
-uint32_t sched_roundrobin_process(FAR struct tcb_s *tcb, uint32_t ticks,
+uint32_t nxsched_process_roundrobin(FAR struct tcb_s *tcb, uint32_t ticks,
                                   bool noswitches);
 #endif
 
 #ifdef CONFIG_SCHED_SPORADIC
-int  sched_sporadic_initialize(FAR struct tcb_s *tcb);
-int  sched_sporadic_start(FAR struct tcb_s *tcb);
-int  sched_sporadic_stop(FAR struct tcb_s *tcb);
-int  sched_sporadic_reset(FAR struct tcb_s *tcb);
-int  sched_sporadic_resume(FAR struct tcb_s *tcb);
-int  sched_sporadic_suspend(FAR struct tcb_s *tcb);
-uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
+int  nxsched_initialize_sporadic(FAR struct tcb_s *tcb);
+int  nxsched_start_sporadic(FAR struct tcb_s *tcb);
+int  nxsched_stop_sporadic(FAR struct tcb_s *tcb);
+int  nxsched_reset_sporadic(FAR struct tcb_s *tcb);
+int  nxsched_resume_sporadic(FAR struct tcb_s *tcb);
+int  nxsched_suspend_sporadic(FAR struct tcb_s *tcb);
+uint32_t nxsched_process_sporadic(FAR struct tcb_s *tcb, uint32_t ticks,
                                 bool noswitches);
-void sched_sporadic_lowpriority(FAR struct tcb_s *tcb);
+void nxsched_sporadic_lowpriority(FAR struct tcb_s *tcb);
 #endif
 
 #ifdef CONFIG_SIG_SIGSTOP_ACTION
-void sched_suspend(FAR struct tcb_s *tcb);
-void sched_continue(FAR struct tcb_s *tcb);
+void nxsched_suspend(FAR struct tcb_s *tcb);
+void nxsched_continue(FAR struct tcb_s *tcb);
 #endif
 
 #ifdef CONFIG_SMP
@@ -429,26 +429,26 @@ void sched_continue(FAR struct tcb_s *tcb);
 FAR struct tcb_s *this_task(void);
 #endif
 
-int  sched_cpu_select(cpu_set_t affinity);
-int  sched_cpu_pause(FAR struct tcb_s *tcb);
+int  nxsched_select_cpu(cpu_set_t affinity);
+int  nxsched_pause_cpu(FAR struct tcb_s *tcb);
 
-irqstate_t sched_tasklist_lock(void);
-void sched_tasklist_unlock(irqstate_t lock);
+irqstate_t nxsched_lock_tasklist(void);
+void nxsched_unlock_tasklist(irqstate_t lock);
 
 #if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
-#  define sched_islocked_global() \
+#  define nxsched_islocked_global() \
      (spin_islocked(&g_cpu_schedlock) || g_global_lockcount > 0)
 #else
-#  define sched_islocked_global() \
+#  define nxsched_islocked_global() \
      spin_islocked(&g_cpu_schedlock)
 #endif
 
-#  define sched_islocked_tcb(tcb) sched_islocked_global()
+#  define nxsched_islocked_tcb(tcb) nxsched_islocked_global()
 
 #else
-#  define sched_cpu_select(a)     (0)
-#  define sched_cpu_pause(t)      (-38)  /* -ENOSYS */
-#  define sched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
+#  define nxsched_select_cpu(a)     (0)
+#  define nxsched_pause_cpu(t)      (-38)  /* -ENOSYS */
+#  define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
 #endif
 
 #if defined(CONFIG_SCHED_CPULOAD) && !defined(CONFIG_SCHED_CPULOAD_EXTCLK)
@@ -460,14 +460,14 @@ void weak_function nxsched_process_cpuload(void);
 /* Critical section monitor */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-void sched_critmon_preemption(FAR struct tcb_s *tcb, bool state);
-void sched_critmon_csection(FAR struct tcb_s *tcb, bool state);
-void sched_critmon_resume(FAR struct tcb_s *tcb);
-void sched_critmon_suspend(FAR struct tcb_s *tcb);
+void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state);
+void nxsched_critmon_csection(FAR struct tcb_s *tcb, bool state);
+void nxsched_resume_critmon(FAR struct tcb_s *tcb);
+void nxsched_suspend_critmon(FAR struct tcb_s *tcb);
 #endif
 
 /* TCB operations */
 
-bool sched_verifytcb(FAR struct tcb_s *tcb);
+bool nxsched_verify_tcb(FAR struct tcb_s *tcb);
 
 #endif /* __SCHED_SCHED_SCHED_H */
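
One property of this header worth noting: in non-SMP builds several of the
renamed interfaces are plain macros, so call sites compile unchanged with
or without CONFIG_SMP.  The quick check below reuses the fallback
definitions exactly as they appear in the hunk above; the reduced tcb_s
and the test harness are assumptions.

    #include <stdio.h>

    struct tcb_s { int lockcount; };    /* reduced stand-in */

    /* Non-SMP fallbacks, as declared in sched.h after the rename */

    #define nxsched_select_cpu(a)     (0)
    #define nxsched_pause_cpu(t)      (-38)  /* -ENOSYS */
    #define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)

    int main(void)
    {
      struct tcb_s tcb = { 1 };

      printf("cpu=%d pause=%d locked=%d\n",
             nxsched_select_cpu(0),
             nxsched_pause_cpu(&tcb),
             nxsched_islocked_tcb(&tcb));
      return 0;
    }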
diff --git a/sched/sched/sched_addblocked.c b/sched/sched/sched_addblocked.c
index 4873307..6b6d855 100644
--- a/sched/sched/sched_addblocked.c
+++ b/sched/sched/sched_addblocked.c
@@ -49,7 +49,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_addblocked
+ * Name: nxsched_add_blocked
  *
  * Description:
  *   This function adds a TCB to one of the blocked state task lists as
@@ -68,7 +68,7 @@
  *
  ****************************************************************************/
 
-void sched_addblocked(FAR struct tcb_s *btcb, tstate_t task_state)
+void nxsched_add_blocked(FAR struct tcb_s *btcb, tstate_t task_state)
 {
   FAR dq_queue_t *tasklist;
 
@@ -80,7 +80,7 @@ void sched_addblocked(FAR struct tcb_s *btcb, tstate_t task_state)
 #ifdef CONFIG_SMP
   /* Lock the tasklists before accessing */
 
-  irqstate_t lock = sched_tasklist_lock();
+  irqstate_t lock = nxsched_lock_tasklist();
 #endif
 
   /* Add the TCB to the blocked task list associated with this state. */
@@ -93,7 +93,7 @@ void sched_addblocked(FAR struct tcb_s *btcb, tstate_t task_state)
     {
       /* Add the task to a prioritized list */
 
-      sched_addprioritized(btcb, tasklist);
+      nxsched_add_prioritized(btcb, tasklist);
     }
   else
     {
@@ -105,7 +105,7 @@ void sched_addblocked(FAR struct tcb_s *btcb, tstate_t task_state)
 #ifdef CONFIG_SMP
   /* Unlock the tasklists */
 
-  sched_tasklist_unlock(lock);
+  nxsched_unlock_tasklist(lock);
 #endif
 
   /* Make sure the TCB's state corresponds to the list */
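
The SMP path in this function shows the locking discipline that recurs
through the rest of the patch: every traversal or mutation of the task
lists is bracketed by nxsched_lock_tasklist()/nxsched_unlock_tasklist().
A sketch of the bracket with the lock stubbed out (the real version spins
on a lock with local interrupts disabled):

    #include <stdio.h>

    typedef unsigned int irqstate_t;    /* stand-in for the arch type */

    static irqstate_t nxsched_lock_tasklist_stub(void)
    {
      return 0;                 /* real version disables IRQs and spins */
    }

    static void nxsched_unlock_tasklist_stub(irqstate_t lock)
    {
      (void)lock;               /* real version restores the IRQ state */
    }

    int main(void)
    {
      irqstate_t lock = nxsched_lock_tasklist_stub();

      /* ... add or remove TCBs on g_readytorun, g_pendingtasks, or a
       * blocked list while no other CPU can touch them ...
       */

      printf("task lists updated under the tasklist lock\n");

      nxsched_unlock_tasklist_stub(lock);
      return 0;
    }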
diff --git a/sched/sched/sched_addprioritized.c b/sched/sched/sched_addprioritized.c
index 7bdf66a..84d0076 100644
--- a/sched/sched/sched_addprioritized.c
+++ b/sched/sched/sched_addprioritized.c
@@ -51,7 +51,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_addprioritized
+ * Name: nxsched_add_prioritized
  *
  * Description:
  *  This function adds a TCB to a prioritized TCB list.
@@ -76,7 +76,7 @@
  *
  ****************************************************************************/
 
-bool sched_addprioritized(FAR struct tcb_s *tcb, DSEG dq_queue_t *list)
+bool nxsched_add_prioritized(FAR struct tcb_s *tcb, DSEG dq_queue_t *list)
 {
   FAR struct tcb_s *next;
   FAR struct tcb_s *prev;
diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index c5e7c9a..acb5eca 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -52,7 +52,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name:  sched_addreadytorun
+ * Name:  nxsched_add_readytorun
  *
  * Description:
  *   This function adds a TCB to the ready to run list.  If the currently
@@ -80,7 +80,7 @@
  ****************************************************************************/
 
 #ifndef CONFIG_SMP
-bool sched_addreadytorun(FAR struct tcb_s *btcb)
+bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
 {
   FAR struct tcb_s *rtcb = this_task();
   bool ret;
@@ -97,14 +97,14 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
        * g_pendingtasks task list for now.
        */
 
-      sched_addprioritized(btcb, (FAR dq_queue_t *)&g_pendingtasks);
+      nxsched_add_prioritized(btcb, (FAR dq_queue_t *)&g_pendingtasks);
       btcb->task_state = TSTATE_TASK_PENDING;
       ret = false;
     }
 
   /* Otherwise, add the new task to the ready-to-run task list */
 
-  else if (sched_addprioritized(btcb, (FAR dq_queue_t *)&g_readytorun))
+  else if (nxsched_add_prioritized(btcb, (FAR dq_queue_t *)&g_readytorun))
     {
       /* The new btcb was added at the head of the ready-to-run list.  It
        * is now the new active task!
@@ -129,7 +129,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
 #endif /* !CONFIG_SMP */
 
 /****************************************************************************
- * Name:  sched_addreadytorun
+ * Name:  nxsched_add_readytorun
  *
  * Description:
  *   This function adds a TCB to one of the ready to run lists.  That might
@@ -164,7 +164,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-bool sched_addreadytorun(FAR struct tcb_s *btcb)
+bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
 {
   FAR struct tcb_s *rtcb;
   FAR dq_queue_t *tasklist;
@@ -176,7 +176,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
 
   /* Lock the tasklists before accessing */
 
-  irqstate_t lock = sched_tasklist_lock();
+  irqstate_t lock = nxsched_lock_tasklist();
 
   /* Check if the blocked TCB is locked to this CPU */
 
@@ -192,7 +192,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
        * (possibly its IDLE task).
        */
 
-      cpu = sched_cpu_select(btcb->affinity);
+      cpu = nxsched_select_cpu(btcb->affinity);
     }
 
   /* Get the task currently running on the CPU (may be the IDLE task) */
@@ -242,14 +242,14 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
    */
 
   me = this_cpu();
-  if ((sched_islocked_global() || irq_cpu_locked(me)) &&
+  if ((nxsched_islocked_global() || irq_cpu_locked(me)) &&
       task_state != TSTATE_TASK_ASSIGNED)
     {
       /* Add the new ready-to-run task to the g_pendingtasks task list for
        * now.
        */
 
-      sched_addprioritized(btcb, (FAR dq_queue_t *)&g_pendingtasks);
+      nxsched_add_prioritized(btcb, (FAR dq_queue_t *)&g_pendingtasks);
       btcb->task_state = TSTATE_TASK_PENDING;
       doswitch = false;
     }
@@ -263,7 +263,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
        * Add the task to the ready-to-run (but not running) task list
        */
 
-      sched_addprioritized(btcb, (FAR dq_queue_t *)&g_readytorun);
+      nxsched_add_prioritized(btcb, (FAR dq_queue_t *)&g_readytorun);
 
       btcb->task_state = TSTATE_TASK_READYTORUN;
       doswitch         = false;
@@ -276,9 +276,9 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
 
       if (cpu != me)
         {
-          sched_tasklist_unlock(lock);
+          nxsched_unlock_tasklist(lock);
           DEBUGVERIFY(up_cpu_pause(cpu));
-          lock = sched_tasklist_lock();
+          lock = nxsched_lock_tasklist();
         }
 
       /* Add the task to the list corresponding to the selected state
@@ -286,7 +286,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
        */
 
       tasklist = (FAR dq_queue_t *)&g_assignedtasks[cpu];
-      switched = sched_addprioritized(btcb, tasklist);
+      switched = nxsched_add_prioritized(btcb, tasklist);
 
       /* If the selected task list was the g_assignedtasks[] list and if the
        * new task is the highest priority (RUNNING) task, then a context
@@ -391,7 +391,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
                * different CPU the next time that it runs.
                */
 
-              if (sched_islocked_global())
+              if (nxsched_islocked_global())
                 {
                   next->task_state = TSTATE_TASK_PENDING;
                   tasklist         = (FAR dq_queue_t *)&g_pendingtasks;
@@ -402,7 +402,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
                   tasklist         = (FAR dq_queue_t *)&g_readytorun;
                 }
 
-              sched_addprioritized(next, tasklist);
+              nxsched_add_prioritized(next, tasklist);
             }
 
           doswitch = true;
@@ -413,7 +413,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
            *
            * REVISIT: I have seen this assertion fire.  Apparently another
            * CPU may add another, higher priority task to the same
-           * g_assignedtasks[] list sometime after sched_cpu_select() was
+           * g_assignedtasks[] list sometime after nxsched_select_cpu() was
            * called above, leaving this TCB in the wrong task list if
           * task_state is TSTATE_TASK_ASSIGNED.
            */
@@ -435,7 +435,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
 
   /* Unlock the tasklists */
 
-  sched_tasklist_unlock(lock);
+  nxsched_unlock_tasklist(lock);
   return doswitch;
 }
 
diff --git a/sched/sched/sched_continue.c b/sched/sched/sched_continue.c
index ba2ca76..c89d08b 100644
--- a/sched/sched/sched_continue.c
+++ b/sched/sched/sched_continue.c
@@ -39,7 +39,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_continue
+ * Name: nxsched_continue
  *
  * Description:
 *   Resume the specified thread.  This is normally called indirectly
@@ -47,7 +47,7 @@
  *
  ****************************************************************************/
 
-void sched_continue(FAR struct tcb_s *tcb)
+void nxsched_continue(FAR struct tcb_s *tcb)
 {
   irqstate_t flags;
 
diff --git a/sched/sched/sched_cpupause.c b/sched/sched/sched_cpupause.c
index d2b14d1..525193a 100644
--- a/sched/sched/sched_cpupause.c
+++ b/sched/sched/sched_cpupause.c
@@ -55,7 +55,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_cpu_pause
+ * Name: nxsched_pause_cpu
  *
  * Description:
 *   Check if the task associated with 'tcb' is running on a different CPU.  If
@@ -78,7 +78,7 @@
  *
  ****************************************************************************/
 
-int sched_cpu_pause(FAR struct tcb_s *tcb)
+int nxsched_pause_cpu(FAR struct tcb_s *tcb)
 {
   int cpu;
   int ret;
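
For reference, the pause-and-return-CPU idea behind nxsched_pause_cpu() can be
sketched in isolation.  This is an illustrative model only, not code from this
commit: the mini TCB type and the up_cpu_pause_sketch()/this_cpu_index() stubs
are hypothetical stand-ins for the real kernel structures.

extern int up_cpu_pause_sketch(int cpu);   /* Stand-in for up_cpu_pause() */
extern int this_cpu_index(void);           /* Stand-in for this_cpu() */

struct paused_tcb_s
{
  int cpu;                   /* CPU that the task is assigned to */
  int running;               /* Non-zero if the task is currently running */
};

static int pause_cpu_sketch(const struct paused_tcb_s *tcb)
{
  /* Only a task running on a different CPU requires a pause */

  if (tcb->running && tcb->cpu != this_cpu_index())
    {
      up_cpu_pause_sketch(tcb->cpu);
      return tcb->cpu;       /* The caller must resume this CPU later */
    }

  return -1;                 /* No pause was necessary */
}
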
diff --git a/sched/sched/sched_cpuselect.c b/sched/sched/sched_cpuselect.c
index 09d457c..d6ff835 100644
--- a/sched/sched/sched_cpuselect.c
+++ b/sched/sched/sched_cpuselect.c
@@ -59,7 +59,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name:  sched_cpu_select
+ * Name:  nxsched_select_cpu
  *
  * Description:
  *   Return the index to the CPU with the lowest priority running task,
@@ -76,7 +76,7 @@
  *
  ****************************************************************************/
 
-int sched_cpu_select(cpu_set_t affinity)
+int nxsched_select_cpu(cpu_set_t affinity)
 {
   uint8_t minprio;
   int cpu;
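
A minimal sketch of the selection loop that nxsched_select_cpu() performs,
assuming a hypothetical g_running_prio[] array in place of the real per-CPU
assigned task lists, and that 'affinity' selects at least one CPU
(illustrative only, not code from this commit):

#include <stdint.h>

#define NCPUS_SKETCH 4

static uint8_t g_running_prio[NCPUS_SKETCH]; /* Priority of each CPU's running task */

static int select_cpu_sketch(uint32_t affinity)
{
  uint8_t minprio = UINT8_MAX;
  int selected = 0;
  int cpu;

  for (cpu = 0; cpu < NCPUS_SKETCH; cpu++)
    {
      /* Skip any CPU that the affinity mask excludes */

      if ((affinity & (1u << cpu)) == 0)
        {
          continue;
        }

      /* Remember the CPU running the lowest priority task so far */

      if (g_running_prio[cpu] < minprio)
        {
          minprio  = g_running_prio[cpu];
          selected = cpu;
        }
    }

  return selected;
}
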
diff --git a/sched/sched/sched_critmonitor.c b/sched/sched/sched_critmonitor.c
index 897e5ce..5cff390 100644
--- a/sched/sched/sched_critmonitor.c
+++ b/sched/sched/sched_critmonitor.c
@@ -79,7 +79,7 @@ uint32_t g_crit_max[1];
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_critmon_preemption
+ * Name: nxsched_critmon_preemption
  *
  * Description:
 *   Called when there is any change in the pre-emptible state of a thread.
@@ -90,7 +90,7 @@ uint32_t g_crit_max[1];
  *
  ****************************************************************************/
 
-void sched_critmon_preemption(FAR struct tcb_s *tcb, bool state)
+void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state)
 {
   int cpu = this_cpu();
 
@@ -144,7 +144,7 @@ void sched_critmon_preemption(FAR struct tcb_s *tcb, bool state)
 }
 
 /****************************************************************************
- * Name: sched_critmon_csection
+ * Name: nxsched_critmon_csection
  *
  * Description:
  *   Called when a thread enters or leaves a critical section.
@@ -155,7 +155,7 @@ void sched_critmon_preemption(FAR struct tcb_s *tcb, bool state)
  *
  ****************************************************************************/
 
-void sched_critmon_csection(FAR struct tcb_s *tcb, bool state)
+void nxsched_critmon_csection(FAR struct tcb_s *tcb, bool state)
 {
   int cpu = this_cpu();
 
@@ -208,7 +208,7 @@ void sched_critmon_csection(FAR struct tcb_s *tcb, bool state)
 }
 
 /****************************************************************************
- * Name: sched_critmon_resume
+ * Name: nxsched_resume_critmon
  *
  * Description:
  *   Called when a thread resumes execution, perhaps re-establishing a
@@ -220,7 +220,7 @@ void sched_critmon_csection(FAR struct tcb_s *tcb, bool state)
  *
  ****************************************************************************/
 
-void sched_critmon_resume(FAR struct tcb_s *tcb)
+void nxsched_resume_critmon(FAR struct tcb_s *tcb)
 {
   uint32_t elapsed;
   int cpu = this_cpu();
@@ -285,7 +285,7 @@ void sched_critmon_resume(FAR struct tcb_s *tcb)
 }
 
 /****************************************************************************
- * Name: sched_critmon_suspend
+ * Name: nxsched_suspend_critmon
  *
  * Description:
  *   Called when a thread suspends execution, perhaps terminating a
@@ -297,7 +297,7 @@ void sched_critmon_resume(FAR struct tcb_s *tcb)
  *
  ****************************************************************************/
 
-void sched_critmon_suspend(FAR struct tcb_s *tcb)
+void nxsched_suspend_critmon(FAR struct tcb_s *tcb)
 {
   uint32_t elapsed;
 
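
The monitor functions renamed above all follow the same start/stop timestamp
pattern.  A sketch of that pattern, with a hypothetical up_critmon_gettime()
counter standing in for the architecture-specific time source (illustrative
only):

#include <stdbool.h>
#include <stdint.h>

extern uint32_t up_critmon_gettime(void);  /* Hypothetical monotonic counter */

struct mini_critmon_s
{
  uint32_t start;            /* Time when the section was entered */
  uint32_t max;              /* Longest section observed so far */
};

static void critmon_sketch(struct mini_critmon_s *mon, bool state)
{
  if (state)
    {
      mon->start = up_critmon_gettime();   /* Section begins: timestamp it */
    }
  else
    {
      uint32_t elapsed = up_critmon_gettime() - mon->start;

      if (elapsed > mon->max)
        {
          mon->max = elapsed;              /* Record a new worst case */
        }
    }
}
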
diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index d77d6d3..e0dc6f5 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -1,35 +1,20 @@
 /****************************************************************************
  * sched/sched/sched_lock.c
  *
- *   Copyright (C) 2007, 2009, 2016, 2018 Gregory Nutt. All rights reserved.
- *   Author: Gregory Nutt <gnutt@nuttx.org>
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ *   http://www.apache.org/licenses/LICENSE-2.0
  *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. Neither the name NuttX nor the names of its contributors may be
- *    used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
  *
  ****************************************************************************/
 
@@ -259,7 +244,7 @@ int sched_lock(void)
           /* Note that we have pre-emption locked */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-          sched_critmon_preemption(rtcb, true);
+          nxsched_critmon_preemption(rtcb, true);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
           sched_note_premption(rtcb, true);
@@ -269,12 +254,12 @@ int sched_lock(void)
 
       /* Move any tasks in the ready-to-run list to the pending task list
        * where they will not be available to run until the scheduler is
-       * unlocked and sched_mergepending() is called.
+       * unlocked and nxsched_merge_pending() is called.
        */
 
-      sched_mergeprioritized((FAR dq_queue_t *)&g_readytorun,
-                             (FAR dq_queue_t *)&g_pendingtasks,
-                             TSTATE_TASK_PENDING);
+      nxsched_merge_prioritized((FAR dq_queue_t *)&g_readytorun,
+                                (FAR dq_queue_t *)&g_pendingtasks,
+                                TSTATE_TASK_PENDING);
     }
 
   return OK;
@@ -314,7 +299,7 @@ int sched_lock(void)
           /* Note that we have pre-emption locked */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-          sched_critmon_preemption(rtcb, true);
+          nxsched_critmon_preemption(rtcb, true);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
           sched_note_premption(rtcb, true);
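
From the caller's side, the pre-emption lock touched throughout this hunk is
used in simple bracketed pairs.  A usage sketch, where update_shared_state()
is a hypothetical placeholder:

#include <sched.h>

extern void update_shared_state(void);     /* Hypothetical placeholder */

void critical_update(void)
{
  sched_lock();              /* Pre-emption off: no context switch from here */
  update_shared_state();     /* Runs to completion without being switched out */
  sched_unlock();            /* Pending tasks are merged back; a switch may occur */
}
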
diff --git a/sched/sched/sched_mergepending.c b/sched/sched/sched_mergepending.c
index 79b41ec..fa70fc2 100644
--- a/sched/sched/sched_mergepending.c
+++ b/sched/sched/sched_mergepending.c
@@ -47,7 +47,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_mergepending
+ * Name: nxsched_merge_pending
  *
  * Description:
  *   This function merges the prioritized g_pendingtasks list into the
@@ -69,7 +69,7 @@
  ****************************************************************************/
 
 #ifndef CONFIG_SMP
-bool sched_mergepending(void)
+bool nxsched_merge_pending(void)
 {
   FAR struct tcb_s *ptcb;
   FAR struct tcb_s *pnext;
@@ -90,7 +90,7 @@ bool sched_mergepending(void)
       pnext = ptcb->flink;
 
       /* REVISIT:  Why don't we just remove the ptcb from the pending task list
-       * and call sched_addreadytorun?
+       * and call nxsched_add_readytorun?
        */
 
       /* Search the ready-to-run list to find the location to insert the
@@ -153,7 +153,7 @@ bool sched_mergepending(void)
 #endif /* !CONFIG_SMP */
 
 /****************************************************************************
- * Name: sched_mergepending
+ * Name: nxsched_merge_pending
  *
  * Description:
  *   This function merges the prioritized g_pendingtasks list into the
@@ -175,7 +175,7 @@ bool sched_mergepending(void)
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-bool sched_mergepending(void)
+bool nxsched_merge_pending(void)
 {
   FAR struct tcb_s *rtcb;
   FAR struct tcb_s *ptcb;
@@ -186,7 +186,7 @@ bool sched_mergepending(void)
 
   /* Lock the tasklist before accessing */
 
-  irqstate_t lock = sched_tasklist_lock();
+  irqstate_t lock = nxsched_lock_tasklist();
 
   /* Remove and process every TCB in the g_pendingtasks list.
    *
@@ -195,7 +195,7 @@ bool sched_mergepending(void)
    */
 
   me = this_cpu();
-  if (!sched_islocked_global() && !irq_cpu_locked(me))
+  if (!nxsched_islocked_global() && !irq_cpu_locked(me))
     {
       /* Find the CPU that is executing the lowest priority task */
 
@@ -207,7 +207,7 @@ bool sched_mergepending(void)
           goto errout_with_lock;
         }
 
-      cpu  = sched_cpu_select(ALL_CPUS); /* REVISIT:  Maybe ptcb->affinity */
+      cpu  = nxsched_select_cpu(ALL_CPUS); /* REVISIT:  Maybe ptcb->affinity */
       rtcb = current_task(cpu);
 
       /* Loop while there is a higher priority task in the pending task list
@@ -228,24 +228,24 @@ bool sched_mergepending(void)
 
           /* Add the pending task to the correct ready-to-run list. */
 
-          sched_tasklist_unlock(lock);
-          ret |= sched_addreadytorun(tcb);
-          lock = sched_tasklist_lock();
+          nxsched_unlock_tasklist(lock);
+          ret |= nxsched_add_readytorun(tcb);
+          lock = nxsched_lock_tasklist();
 
           /* This operation could cause the scheduler to become locked.
            * Check if that happened.
            */
 
-          if (sched_islocked_global() || irq_cpu_locked(me))
+          if (nxsched_islocked_global() || irq_cpu_locked(me))
             {
               /* Yes.. then we may have incorrectly placed some TCBs in the
                * g_readytorun list (unlikely, but possible).  We will have to
                * move them back to the pending task list.
                */
 
-              sched_mergeprioritized((FAR dq_queue_t *)&g_readytorun,
-                                     (FAR dq_queue_t *)&g_pendingtasks,
-                                     TSTATE_TASK_PENDING);
+              nxsched_merge_prioritized((FAR dq_queue_t *)&g_readytorun,
+                                        (FAR dq_queue_t *)&g_pendingtasks,
+                                        TSTATE_TASK_PENDING);
 
               /* And return with the scheduler locked and tasks in the
                * pending task list.
@@ -265,7 +265,7 @@ bool sched_mergepending(void)
               goto errout_with_lock;
             }
 
-          cpu  = sched_cpu_select(ALL_CPUS); /* REVISIT:  Maybe ptcb->affinity */
+          cpu  = nxsched_select_cpu(ALL_CPUS); /* REVISIT:  Maybe ptcb->affinity */
           rtcb = current_task(cpu);
         }
 
@@ -273,7 +273,7 @@ bool sched_mergepending(void)
        * tasks in the pending task list to the ready-to-run task list.
        */
 
-      sched_mergeprioritized((FAR dq_queue_t *)&g_pendingtasks,
+      nxsched_merge_prioritized((FAR dq_queue_t *)&g_pendingtasks,
                              (FAR dq_queue_t *)&g_readytorun,
                              TSTATE_TASK_READYTORUN);
     }
@@ -282,7 +282,7 @@ errout_with_lock:
 
   /* Unlock the tasklist */
 
-  sched_tasklist_unlock(lock);
+  nxsched_unlock_tasklist(lock);
   return ret;
 }
 #endif /* CONFIG_SMP */
diff --git a/sched/sched/sched_mergeprioritized.c b/sched/sched/sched_mergeprioritized.c
index 4372588..2a6ee47 100644
--- a/sched/sched/sched_mergeprioritized.c
+++ b/sched/sched/sched_mergeprioritized.c
@@ -36,7 +36,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_mergeprioritized
+ * Name: nxsched_merge_prioritized
  *
  * Description:
 *  This function merges the content of the prioritized task list 'list1'
@@ -60,7 +60,7 @@
  *
  ****************************************************************************/
 
-void sched_mergeprioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
+void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
                             uint8_t task_state)
 {
   dq_queue_t clone;
@@ -71,7 +71,7 @@ void sched_mergeprioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
 #ifdef CONFIG_SMP
   /* Lock the tasklists before accessing */
 
-  irqstate_t lock = sched_tasklist_lock();
+  irqstate_t lock = nxsched_lock_tasklist();
 #endif
 
   DEBUGASSERT(list1 != NULL && list2 != NULL);
@@ -168,7 +168,7 @@ ret_with_lock:
 #ifdef CONFIG_SMP
   /* Unlock the tasklists */
 
-  sched_tasklist_unlock(lock);
+  nxsched_unlock_tasklist(lock);
 #endif
   return;
 }
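
The merge that nxsched_merge_prioritized() performs can be modeled on a
simpler list type.  The sketch below is an illustrative model on a minimal
singly-linked node, not the dq_queue_t code above: both inputs are ordered by
descending priority, and each arrival is inserted behind its equal-priority
peers so FIFO order within a priority is preserved.

#include <stddef.h>

struct node_s
{
  struct node_s *next;
  unsigned char prio;        /* Larger value == higher priority */
};

/* Merge 'list1' into 'list2' and return the new head of list2 */

static struct node_s *merge_prioritized(struct node_s *list1,
                                        struct node_s *list2)
{
  struct node_s head = { .next = list2 };
  struct node_s *tail = &head;

  while (list1 != NULL)
    {
      struct node_s *one = list1;
      list1 = list1->next;

      /* Advance past every node of equal or higher priority */

      while (tail->next != NULL && tail->next->prio >= one->prio)
        {
          tail = tail->next;
        }

      one->next  = tail->next;
      tail->next = one;
      tail       = one;
    }

  return head.next;
}
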
diff --git a/sched/sched/sched_processtimer.c b/sched/sched/sched_processtimer.c
index 19c906a..b6ff270 100644
--- a/sched/sched/sched_processtimer.c
+++ b/sched/sched/sched_processtimer.c
@@ -87,7 +87,7 @@ static inline void nxsched_cpu_scheduler(int cpu)
        * timeslice.
        */
 
-      sched_roundrobin_process(rtcb, 1, false);
+      nxsched_process_roundrobin(rtcb, 1, false);
     }
 #endif
 
@@ -100,7 +100,7 @@ static inline void nxsched_cpu_scheduler(int cpu)
        * budget.
        */
 
-      sched_sporadic_process(rtcb, 1, false);
+      nxsched_process_sporadic(rtcb, 1, false);
     }
 #endif
 }
diff --git a/sched/sched/sched_removeblocked.c b/sched/sched/sched_removeblocked.c
index 9ed15a9..ccf9360 100644
--- a/sched/sched/sched_removeblocked.c
+++ b/sched/sched/sched_removeblocked.c
@@ -49,7 +49,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_removeblocked
+ * Name: nxsched_remove_blocked
  *
  * Description:
  *   This function removes a TCB from one of the blocked state task
@@ -67,7 +67,7 @@
  *
  ****************************************************************************/
 
-void sched_removeblocked(FAR struct tcb_s *btcb)
+void nxsched_remove_blocked(FAR struct tcb_s *btcb)
 {
   tstate_t task_state = btcb->task_state;
 
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index fa842be..094ffa2 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -53,7 +53,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_removereadytorun
+ * Name: nxsched_remove_readytorun
  *
  * Description:
  *   This function removes a TCB from the ready to run list.
@@ -75,7 +75,7 @@
  ****************************************************************************/
 
 #ifndef CONFIG_SMP
-bool sched_removereadytorun(FAR struct tcb_s *rtcb)
+bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb)
 {
   bool doswitch = false;
 
@@ -112,7 +112,7 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
 #endif /* !CONFIG_SMP */
 
 /****************************************************************************
- * Name: sched_removereadytorun
+ * Name: nxsched_remove_readytorun
  *
  * Description:
  *   This function removes a TCB from the ready to run list.
@@ -134,7 +134,7 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-bool sched_removereadytorun(FAR struct tcb_s *rtcb)
+bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb)
 {
   FAR dq_queue_t *tasklist;
   bool doswitch = false;
@@ -142,7 +142,7 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
 
   /* Lock the tasklists before accessing */
 
-  irqstate_t lock = sched_tasklist_lock();
+  irqstate_t lock = nxsched_lock_tasklist();
 
   /* Which CPU (if any) is the task running on?  Which task list holds the
    * TCB?
@@ -188,9 +188,9 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
       me = this_cpu();
       if (cpu != me)
         {
-          sched_tasklist_unlock(lock);
+          nxsched_unlock_tasklist(lock);
           DEBUGVERIFY(up_cpu_pause(cpu));
-          lock = sched_tasklist_lock();
+          lock = nxsched_lock_tasklist();
         }
 
       /* The task is running but the CPU that it was running on has been
@@ -211,7 +211,7 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
        * REVISIT: What if it is not the IDLE thread?
        */
 
-      if (!sched_islocked_global() && !irq_cpu_locked(me))
+      if (!nxsched_islocked_global() && !irq_cpu_locked(me))
         {
           /* Search for the highest priority task that can run on this
            * CPU.
@@ -338,7 +338,7 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
 
   /* Unlock the tasklists */
 
-  sched_tasklist_unlock(lock);
+  nxsched_unlock_tasklist(lock);
   return doswitch;
 }
 #endif /* CONFIG_SMP */
diff --git a/sched/sched/sched_reprioritize.c b/sched/sched/sched_reprioritize.c
index 3d88acc..6312df3 100644
--- a/sched/sched/sched_reprioritize.c
+++ b/sched/sched/sched_reprioritize.c
@@ -80,12 +80,12 @@
 
 int nxsched_reprioritize(FAR struct tcb_s *tcb, int sched_priority)
 {
-  /* This function is equivalent to nxsched_setpriority() BUT it also has the
+  /* This function is equivalent to nxsched_set_priority() BUT it also has the
    * side effect of discarding all priority inheritance history.  This is
    * done only on explicit, user-initiated reprioritization.
    */
 
-  int ret = nxsched_setpriority(tcb, sched_priority);
+  int ret = nxsched_set_priority(tcb, sched_priority);
   if (ret == 0)
     {
       /* Reset the base_priority -- the priority that the thread would return
diff --git a/sched/sched/sched_resumescheduler.c b/sched/sched/sched_resumescheduler.c
index 4802fe5..fc9fe26 100644
--- a/sched/sched/sched_resumescheduler.c
+++ b/sched/sched/sched_resumescheduler.c
@@ -91,14 +91,14 @@ void nxsched_resume_scheduler(FAR struct tcb_s *tcb)
     {
       /* Reset the replenishment cycle if it is appropriate to do so */
 
-      DEBUGVERIFY(sched_sporadic_resume(tcb));
+      DEBUGVERIFY(nxsched_resume_sporadic(tcb));
     }
 #endif
 
   /* Indicate the task has been resumed */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-  sched_critmon_resume(tcb);
+  nxsched_resume_critmon(tcb);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION
   sched_note_resume(tcb);
@@ -106,7 +106,7 @@ void nxsched_resume_scheduler(FAR struct tcb_s *tcb)
 
 #ifdef CONFIG_SMP
   /* NOTE: The following logic for adjusting global IRQ controls was
-   * derived from sched_addreadytorun() and sched_removedreadytorun()
+   * derived from nxsched_add_readytorun() and nxsched_remove_readytorun().
    * Here, we only handle clearing logic to defer unlocking the IRQ lock
    * followed by context switching.
    */
@@ -120,8 +120,8 @@ void nxsched_resume_scheduler(FAR struct tcb_s *tcb)
   if (tcb->irqcount > 0)
     {
       * Do nothing here
-       * NOTE: spin_setbit() is done in sched_addreadytorun()
-       * and sched_removereadytorun()
+       * NOTE: spin_setbit() is done in nxsched_add_readytorun()
+       * and nxsched_remove_readytorun()
        */
     }
 
diff --git a/sched/sched/sched_roundrobin.c b/sched/sched/sched_roundrobin.c
index 4c289cb..8367ae2 100644
--- a/sched/sched/sched_roundrobin.c
+++ b/sched/sched/sched_roundrobin.c
@@ -68,7 +68,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name:  sched_roundrobin_process
+ * Name:  nxsched_process_roundrobin
  *
  * Description:
  *   Check if the currently executing task has exceeded its time slice.
@@ -96,7 +96,7 @@
  *
  ****************************************************************************/
 
-uint32_t sched_roundrobin_process(FAR struct tcb_s *tcb, uint32_t ticks,
+uint32_t nxsched_process_roundrobin(FAR struct tcb_s *tcb, uint32_t ticks,
                                   bool noswitches)
 {
   uint32_t ret;
@@ -121,10 +121,10 @@ uint32_t sched_roundrobin_process(FAR struct tcb_s *tcb, uint32_t ticks,
   /* Did decrementing the timeslice counter cause the timeslice to expire? */
 
   ret = tcb->timeslice;
-  if (tcb->timeslice <= 0 && !sched_islocked_tcb(tcb))
+  if (tcb->timeslice <= 0 && !nxsched_islocked_tcb(tcb))
     {
       /* We will also suppress context switches if we were called via one
-       * of the unusual cases handled by sched_timer_reassess().  In that
+       * of the unusual cases handled by nxsched_reassess_timer().  In that
        * case, we will return a value of one so that the timer will expire
        * as soon as possible and we can perform this action in the normal
        * timer expiration context.
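
A compact model of the timeslice bookkeeping that nxsched_process_roundrobin()
implements (illustrative only; the mini TCB fields and TIMESLICE_TICKS are
stand-ins for the real struct tcb_s and CONFIG_RR_INTERVAL):

#include <stdbool.h>
#include <stdint.h>

#define TIMESLICE_TICKS 20   /* Hypothetical stand-in for CONFIG_RR_INTERVAL */

struct rr_tcb_s
{
  int32_t timeslice;         /* Ticks remaining in the current slice */
  bool locked;               /* The thread holds the scheduler lock */
};

/* Returns ticks until the next scheduler event; zero means the slice
 * just expired and the caller should rotate the task in its priority
 * group.
 */

static uint32_t roundrobin_sketch(struct rr_tcb_s *tcb, uint32_t ticks)
{
  tcb->timeslice -= (int32_t)ticks;

  if (tcb->timeslice <= 0 && !tcb->locked)
    {
      /* The slice expired: reload it and signal the caller to yield */

      tcb->timeslice = TIMESLICE_TICKS;
      return 0;
    }

  /* Slice still running (or expiry deferred by the lock): report the
   * remaining time, at least one tick.
   */

  return (uint32_t)(tcb->timeslice > 0 ? tcb->timeslice : 1);
}
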
diff --git a/sched/sched/sched_setaffinity.c b/sched/sched/sched_setaffinity.c
index 4919d9e..813175f 100644
--- a/sched/sched/sched_setaffinity.c
+++ b/sched/sched/sched_setaffinity.c
@@ -130,13 +130,13 @@ int nxsched_set_affinity(pid_t pid, size_t cpusetsize,
           /* No.. then we will need to move the task from the assigned
            * task list to some other ready to run list.
            *
-           * nxsched_setpriority() will do just what we want... it will
+           * nxsched_set_priority() will do just what we want... it will
           * remove the task from its current position in some assigned
            * task list and then simply put it back in the right place.  This
            * works even if the task is this task.
            */
 
-          ret = nxsched_setpriority(tcb, tcb->sched_priority);
+          ret = nxsched_set_priority(tcb, tcb->sched_priority);
         }
     }
 
diff --git a/sched/sched/sched_setparam.c b/sched/sched/sched_setparam.c
index d3bfe7a..75cc804 100644
--- a/sched/sched/sched_setparam.c
+++ b/sched/sched/sched_setparam.c
@@ -186,7 +186,7 @@ int nxsched_set_param(pid_t pid, FAR const struct sched_param *param)
       /* Stop/reset current sporadic scheduling */
 
       flags = enter_critical_section();
-      ret = sched_sporadic_reset(tcb);
+      ret = nxsched_reset_sporadic(tcb);
       if (ret >= 0)
         {
           /* Save the sporadic scheduling parameters and reset to the
@@ -206,7 +206,7 @@ int nxsched_set_param(pid_t pid, FAR const struct sched_param *param)
 
           /* And restart at the next replenishment interval */
 
-          ret = sched_sporadic_start(tcb);
+          ret = nxsched_start_sporadic(tcb);
         }
 
       /* Restore interrupts and handler errors */
diff --git a/sched/sched/sched_setpriority.c b/sched/sched/sched_setpriority.c
index 83dc88b..7487962 100644
--- a/sched/sched/sched_setpriority.c
+++ b/sched/sched/sched_setpriority.c
@@ -68,7 +68,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb)
    * then use the 'nxttcb' which will probably be the IDLE thread.
    */
 
-  if (!sched_islocked_global() && !irq_cpu_locked(cpu))
+  if (!nxsched_islocked_global() && !irq_cpu_locked(cpu))
     {
       /* Search for the highest priority task that can run on this CPU. */
 
@@ -188,7 +188,7 @@ static void nxsched_readytorun_setpriority(FAR struct tcb_s *tcb,
 
   if (tcb->task_state == TSTATE_TASK_READYTORUN)
     {
-      cpu = sched_cpu_select(tcb->affinity);
+      cpu = nxsched_select_cpu(tcb->affinity);
     }
 
   /* CASE 2b.  The task is ready to run, and assigned to a CPU.  An increase
@@ -236,7 +236,7 @@ static void nxsched_readytorun_setpriority(FAR struct tcb_s *tcb,
        * It should not be at the head of the list.
        */
 
-      bool check = sched_removereadytorun(tcb);
+      bool check = nxsched_remove_readytorun(tcb);
       DEBUGASSERT(check == false);
       UNUSED(check);
 
@@ -248,7 +248,7 @@ static void nxsched_readytorun_setpriority(FAR struct tcb_s *tcb,
        * end up at the head of the list.
        */
 
-      check = sched_addreadytorun(tcb);
+      check = nxsched_add_readytorun(tcb);
       DEBUGASSERT(check == false);
       UNUSED(check);
     }
@@ -291,7 +291,7 @@ static inline void nxsched_blocked_setpriority(FAR struct tcb_s *tcb,
 
       /* Put it back into the prioritized list at the correct position. */
 
-      sched_addprioritized(tcb, tasklist);
+      nxsched_add_prioritized(tcb, tasklist);
     }
 
   /* CASE 3b. The task resides in a non-prioritized list. */
@@ -309,7 +309,7 @@ static inline void nxsched_blocked_setpriority(FAR struct tcb_s *tcb,
  ****************************************************************************/
 
 /****************************************************************************
- * Name:  nxsched_setpriority
+ * Name:  nxsched_set_priority
  *
  * Description:
  *   This function sets the priority of a specified task.
@@ -323,7 +323,7 @@ static inline void nxsched_blocked_setpriority(FAR struct tcb_s *tcb,
  *   sched_priority - The new task priority
  *
  * Returned Value:
- *   On success, nxsched_setpriority() returns 0 (OK). On error, a negated
+ *   On success, nxsched_set_priority() returns 0 (OK). On error, a negated
  *   errno value is returned.
  *
  *  EINVAL The parameter 'param' is invalid or does not make sense for the
@@ -333,7 +333,7 @@ static inline void nxsched_blocked_setpriority(FAR struct tcb_s *tcb,
  *
  ****************************************************************************/
 
-int nxsched_setpriority(FAR struct tcb_s *tcb, int sched_priority)
+int nxsched_set_priority(FAR struct tcb_s *tcb, int sched_priority)
 {
   irqstate_t flags;
 
diff --git a/sched/sched/sched_setscheduler.c b/sched/sched/sched_setscheduler.c
index f9cd7a5..1768c08 100644
--- a/sched/sched/sched_setscheduler.c
+++ b/sched/sched/sched_setscheduler.c
@@ -134,7 +134,7 @@ int nxsched_set_scheduler(pid_t pid, int policy,
 
           if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC)
             {
-              DEBUGVERIFY(sched_sporadic_stop(tcb));
+              DEBUGVERIFY(nxsched_stop_sporadic(tcb));
             }
 #endif
 
@@ -155,7 +155,7 @@ int nxsched_set_scheduler(pid_t pid, int policy,
 
           if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC)
             {
-              DEBUGVERIFY(sched_sporadic_stop(tcb));
+              DEBUGVERIFY(nxsched_stop_sporadic(tcb));
             }
 #endif
 
@@ -220,11 +220,11 @@ int nxsched_set_scheduler(pid_t pid, int policy,
 
           if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC)
             {
-              ret = sched_sporadic_reset(tcb);
+              ret = nxsched_reset_sporadic(tcb);
             }
           else
             {
-              ret = sched_sporadic_initialize(tcb);
+              ret = nxsched_initialize_sporadic(tcb);
             }
 
           /* Save the sporadic scheduling parameters. */
@@ -245,7 +245,7 @@ int nxsched_set_scheduler(pid_t pid, int policy,
 
               /* And restart at the next replenishment interval */
 
-              ret = sched_sporadic_start(tcb);
+              ret = nxsched_start_sporadic(tcb);
             }
 
           /* Handle errors */
diff --git a/sched/sched/sched_sporadic.c b/sched/sched/sched_sporadic.c
index b511e0d..981ec3b 100644
--- a/sched/sched/sched_sporadic.c
+++ b/sched/sched/sched_sporadic.c
@@ -470,12 +470,12 @@ static void sporadic_budget_expire(int argc, wdparm_t arg1, ...)
    * violating the lock.
    *
    * What we do instead is just deallocate all timers.  When the lock is
-   * finally released, sched_sporadic_lowpriority() and that will restart
+   * finally released, nxsched_sporadic_lowpriority() will be called and
   * that will restart the interval period.  timeslice == -1 is the cue
   * to sched_unlock() that this operation is needed.
    */
 
-  if (sched_islocked_tcb(tcb))
+  if (nxsched_islocked_tcb(tcb))
     {
       DEBUGASSERT((mrepl->flags & SPORADIC_FLAG_ALLOCED) != 0 &&
                   sporadic->nrepls > 0);
@@ -619,12 +619,12 @@ static void sporadic_replenish_expire(int argc, wdparm_t arg1, ...)
    * violating the lock.
    *
    * What we do instead is just deallocate all timers.  When the lock is
-   * finally released, sched_sporadic_lowpriority() and that will restart
+   * finally released, nxsched_sporadic_lowpriority() will be called and
   * that will restart the interval period.  timeslice == -1 is the cue
   * to sched_unlock() that this operation is needed.
    */
 
-  if (sched_islocked_tcb(tcb))
+  if (nxsched_islocked_tcb(tcb))
     {
       /* Set the timeslice to the magic value */
 
@@ -768,7 +768,7 @@ FAR struct replenishment_s *
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_sporadic_initialize
+ * Name: nxsched_initialize_sporadic
  *
  * Description:
  *   Allocate resources needed by the sporadic scheduling policy.
@@ -781,7 +781,7 @@ FAR struct replenishment_s *
  *
  ****************************************************************************/
 
-int sched_sporadic_initialize(FAR struct tcb_s *tcb)
+int nxsched_initialize_sporadic(FAR struct tcb_s *tcb)
 {
   FAR struct sporadic_s *sporadic;
   int i;
@@ -815,7 +815,7 @@ int sched_sporadic_initialize(FAR struct tcb_s *tcb)
 }
 
 /****************************************************************************
- * Name: sched_sporadic_start
+ * Name: nxsched_start_sporadic
  *
  * Description:
  *   Called to initialize sporadic scheduling on a given thread.  This
@@ -841,7 +841,7 @@ int sched_sporadic_initialize(FAR struct tcb_s *tcb)
  *
  ****************************************************************************/
 
-int sched_sporadic_start(FAR struct tcb_s *tcb)
+int nxsched_start_sporadic(FAR struct tcb_s *tcb)
 {
   FAR struct sporadic_s *sporadic;
   FAR struct replenishment_s *mrepl;
@@ -866,7 +866,7 @@ int sched_sporadic_start(FAR struct tcb_s *tcb)
 }
 
 /****************************************************************************
- * Name: sched_sporadic_stop
+ * Name: nxsched_stop_sporadic
  *
  * Description:
  *   Called to terminate sporadic scheduling on a given thread and to
@@ -890,13 +890,13 @@ int sched_sporadic_start(FAR struct tcb_s *tcb)
  *
  ****************************************************************************/
 
-int sched_sporadic_stop(FAR struct tcb_s *tcb)
+int nxsched_stop_sporadic(FAR struct tcb_s *tcb)
 {
   DEBUGASSERT(tcb && tcb->sporadic);
 
   /* Stop all timers, reset scheduling */
 
-  sched_sporadic_reset(tcb);
+  nxsched_reset_sporadic(tcb);
 
   /* Then free the container holding the sporadic scheduling parameters */
 
@@ -906,7 +906,7 @@ int sched_sporadic_stop(FAR struct tcb_s *tcb)
 }
 
 /****************************************************************************
- * Name: sched_sporadic_reset
+ * Name: nxsched_reset_sporadic
  *
  * Description:
  *   Called to stop sporadic scheduling on a given thread.  This
@@ -914,7 +914,7 @@ int sched_sporadic_stop(FAR struct tcb_s *tcb)
  *
  *     - When the sporadic scheduling parameters are changed via
  *       sched_setparam()
- *     - From sched_sporadic_stop when under those conditions.
+ *     - From nxsched_stop_sporadic() under those conditions.
  *
  * Input Parameters:
  *   tcb - The TCB of the thread that is beginning sporadic scheduling.
@@ -929,7 +929,7 @@ int sched_sporadic_stop(FAR struct tcb_s *tcb)
  *
  ****************************************************************************/
 
-int sched_sporadic_reset(FAR struct tcb_s *tcb)
+int nxsched_reset_sporadic(FAR struct tcb_s *tcb)
 {
   FAR struct sporadic_s *sporadic;
   FAR struct replenishment_s *repl;
@@ -970,7 +970,7 @@ int sched_sporadic_reset(FAR struct tcb_s *tcb)
 }
 
 /****************************************************************************
- * Name: sched_sporadic_resume
+ * Name: nxsched_resume_sporadic
  *
  * Description:
  *   Called to start the next replenishment interval.  This function is
@@ -995,7 +995,7 @@ int sched_sporadic_reset(FAR struct tcb_s *tcb)
  *
  ****************************************************************************/
 
-int sched_sporadic_resume(FAR struct tcb_s *tcb)
+int nxsched_resume_sporadic(FAR struct tcb_s *tcb)
 {
   FAR struct sporadic_s *sporadic;
   FAR struct replenishment_s *repl;
@@ -1115,7 +1115,7 @@ int sched_sporadic_resume(FAR struct tcb_s *tcb)
 }
 
 /****************************************************************************
- * Name: sched_sporadic_suspend
+ * Name: nxsched_suspend_sporadic
  *
  * Description:
 *   Called when a thread with sporadic scheduling is suspended.  In this
@@ -1131,7 +1131,7 @@ int sched_sporadic_resume(FAR struct tcb_s *tcb)
  *
  ****************************************************************************/
 
-int sched_sporadic_suspend(FAR struct tcb_s *tcb)
+int nxsched_suspend_sporadic(FAR struct tcb_s *tcb)
 {
   FAR struct sporadic_s *sporadic;
 
@@ -1161,7 +1161,7 @@ int sched_sporadic_suspend(FAR struct tcb_s *tcb)
 }
 
 /****************************************************************************
- * Name: sched_sporadic_process
+ * Name: nxsched_process_sporadic
  *
  * Description:
  *   Process the elapsed time interval. Called from this context:
@@ -1188,7 +1188,7 @@ int sched_sporadic_suspend(FAR struct tcb_s *tcb)
  *
  ****************************************************************************/
 
-uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
+uint32_t nxsched_process_sporadic(FAR struct tcb_s *tcb, uint32_t ticks,
                                 bool noswitches)
 {
   FAR struct sporadic_s *sporadic;
@@ -1225,7 +1225,7 @@ uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
       /* Does the thread have the scheduler locked? */
 
       sporadic = tcb->sporadic;
-      if (sched_islocked_tcb(tcb))
+      if (nxsched_islocked_tcb(tcb))
         {
           /* Yes... then we have no option but to give the thread more
            * time at the higher priority.  Dropping the priority could
@@ -1241,7 +1241,7 @@ uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
         }
 
       /* We will also suppress context switches if we were called via one of
-       * the unusual cases handled by sched_timer_reassess(). In that case,
+       * the unusual cases handled by nxsched_reassess_timer(). In that case,
        * we will return a value of one so that the timer will expire as soon
        * as possible and we can perform this action in the normal timer
        * expiration context.
@@ -1287,7 +1287,7 @@ uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
 }
 
 /****************************************************************************
- * Name: sched_sporadic_lowpriority
+ * Name: nxsched_sporadic_lowpriority
  *
  * Description:
  *   Drop to the lower priority for the duration of the replenishment
@@ -1309,7 +1309,7 @@ uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
  *
  ****************************************************************************/
 
-void sched_sporadic_lowpriority(FAR struct tcb_s *tcb)
+void nxsched_sporadic_lowpriority(FAR struct tcb_s *tcb)
 {
   FAR struct sporadic_s *sporadic;
   FAR struct replenishment_s *mrepl;
@@ -1324,7 +1324,7 @@ void sched_sporadic_lowpriority(FAR struct tcb_s *tcb)
   tcb->timeslice = 0;
 
   /* Allocate a new main timer.  There should be no timers active at this
-   * phase since they were stopped in sched_sporadic_process().
+   * phase since they were stopped in nxsched_process_sporadic().
    */
 
   DEBUGASSERT(sporadic->nrepls < sporadic->max_repl);
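
The sporadic functions renamed above all serve one alternation: run at a high
priority while execution budget remains, drop to a low priority once it is
consumed, and replenish on a fixed interval.  A self-contained model of that
alternation (illustrative only; the real code drives it from watchdog timers
rather than a per-tick function, and the field names here are hypothetical):

#include <stdint.h>

struct sporadic_sketch_s
{
  uint8_t  hi_prio;          /* Priority while budget remains */
  uint8_t  lo_prio;          /* Priority after the budget is consumed */
  uint32_t budget;           /* Execution budget per interval, in ticks */
  uint32_t repl_period;      /* Replenishment interval, in ticks */
  uint32_t used;             /* Budget consumed so far in this interval */
  uint32_t elapsed;          /* Ticks since the interval started */
};

/* Advance the model by one tick of execution and return the priority
 * the thread should run at for the next tick.
 */

static uint8_t sporadic_tick(struct sporadic_sketch_s *s)
{
  if (++s->elapsed >= s->repl_period)
    {
      /* Replenishment: restart the interval with a full budget */

      s->elapsed = 0;
      s->used    = 0;
    }

  if (s->used < s->budget)
    {
      s->used++;
      return s->hi_prio;     /* Budget remains: run at the high priority */
    }

  return s->lo_prio;         /* Budget consumed: drop to the low priority */
}
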
diff --git a/sched/sched/sched_suspend.c b/sched/sched/sched_suspend.c
index e24b1fc..10591c5 100644
--- a/sched/sched/sched_suspend.c
+++ b/sched/sched/sched_suspend.c
@@ -39,7 +39,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_suspend
+ * Name: nxsched_suspend
  *
  * Description:
 *   Suspend/pause the specified thread.  This is normally called indirectly
@@ -47,7 +47,7 @@
  *
  ****************************************************************************/
 
-void sched_suspend(FAR struct tcb_s *tcb)
+void nxsched_suspend(FAR struct tcb_s *tcb)
 {
   irqstate_t flags;
 
@@ -62,7 +62,7 @@ void sched_suspend(FAR struct tcb_s *tcb)
     {
       /* Remove the TCB from the blocked task list. */
 
-      sched_removeblocked(tcb);
+      nxsched_remove_blocked(tcb);
 
       /* Set the errno value to EINTR.  The task will be restarted in the
        * running or runnable state and will appear to have awakened from
@@ -73,7 +73,7 @@ void sched_suspend(FAR struct tcb_s *tcb)
 
       /* Move the TCB to the g_stoppedtasks list. */
 
-      sched_addblocked(tcb, TSTATE_TASK_STOPPED);
+      nxsched_add_blocked(tcb, TSTATE_TASK_STOPPED);
     }
   else
     {
diff --git a/sched/sched/sched_suspendscheduler.c b/sched/sched/sched_suspendscheduler.c
index e202357..d634425 100644
--- a/sched/sched/sched_suspendscheduler.c
+++ b/sched/sched/sched_suspendscheduler.c
@@ -79,14 +79,14 @@ void nxsched_suspend_scheduler(FAR struct tcb_s *tcb)
 
   if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC)
     {
-      DEBUGVERIFY(sched_sporadic_suspend(tcb));
+      DEBUGVERIFY(nxsched_suspend_sporadic(tcb));
     }
 #endif
 
   /* Indicate that the task has been suspended */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-  sched_critmon_suspend(tcb);
+  nxsched_suspend_critmon(tcb);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION
   sched_note_suspend(tcb);
diff --git a/sched/sched/sched_tasklistlock.c b/sched/sched/sched_tasklistlock.c
index 9e2f90b..8226471 100644
--- a/sched/sched/sched_tasklistlock.c
+++ b/sched/sched/sched_tasklistlock.c
@@ -62,7 +62,7 @@ static volatile uint8_t g_tasklist_lock_count[CONFIG_SMP_NCPUS];
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_tasklist_lock()
+ * Name: nxsched_lock_tasklist()
  *
  * Description:
  *   Disable local interrupts and take the global spinlock (g_tasklist_lock)
@@ -74,10 +74,10 @@ static volatile uint8_t g_tasklist_lock_count[CONFIG_SMP_NCPUS];
  *
  * Returned Value:
  *   An opaque, architecture-specific value that represents the state of
- *   the interrupts prior to the call to sched_tasklist_lock();
+ *   the interrupts prior to the call to nxsched_lock_tasklist();
  ****************************************************************************/
 
-irqstate_t sched_tasklist_lock(void)
+irqstate_t nxsched_lock_tasklist(void)
 {
   int me;
   irqstate_t ret;
@@ -96,26 +96,26 @@ irqstate_t sched_tasklist_lock(void)
 }
 
 /****************************************************************************
- * Name: sched_tasklist_unlock()
+ * Name: nxsched_unlock_tasklist()
  *
  * Description:
  *   Decrement the call counter (g_tasklist_lock_count[cpu]) and if it
  *   decrements to zero then release the spinlock (g_tasklist_lock) and
  *   restore the interrupt state as it was prior to the previous call to
- *   sched_tasklist_lock().
+ *   nxsched_lock_tasklist().
  *
  *   NOTE: This API is used to protect tasklists in the scheduler. So do not
  *   use this API for other purposes.
  *
  * Input Parameters:
  *   lock - The architecture-specific value that represents the state of
- *          the interrupts prior to the call to sched_tasklist_lock().
+ *          the interrupts prior to the call to nxsched_lock_tasklist().
  *
  * Returned Value:
  *   None
  ****************************************************************************/
 
-void sched_tasklist_unlock(irqstate_t lock)
+void nxsched_unlock_tasklist(irqstate_t lock)
 {
   int me;
 
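
The counting-lock pattern documented above is worth seeing whole.  A sketch
under stated assumptions: the irq_save_sketch()/irq_restore_sketch(),
spin_lock_sketch()/spin_unlock_sketch() and this_cpu_sketch() stubs are
hypothetical stand-ins for the architecture-specific primitives, not the real
NuttX APIs.

#include <stdint.h>

#define NCPUS_SKETCH 4

typedef unsigned int irqstate_t_sketch;

extern irqstate_t_sketch irq_save_sketch(void);
extern void irq_restore_sketch(irqstate_t_sketch flags);
extern void spin_lock_sketch(volatile int *lock);
extern void spin_unlock_sketch(volatile int *lock);
extern int this_cpu_sketch(void);

static volatile int     g_list_lock;
static volatile uint8_t g_lock_count[NCPUS_SKETCH];

static irqstate_t_sketch lock_tasklist_sketch(void)
{
  irqstate_t_sketch flags = irq_save_sketch(); /* Interrupts off locally */
  int me = this_cpu_sketch();

  if (g_lock_count[me] == 0)
    {
      spin_lock_sketch(&g_list_lock); /* Outermost call takes the spinlock */
    }

  g_lock_count[me]++;                 /* Nested calls only count */
  return flags;
}

static void unlock_tasklist_sketch(irqstate_t_sketch flags)
{
  int me = this_cpu_sketch();

  if (--g_lock_count[me] == 0)
    {
      spin_unlock_sketch(&g_list_lock); /* Outermost unlock releases it */
    }

  irq_restore_sketch(flags);            /* Restore the saved IRQ state */
}
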
diff --git a/sched/sched/sched_timerexpiration.c b/sched/sched/sched_timerexpiration.c
index 2d091f9..d3899d9 100644
--- a/sched/sched/sched_timerexpiration.c
+++ b/sched/sched/sched_timerexpiration.c
@@ -48,7 +48,7 @@
  * Pre-processor Definitions
  ****************************************************************************/
 
-/* In the original design, it was planned that sched_timer_reassess() be
+/* In the original design, it was planned that nxsched_reassess_timer() be
  * called whenever there was a change at the head of the ready-to-run
  * list.  That call was intended to establish a new time-slice or to
  * stop an old time-slice timer.  However, it turns out that that
@@ -185,7 +185,7 @@ static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
        * timeslice.
        */
 
-      ret = sched_roundrobin_process(rtcb, ticks, noswitches);
+      ret = nxsched_process_roundrobin(rtcb, ticks, noswitches);
     }
 #endif
 
@@ -212,7 +212,7 @@ static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
        * budget.
        */
 
-      ret = sched_sporadic_process(rtcb, ticks, noswitches);
+      ret = nxsched_process_sporadic(rtcb, ticks, noswitches);
     }
 #endif
 
@@ -558,12 +558,12 @@ void nxsched_timer_expiration(void)
 #endif
 
 /****************************************************************************
- * Name:  sched_timer_cancel
+ * Name:  nxsched_cancel_timer
  *
  * Description:
  *   Stop the current timing activity.  This is currently called just before
  *   a new entry is inserted at the head of a timer list and also as part
- *   of the processing of sched_timer_reassess().
+ *   of the processing of nxsched_reassess_timer().
  *
 *   This function (1) cancels the current timer, (2) determines how much of
  *   the interval has elapsed, (3) completes any partially timed events
@@ -581,7 +581,7 @@ void nxsched_timer_expiration(void)
  ****************************************************************************/
 
 #ifdef CONFIG_SCHED_TICKLESS_ALARM
-unsigned int sched_timer_cancel(void)
+unsigned int nxsched_cancel_timer(void)
 {
   struct timespec ts;
   unsigned int elapsed;
@@ -631,7 +631,7 @@ unsigned int sched_timer_cancel(void)
   return nxsched_timer_process(elapsed, true);
 }
 #else
-unsigned int sched_timer_cancel(void)
+unsigned int nxsched_cancel_timer(void)
 {
   struct timespec ts;
   unsigned int ticks;
@@ -674,7 +674,7 @@ unsigned int sched_timer_cancel(void)
 #endif
 
 /****************************************************************************
- * Name:  sched_timer_resume
+ * Name:  nxsched_resume_timer
  *
  * Description:
  *   Re-assess the next deadline and restart the interval timer.  This is
@@ -688,13 +688,13 @@ unsigned int sched_timer_cancel(void)
  *   None.
  *
  * Assumptions:
- *   This function is called right after sched_timer_cancel().  If
+ *   This function is called right after nxsched_cancel_timer().  If
 *   CONFIG_SCHED_TICKLESS_ALARM=y, then g_stop_time must be the time
  *   when the timer was cancelled.
  *
  ****************************************************************************/
 
-void sched_timer_resume(void)
+void nxsched_resume_timer(void)
 {
   unsigned int nexttime;
 
@@ -713,7 +713,7 @@ void sched_timer_resume(void)
 }
 
 /****************************************************************************
- * Name:  sched_timer_reassess
+ * Name:  nxsched_reassess_timer
  *
  * Description:
  *   It is necessary to re-assess the timer interval in several
@@ -725,7 +725,7 @@ void sched_timer_resume(void)
  *   - When pre-emption is re-enabled.  A previous time slice may have
  *     expired while pre-emption was enabled and now needs to be executed.
  *
- *   In the original design, it was also planned that sched_timer_reassess()
+ *   In the original design, it was also planned that nxsched_reassess_timer()
  *   be called whenever there was a change at the head of the ready-to-run
  *   list.  That call was intended to establish a new time-slice for the
  *   newly activated task or to stop the timer if time-slicing is no longer
@@ -746,13 +746,13 @@ void sched_timer_resume(void)
  *
  ****************************************************************************/
 
-void sched_timer_reassess(void)
+void nxsched_reassess_timer(void)
 {
   unsigned int nexttime;
 
   /* Cancel and restart the timer */
 
-  nexttime = sched_timer_cancel();
+  nexttime = nxsched_cancel_timer();
   nxsched_timer_start(nexttime);
 }
 
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index c454f5e..ea32ff8 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -106,7 +106,7 @@ int sched_unlock(void)
           /* Note that we no longer have pre-emption disabled. */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-          sched_critmon_preemption(rtcb, false);
+          nxsched_critmon_preemption(rtcb, false);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
           sched_note_premption(rtcb, false);
@@ -142,7 +142,7 @@ int sched_unlock(void)
            * There are certain conditions that we must avoid by preventing
            * releasing the pending tasks while within the critical section
            * of other CPUs.  This logic does that and there is matching
-           * logic in sched_addreadytorun to avoid starting new tasks within
+           * logic in nxsched_add_readytorun to avoid starting new tasks within
            * the critical section (unless the CPU is the holder of the lock).
            *
            * REVISIT: If this CPU is only one that holds the IRQ lock, then
@@ -151,7 +151,7 @@ int sched_unlock(void)
            * BEFORE it clears IRQ lock.
            */
 
-          if (!sched_islocked_global() && !irq_cpu_locked(cpu) &&
+          if (!nxsched_islocked_global() && !irq_cpu_locked(cpu) &&
               g_pendingtasks.head != NULL)
             {
               up_release_pending();
@@ -182,7 +182,7 @@ int sched_unlock(void)
 #ifdef CONFIG_SCHED_TICKLESS
               else
                 {
-                  sched_timer_reassess();
+                  nxsched_reassess_timer();
                 }
 #endif
             }
@@ -206,7 +206,7 @@ int sched_unlock(void)
                * now
                */
 
-              sched_sporadic_lowpriority(rtcb);
+              nxsched_sporadic_lowpriority(rtcb);
 
 #ifdef CONFIG_SCHED_TICKLESS
               /* Make sure that the call to up_release_pending() did not
@@ -215,7 +215,7 @@ int sched_unlock(void)
 
               if (rtcb == current_task(cpu))
                 {
-                  sched_timer_reassess();
+                  nxsched_reassess_timer();
                 }
 #endif
             }
@@ -261,7 +261,7 @@ int sched_unlock(void)
           /* Note that we no longer have pre-emption disabled. */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-          sched_critmon_preemption(rtcb, false);
+          nxsched_critmon_preemption(rtcb, false);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
           sched_note_premption(rtcb, false);
@@ -313,7 +313,7 @@ int sched_unlock(void)
 #ifdef CONFIG_SCHED_TICKLESS
               else
                 {
-                  sched_timer_reassess();
+                  nxsched_reassess_timer();
                 }
 #endif
             }
@@ -337,7 +337,7 @@ int sched_unlock(void)
                * now
                */
 
-              sched_sporadic_lowpriority(rtcb);
+              nxsched_sporadic_lowpriority(rtcb);
 
 #ifdef CONFIG_SCHED_TICKLESS
               /* Make sure that the call to up_release_pending() did not
@@ -346,7 +346,7 @@ int sched_unlock(void)
 
               if (rtcb == this_task())
                 {
-                  sched_timer_reassess();
+                  nxsched_reassess_timer();
                 }
 #endif
             }
diff --git a/sched/sched/sched_verifytcb.c b/sched/sched/sched_verifytcb.c
index 887672b..3344acf 100644
--- a/sched/sched/sched_verifytcb.c
+++ b/sched/sched/sched_verifytcb.c
@@ -49,7 +49,7 @@
  ****************************************************************************/
 
 /****************************************************************************
- * Name: sched_verifytcb
+ * Name: nxsched_verify_tcb
  *
  * Description:
  *   Return true if the tcb refers to an active task; false if it is a stale
@@ -57,7 +57,7 @@
  *
  ****************************************************************************/
 
-bool sched_verifytcb(FAR struct tcb_s *tcb)
+bool nxsched_verify_tcb(FAR struct tcb_s *tcb)
 {
   /* Return true if the PID hashes to this TCB.  This will catch the case
    * where the task associated with the TCB has terminated (note that
diff --git a/sched/sched/sched_yield.c b/sched/sched/sched_yield.c
index 2387433..76c78fc 100644
--- a/sched/sched/sched_yield.c
+++ b/sched/sched/sched_yield.c
@@ -74,6 +74,6 @@ int sched_yield(void)
    * at the same priority.
    */
 
-  ret = nxsched_setpriority(rtcb, rtcb->sched_priority);
+  ret = nxsched_set_priority(rtcb, rtcb->sched_priority);
   return ret < 0 ? ERROR : OK;
 }
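
As the hunk above shows, sched_yield() is now just a re-application of the
caller's current priority, which rotates it behind equal-priority peers.  A
usage sketch, where do_poll_step() is a hypothetical placeholder:

#include <sched.h>
#include <stdbool.h>

extern bool do_poll_step(void);          /* Hypothetical placeholder */

void poll_until_done(void)
{
  while (!do_poll_step())
    {
      sched_yield();         /* Give equal-priority peers a turn */
    }
}
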
diff --git a/sched/semaphore/sem_holder.c b/sched/semaphore/sem_holder.c
index c91ab6a..c72ccd6 100644
--- a/sched/semaphore/sem_holder.c
+++ b/sched/semaphore/sem_holder.c
@@ -326,7 +326,7 @@ static int nxsem_boostholderprio(FAR struct semholder_s *pholder,
    * Perhaps its plan is to kill a thread, then destroy the semaphore.
    */
 
-  if (!sched_verifytcb(htcb))
+  if (!nxsched_verify_tcb(htcb))
     {
       swarn("WARNING: TCB 0x%08x is a stale handle, counts lost\n", htcb);
       nxsem_freeholder(sem, pholder);
@@ -377,7 +377,7 @@ static int nxsem_boostholderprio(FAR struct semholder_s *pholder,
            * switch may occur during up_block_task() processing.
            */
 
-          nxsched_setpriority(htcb, rtcb->sched_priority);
+          nxsched_set_priority(htcb, rtcb->sched_priority);
         }
       else
         {
@@ -415,7 +415,7 @@ static int nxsem_boostholderprio(FAR struct semholder_s *pholder,
        * will occur during up_block_task() processing.
        */
 
-      nxsched_setpriority(htcb, rtcb->sched_priority);
+      nxsched_set_priority(htcb, rtcb->sched_priority);
     }
 #endif
 
@@ -490,7 +490,7 @@ static int nxsem_restoreholderprio(FAR struct tcb_s *htcb,
    * Perhaps its plan is to kill a thread, then destroy the semaphore.
    */
 
-  if (!sched_verifytcb(htcb))
+  if (!nxsched_verify_tcb(htcb))
     {
       swarn("WARNING: TCB 0x%08x is a stale handle, counts lost\n", htcb);
 
@@ -570,7 +570,7 @@ static int nxsem_restoreholderprio(FAR struct tcb_s *htcb,
            * base_priority)
            */
 
-          nxsched_setpriority(htcb, rpriority);
+          nxsched_set_priority(htcb, rpriority);
         }
       else
         {
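
The boost and restore paths above reduce to one comparison: a semaphore holder
never runs below the highest priority waiter.  A minimal sketch of just that
decision, illustrative only, with bare integers in place of TCBs:

/* Return the priority the holder should run at: boosted to the waiter's
 * priority if the waiter outranks it, otherwise unchanged.
 */

static int inherit_priority(int holder_prio, int waiter_prio)
{
  return (waiter_prio > holder_prio) ? waiter_prio : holder_prio;
}
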
diff --git a/sched/signal/sig_default.c b/sched/signal/sig_default.c
index b30a7da..264509f 100644
--- a/sched/signal/sig_default.c
+++ b/sched/signal/sig_default.c
@@ -337,7 +337,7 @@ static void nxsig_stop_task(int signo)
 #endif
 
   /* Lock the scheduler so this thread is not pre-empted until after we
-   * call sched_suspend().
+   * call nxsched_suspend().
    */
 
   sched_lock();
@@ -377,7 +377,7 @@ static void nxsig_stop_task(int signo)
 
   /* Then, finally, suspend the final thread of the task group */
 
-  sched_suspend(rtcb);
+  nxsched_suspend(rtcb);
   sched_unlock();
 }
 #endif
diff --git a/sched/signal/sig_dispatch.c b/sched/signal/sig_dispatch.c
index 44c8960..782d5b1 100644
--- a/sched/signal/sig_dispatch.c
+++ b/sched/signal/sig_dispatch.c
@@ -461,7 +461,7 @@ int nxsig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info)
 #ifdef HAVE_GROUP_MEMBERS
           group_continue(stcb);
 #else
-          sched_continue(stcb);
+          nxsched_continue(stcb);
 #endif
         }
 #endif
diff --git a/sched/task/task_exit.c b/sched/task/task_exit.c
index 6558dce..c731786 100644
--- a/sched/task/task_exit.c
+++ b/sched/task/task_exit.c
@@ -104,11 +104,11 @@ int nxtask_exit(void)
    * context switch will definitely be necessary -- that must be done
    * by the architecture-specific logic.
    *
-   * sched_removereadytorun will mark the task at the head of the
+   * nxsched_remove_readytorun will mark the task at the head of the
   * ready-to-run list with state == TSTATE_TASK_RUNNING
    */
 
-  sched_removereadytorun(dtcb);
+  nxsched_remove_readytorun(dtcb);
 
   /* Get the new task at the head of the ready to run list */
 
@@ -119,7 +119,7 @@ int nxtask_exit(void)
 #endif
 
 #ifdef CONFIG_SMP
-  /* Because clearing the global IRQ control in sched_removereadytorun()
+  /* Because clearing the global IRQ control in nxsched_remove_readytorun()
    * was moved to nxsched_resume_scheduler(). So call the API here.
    */
 
@@ -153,7 +153,7 @@ int nxtask_exit(void)
    * behavior.
    */
 
-  sched_addblocked(dtcb, TSTATE_TASK_INACTIVE);
+  nxsched_add_blocked(dtcb, TSTATE_TASK_INACTIVE);
   ret = nxtask_terminate(dtcb->pid, true);
   rtcb->task_state = TSTATE_TASK_RUNNING;
 
@@ -177,7 +177,7 @@ int nxtask_exit(void)
 
   if (g_pendingtasks.head != NULL)
     {
-      sched_mergepending();
+      nxsched_merge_pending();
     }
 
   return ret;
diff --git a/sched/task/task_recover.c b/sched/task/task_recover.c
index 2a0efbf..483d2c8 100644
--- a/sched/task/task_recover.c
+++ b/sched/task/task_recover.c
@@ -97,7 +97,7 @@ void nxtask_recover(FAR struct tcb_s *tcb)
     {
       /* Stop current sporadic scheduling */
 
-      DEBUGVERIFY(sched_sporadic_stop(tcb));
+      DEBUGVERIFY(nxsched_stop_sporadic(tcb));
     }
 #endif
 }
diff --git a/sched/task/task_restart.c b/sched/task/task_restart.c
index b4a6929..247fe3a 100644
--- a/sched/task/task_restart.c
+++ b/sched/task/task_restart.c
@@ -117,7 +117,7 @@ int task_restart(pid_t pid)
    * that CPU, the restart takes effect.
    */
 
-  cpu = sched_cpu_pause(&tcb->cmn);
+  cpu = nxsched_pause_cpu(&tcb->cmn);
 #endif /* CONFIG_SMP */
 
   /* Try to recover from any bad states */
diff --git a/sched/task/task_terminate.c b/sched/task/task_terminate.c
index ca1e1fc..127ca5b 100644
--- a/sched/task/task_terminate.c
+++ b/sched/task/task_terminate.c
@@ -142,7 +142,7 @@ int nxtask_terminate(pid_t pid, bool nonblocking)
    * the case, then we will pause the CPU that the thread is running on.
    */
 
-  cpu = sched_cpu_pause(dtcb);
+  cpu = nxsched_pause_cpu(dtcb);
 
   /* Get the task list associated with the thread's state and CPU */
 
diff --git a/sched/wdog/wd_cancel.c b/sched/wdog/wd_cancel.c
index b387386..f7354bf 100644
--- a/sched/wdog/wd_cancel.c
+++ b/sched/wdog/wd_cancel.c
@@ -121,7 +121,7 @@ int wd_cancel(WDOG_ID wdog)
            * interval event.
            */
 
-          sched_timer_reassess();
+          nxsched_reassess_timer();
         }
 
       /* Mark the watchdog inactive */
diff --git a/sched/wdog/wd_start.c b/sched/wdog/wd_start.c
index cf9cb27..19d2a2d 100644
--- a/sched/wdog/wd_start.c
+++ b/sched/wdog/wd_start.c
@@ -236,7 +236,7 @@ int wd_start(WDOG_ID wdog, int32_t delay, wdentry_t wdentry,  int argc, ...)
    * could even remove it).
    */
 
-  sched_timer_cancel();
+  nxsched_cancel_timer();
 #endif
 
   /* Do the easy case first -- when the watchdog timer queue is empty. */
@@ -340,7 +340,7 @@ int wd_start(WDOG_ID wdog, int32_t delay, wdentry_t wdentry,  int argc, ...)
    * new delay.
    */
 
-  sched_timer_resume();
+  nxsched_resume_timer();
 #endif
 
   leave_critical_section(flags);
diff --git a/sched/wqueue/kwork_inherit.c b/sched/wqueue/kwork_inherit.c
index 75d95a4..7957409 100644
--- a/sched/wqueue/kwork_inherit.c
+++ b/sched/wqueue/kwork_inherit.c
@@ -109,7 +109,7 @@ static void lpwork_boostworker(pid_t wpid, uint8_t reqprio)
            * sched_unblock() processing.
            */
 
-          nxsched_setpriority(wtcb, reqprio);
+          nxsched_set_priority(wtcb, reqprio);
         }
       else
         {
@@ -146,7 +146,7 @@ static void lpwork_boostworker(pid_t wpid, uint8_t reqprio)
        * sched_unlock() processing.
        */
 
-      nxsched_setpriority(wtcb, reqprio);
+      nxsched_set_priority(wtcb, reqprio);
     }
 #endif
 }
@@ -250,7 +250,7 @@ static void lpwork_restoreworker(pid_t wpid, uint8_t reqprio)
            * base_priority)
            */
 
-          nxsched_setpriority(wtcb, wpriority);
+          nxsched_set_priority(wtcb, wpriority);
         }
       else
         {

