nuttx-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From gn...@apache.org
Subject [incubator-nuttx] branch master updated: Porting arch/armv8-m support
Date Sun, 26 Apr 2020 13:43:45 GMT
This is an automated email from the ASF dual-hosted git repository.

gnutt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new 2376d8a  Porting arch/armv8-m support
2376d8a is described below

commit 2376d8a2661b2cefecf64649b286ba3f42c2d7f8
Author: qiaowei <qiaowei@xiaomi.com>
AuthorDate: Wed Apr 22 10:09:50 2020 +0800

    Porting arch/armv8-m support
    
    1. Add dsp extension; float point based on hardware and software.
    2. Delete folder "iar"
    3. Add tool chain for cortex-M23 and cortex-M35p
    
    Signed-off-by: qiaowei <qiaowei@xiaomi.com>
    Change-Id: I5bfc78abb025adb0ad4fae37e2b444915f477fe7
---
 arch/arm/Kconfig                                   |  59 +-
 arch/arm/include/armv8-m/irq.h                     | 401 +++++++++
 arch/arm/include/armv8-m/irq_cmnvector.h           | 152 ++++
 arch/arm/include/armv8-m/irq_lazyfpu.h             | 170 ++++
 arch/arm/include/armv8-m/nvicpri.h                 |  81 ++
 arch/arm/include/armv8-m/spinlock.h                |  24 +
 arch/arm/include/armv8-m/syscall.h                 | 248 ++++++
 arch/arm/include/irq.h                             |   2 +
 arch/arm/include/setjmp.h                          |   2 +-
 arch/arm/include/syscall.h                         |   2 +
 arch/arm/include/types.h                           |   2 +-
 arch/arm/src/Makefile                              |   2 +
 arch/arm/src/armv8-m/Kconfig                       | 243 ++++++
 arch/arm/src/armv8-m/Toolchain.defs                | 274 ++++++
 arch/arm/src/armv8-m/barriers.h                    |  42 +
 arch/arm/src/armv8-m/dwt.h                         | 191 +++++
 arch/arm/src/armv8-m/etm.h                         | 916 +++++++++++++++++++++
 arch/arm/src/armv8-m/exc_return.h                  | 104 +++
 arch/arm/src/armv8-m/fpb.h                         | 167 ++++
 arch/arm/src/armv8-m/itm.h                         | 184 +++++
 .../{include/irq.h => src/armv8-m/itm_syslog.h}    |  58 +-
 arch/arm/src/armv8-m/mpu.h                         | 446 ++++++++++
 arch/arm/src/armv8-m/nvic.h                        | 696 ++++++++++++++++
 arch/arm/src/armv8-m/psr.h                         |  72 ++
 arch/arm/src/armv8-m/ram_vectors.h                 | 103 +++
 arch/arm/src/armv8-m/svcall.h                      | 131 +++
 arch/arm/src/armv8-m/systick.h                     |  76 ++
 arch/arm/src/armv8-m/tpi.h                         | 200 +++++
 arch/arm/src/armv8-m/up_assert.c                   | 457 ++++++++++
 arch/arm/src/armv8-m/up_blocktask.c                | 147 ++++
 arch/arm/src/armv8-m/up_cache.c                    | 823 ++++++++++++++++++
 arch/arm/src/armv8-m/up_copyarmstate.c             |  90 ++
 arch/arm/src/armv8-m/up_copyfullstate.c            |  62 ++
 arch/arm/src/armv8-m/up_doirq.c                    |  90 ++
 arch/arm/src/armv8-m/up_exception.S                | 331 ++++++++
 arch/arm/src/armv8-m/up_fetchadd.S                 | 242 ++++++
 arch/arm/src/armv8-m/up_fpu.S                      | 270 ++++++
 arch/arm/src/armv8-m/up_fullcontextrestore.S       |  79 ++
 arch/arm/src/armv8-m/up_hardfault.c                | 136 +++
 arch/arm/src/armv8-m/up_initialstate.c             | 144 ++++
 arch/arm/src/armv8-m/up_itm.c                      | 156 ++++
 arch/arm/src/armv8-m/up_itm_syslog.c               | 192 +++++
 arch/arm/src/armv8-m/up_lazyexception.S            | 350 ++++++++
 arch/arm/src/armv8-m/up_memfault.c                 |  77 ++
 arch/arm/src/armv8-m/up_mpu.c                      | 388 +++++++++
 arch/arm/src/armv8-m/up_ramvec_attach.c            |  95 +++
 arch/arm/src/armv8-m/up_ramvec_initialize.c        | 154 ++++
 arch/arm/src/armv8-m/up_releasepending.c           | 120 +++
 arch/arm/src/armv8-m/up_reprioritizertr.c          | 174 ++++
 arch/arm/src/armv8-m/up_saveusercontext.S          |  88 ++
 arch/arm/src/armv8-m/up_schedulesigaction.c        | 435 ++++++++++
 arch/arm/src/armv8-m/up_setjmp.S                   | 162 ++++
 arch/arm/src/armv8-m/up_sigdeliver.c               | 196 +++++
 arch/arm/src/armv8-m/up_signal_dispatch.c          |  76 ++
 arch/arm/src/armv8-m/up_signal_handler.S           | 103 +++
 arch/arm/src/armv8-m/up_stackcheck.c               | 114 +++
 arch/arm/src/armv8-m/up_svcall.c                   | 500 +++++++++++
 arch/arm/src/armv8-m/up_switchcontext.S            |  81 ++
 arch/arm/src/armv8-m/up_systemreset.c              |  66 ++
 arch/arm/src/armv8-m/up_systick.c                  | 310 +++++++
 arch/arm/src/armv8-m/up_testset.S                  | 108 +++
 arch/arm/src/armv8-m/up_trigger_irq.c              |  83 ++
 arch/arm/src/armv8-m/up_unblocktask.c              | 131 +++
 .../syscall.h => src/armv8-m/up_vectors.c}         |  92 +--
 arch/arm/src/armv8-m/vfork.S                       | 126 +++
 arch/arm/src/common/up_internal.h                  |   6 +-
 66 files changed, 12209 insertions(+), 93 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 671a6c0..23e90be 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -517,10 +517,6 @@ config ARCH_CORTEXM0
 	select ARCH_HAVE_RESET
 	select ARCH_HAVE_HARDFAULT_DEBUG
 
-config ARCH_CORTEXM23
-	bool
-	default n
-
 config ARCH_ARMV7M
 	bool
 	default n
@@ -540,10 +536,6 @@ config ARCH_CORTEXM3
 	select ARCH_HAVE_HARDFAULT_DEBUG
 	select ARCH_HAVE_MEMFAULT_DEBUG
 
-config ARCH_CORTEXM33
-	bool
-	default n
-
 config ARCH_CORTEXM4
 	bool
 	default n
@@ -648,6 +640,53 @@ config ARCH_CORTEXR7
 	select ARCH_HAVE_MPU
 	select ARCH_HAVE_TESTSET
 
+config ARCH_ARMV8M
+	bool
+	default n
+	select ARCH_HAVE_SETJMP
+
+config ARCH_CORTEXM23
+	bool
+	default n
+	select ARCH_ARMV8M
+	select ARCH_HAVE_IRQPRIO
+	select ARCH_HAVE_IRQTRIGGER
+	select ARCH_HAVE_RAMVECTORS
+	select ARCH_HAVE_LAZYFPU
+	select ARCH_HAVE_HIPRI_INTERRUPT
+	select ARCH_HAVE_RESET
+	select ARCH_HAVE_TESTSET
+	select ARCH_HAVE_HARDFAULT_DEBUG
+	select ARCH_HAVE_MEMFAULT_DEBUG
+
+config ARCH_CORTEXM33
+	bool
+	default n
+	select ARCH_ARMV8M
+	select ARCH_HAVE_IRQPRIO
+	select ARCH_HAVE_IRQTRIGGER
+	select ARCH_HAVE_RAMVECTORS
+	select ARCH_HAVE_LAZYFPU
+	select ARCH_HAVE_HIPRI_INTERRUPT
+	select ARCH_HAVE_RESET
+	select ARCH_HAVE_TESTSET
+	select ARCH_HAVE_HARDFAULT_DEBUG
+	select ARCH_HAVE_MEMFAULT_DEBUG
+
+config ARCH_CORTEXM35P
+	bool
+	default n
+	select ARCH_ARMV8M
+	select ARCH_HAVE_IRQPRIO
+	select ARCH_HAVE_IRQTRIGGER
+	select ARCH_HAVE_RAMVECTORS
+	select ARCH_HAVE_LAZYFPU
+	select ARCH_HAVE_HIPRI_INTERRUPT
+	select ARCH_HAVE_RESET
+	select ARCH_HAVE_TESTSET
+	select ARCH_HAVE_HARDFAULT_DEBUG
+	select ARCH_HAVE_MEMFAULT_DEBUG
+
 config ARCH_FAMILY
 	string
 	default "arm"		if ARCH_ARM7TDMI || ARCH_ARM926EJS || ARCH_ARM920T
@@ -655,6 +694,7 @@ config ARCH_FAMILY
 	default "armv7-a"	if ARCH_ARMV7A
 	default "armv7-m"	if ARCH_ARMV7M
 	default "armv7-r"	if ARCH_ARMV7R
+	default "armv8-m"	if ARCH_ARMV8M
 
 config ARCH_CHIP
 	string
@@ -845,6 +885,9 @@ endif
 if ARCH_ARMV7R
 source arch/arm/src/armv7-r/Kconfig
 endif
+if ARCH_ARMV8M
+source arch/arm/src/armv8-m/Kconfig
+endif
 if ARCH_ARM7TDMI || ARCH_ARM920T || ARCH_ARM926EJS || ARCH_ARM1136J || ARCH_ARM1156T2 || ARCH_ARM1176JZ
 source arch/arm/src/arm/Kconfig
 endif
diff --git a/arch/arm/include/armv8-m/irq.h b/arch/arm/include/armv8-m/irq.h
new file mode 100755
index 0000000..69051ef
--- /dev/null
+++ b/arch/arm/include/armv8-m/irq.h
@@ -0,0 +1,401 @@
+/****************************************************************************
+ * arch/arm/include/armv8-m/irq.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/* This file should never be included directly but, rather, only indirectly
+ * through nuttx/irq.h
+ */
+
+#ifndef __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_H
+#define __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <nuttx/irq.h>
+#ifndef __ASSEMBLY__
+#  include <nuttx/compiler.h>
+#  include <arch/armv8-m/nvicpri.h>
+#  include <stdint.h>
+#endif
+
+/* Included implementation-dependent register save structure layouts */
+
+#ifndef CONFIG_ARMV8M_LAZYFPU
+#  include <arch/armv8-m/irq_cmnvector.h>
+#else
+#  include <arch/armv8-m/irq_lazyfpu.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+/* Configuration ************************************************************/
+
+/* If this is a kernel build, how many nested system calls should we
+ * support?
+ */
+
+#ifndef CONFIG_SYS_NNEST
+#  define CONFIG_SYS_NNEST 2
+#endif
+
+/* Alternate register names *************************************************/
+
+#define REG_A1              REG_R0
+#define REG_A2              REG_R1
+#define REG_A3              REG_R2
+#define REG_A4              REG_R3
+#define REG_V1              REG_R4
+#define REG_V2              REG_R5
+#define REG_V3              REG_R6
+#define REG_V4              REG_R7
+#define REG_V5              REG_R8
+#define REG_V6              REG_R9
+#define REG_V7              REG_R10
+#define REG_SB              REG_R9
+#define REG_SL              REG_R10
+#define REG_FP              REG_R11
+#define REG_IP              REG_R12
+#define REG_SP              REG_R13
+#define REG_LR              REG_R14
+#define REG_PC              REG_R15
+
+/* The PIC register is usually R10. It can be R9 if stack checking is enabled
+ * or if the user changes it with -mpic-register on the GCC command line.
+ */
+
+#define REG_PIC             REG_R10
+
+/****************************************************************************
+ * Public Types
+ ****************************************************************************/
+#ifndef __ASSEMBLY__
+
+/* This structure represents the return state from a system call */
+
+#ifdef CONFIG_LIB_SYSCALL
+struct xcpt_syscall_s
+{
+  uint32_t excreturn;   /* The EXC_RETURN value */
+  uint32_t sysreturn;   /* The return PC */
+};
+#endif
+
+/* The following structure is included in the TCB and defines the complete
+ * state of the thread.
+ */
+
+struct xcptcontext
+{
+  /* The following function pointer is non-zero if there
+   * are pending signals to be processed.
+   */
+
+  FAR void *sigdeliver; /* Actual type is sig_deliver_t */
+
+  /* These are saved copies of LR, PRIMASK, and xPSR used during
+   * signal processing.
+   *
+   * REVISIT:  Because there is only one copy of these save areas,
+   * only a single signal handler can be active.  This precludes
+   * queuing of signal actions.  As a result, signals received while
+   * another signal handler is executing will be ignored!
+   */
+
+  uint32_t saved_pc;
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+  uint32_t saved_basepri;
+#else
+  uint32_t saved_primask;
+#endif
+  uint32_t saved_xpsr;
+#ifdef CONFIG_BUILD_PROTECTED
+  uint32_t saved_lr;
+
+  /* This is the saved address to use when returning from a user-space
+   * signal handler.
+   */
+
+  uint32_t sigreturn;
+
+#endif
+
+#ifdef CONFIG_LIB_SYSCALL
+  /* The following array holds the return address and the exc_return value
+   * needed to return from each nested system call.
+   */
+
+  uint8_t nsyscalls;
+  struct xcpt_syscall_s syscall[CONFIG_SYS_NNEST];
+
+#endif
+
+  /* Register save area */
+
+  uint32_t regs[XCPTCONTEXT_REGS];
+};
+#endif
+
+/****************************************************************************
+ * Inline functions
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+
+/* Name: up_irq_save, up_irq_restore, and friends.
+ *
+ * NOTE: This function should never be called from application code and,
+ * as a general rule unless you really know what you are doing, this
+ * function should not be called directly from operating system code either:
+ * Typically, the wrapper functions, enter_critical_section() and
+ * leave_critical_section(), are probably what you really want.
+ */
+
+/* Get/set the PRIMASK register */
+
+static inline uint8_t getprimask(void) inline_function;
+static inline uint8_t getprimask(void)
+{
+  uint32_t primask;
+  __asm__ __volatile__
+    (
+     "\tmrs  %0, primask\n"
+     : "=r" (primask)
+     :
+     : "memory");
+
+  return (uint8_t)primask;
+}
+
+static inline void setprimask(uint32_t primask) inline_function;
+static inline void setprimask(uint32_t primask)
+{
+  __asm__ __volatile__
+    (
+      "\tmsr primask, %0\n"
+      :
+      : "r" (primask)
+      : "memory");
+}
+
+static inline void cpsie(void) inline_function;
+static inline void cpsie(void)
+{
+  __asm__ __volatile__ ("\tcpsie  i\n");
+}
+
+static inline void cpsid(void) inline_function;
+static inline void cpsid(void)
+{
+  __asm__ __volatile__ ("\tcpsid  i\n");
+}
+
+/* Get/set the BASEPRI register.  The BASEPRI register defines the minimum
+ * priority for exception processing. When BASEPRI is set to a nonzero
+ * value, it prevents the activation of all exceptions with the same or
+ * lower priority level as the BASEPRI value.
+ */
+
+static inline uint8_t getbasepri(void) inline_function;
+static inline uint8_t getbasepri(void)
+{
+  uint32_t basepri;
+
+  __asm__ __volatile__
+    (
+     "\tmrs  %0, basepri\n"
+     : "=r" (basepri)
+     :
+     : "memory");
+
+  return (uint8_t)basepri;
+}
+
+static inline void setbasepri(uint32_t basepri) inline_function;
+static inline void setbasepri(uint32_t basepri)
+{
+  __asm__ __volatile__
+    (
+      "\tmsr basepri, %0\n"
+      :
+      : "r" (basepri)
+      : "memory");
+}
+
+
+#  define raisebasepri(b) setbasepri(b);
+
+/* Disable IRQs */
+
+static inline void up_irq_disable(void) inline_function;
+static inline void up_irq_disable(void)
+{
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+  /* Probably raising priority */
+
+  raisebasepri(NVIC_SYSH_DISABLE_PRIORITY);
+#else
+  __asm__ __volatile__ ("\tcpsid  i\n");
+#endif
+}
+
+/* Save the current primask state & disable IRQs */
+
+static inline irqstate_t up_irq_save(void) inline_function;
+static inline irqstate_t up_irq_save(void)
+{
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+  /* Probably raising priority */
+
+  uint8_t basepri = getbasepri();
+  raisebasepri(NVIC_SYSH_DISABLE_PRIORITY);
+  return (irqstate_t)basepri;
+
+#else
+
+  unsigned short primask;
+
+  /* Return the current value of primask register and set
+   * bit 0 of the primask register to disable interrupts
+   */
+
+  __asm__ __volatile__
+    (
+     "\tmrs    %0, primask\n"
+     "\tcpsid  i\n"
+     : "=r" (primask)
+     :
+     : "memory");
+
+  return primask;
+#endif
+}
+
+/* Enable IRQs */
+
+static inline void up_irq_enable(void) inline_function;
+static inline void up_irq_enable(void)
+{
+  /* In this case, we are always retaining or lowering the priority value */
+
+  setbasepri(NVIC_SYSH_PRIORITY_MIN);
+  __asm__ __volatile__ ("\tcpsie  i\n");
+}
+
+/* Restore saved primask state */
+
+static inline void up_irq_restore(irqstate_t flags) inline_function;
+static inline void up_irq_restore(irqstate_t flags)
+{
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+  /* In this case, we are always retaining or lowering the priority value */
+
+  setbasepri((uint32_t)flags);
+
+#else
+  /* If bit 0 of the primask is 0, then we need to restore
+   * interrupts.
+   */
+
+  __asm__ __volatile__
+    (
+      "\ttst    %0, #1\n"
+      "\tbne.n  1f\n"
+      "\tcpsie  i\n"
+      "1:\n"
+      :
+      : "r" (flags)
+      : "memory");
+
+#endif
+}
+
+/* Get/set IPSR */
+
+static inline uint32_t getipsr(void) inline_function;
+static inline uint32_t getipsr(void)
+{
+  uint32_t ipsr;
+  __asm__ __volatile__
+    (
+     "\tmrs  %0, ipsr\n"
+     : "=r" (ipsr)
+     :
+     : "memory");
+
+  return ipsr;
+}
+
+/* Get/set CONTROL */
+
+static inline uint32_t getcontrol(void) inline_function;
+static inline uint32_t getcontrol(void)
+{
+  uint32_t control;
+  __asm__ __volatile__
+    (
+     "\tmrs  %0, control\n"
+     : "=r" (control)
+     :
+     : "memory");
+
+  return control;
+}
+
+static inline void setcontrol(uint32_t control) inline_function;
+static inline void setcontrol(uint32_t control)
+{
+  __asm__ __volatile__
+    (
+      "\tmsr control, %0\n"
+      :
+      : "r" (control)
+      : "memory");
+}
+
+#endif /* __ASSEMBLY__ */
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C"
+{
+#else
+#define EXTERN extern
+#endif
+
+#undef EXTERN
+#ifdef __cplusplus
+}
+#endif
+#endif
+
+#endif /* __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_H */
diff --git a/arch/arm/include/armv8-m/irq_cmnvector.h b/arch/arm/include/armv8-m/irq_cmnvector.h
new file mode 100755
index 0000000..24b02b3
--- /dev/null
+++ b/arch/arm/include/armv8-m/irq_cmnvector.h
@@ -0,0 +1,152 @@
+/****************************************************************************
+ * arch/arm/include/armv8-m/irq_cmnvector.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_CMNVECTOR_H
+#define __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_CMNVECTOR_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* IRQ Stack Frame Format: */
+
+/* The following additional registers are stored by the interrupt handling
+ * logic.
+ */
+
+#define REG_R13             (0)  /* R13 = SP at time of interrupt */
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+#  define REG_BASEPRI       (1)  /* BASEPRI */
+#else
+#  define REG_PRIMASK       (1)  /* PRIMASK */
+#endif
+#define REG_R4              (2)  /* R4 */
+#define REG_R5              (3)  /* R5 */
+#define REG_R6              (4)  /* R6 */
+#define REG_R7              (5)  /* R7 */
+#define REG_R8              (6)  /* R8 */
+#define REG_R9              (7)  /* R9 */
+#define REG_R10             (8)  /* R10 */
+#define REG_R11             (9)  /* R11 */
+#define REG_EXC_RETURN      (10) /* EXC_RETURN */
+#define SW_INT_REGS         (11)
+
+#ifdef CONFIG_ARCH_FPU
+
+/* If the MCU supports a floating point unit, then it will be necessary
+ * to save the state of the non-volatile registers before calling code
+ * that may save and overwrite them.
+ */
+
+#  define REG_S16           (SW_INT_REGS + 0)  /* S16 */
+#  define REG_S17           (SW_INT_REGS + 1)  /* S17 */
+#  define REG_S18           (SW_INT_REGS + 2)  /* S18 */
+#  define REG_S19           (SW_INT_REGS + 3)  /* S19 */
+#  define REG_S20           (SW_INT_REGS + 4)  /* S20 */
+#  define REG_S21           (SW_INT_REGS + 5)  /* S21 */
+#  define REG_S22           (SW_INT_REGS + 6)  /* S22 */
+#  define REG_S23           (SW_INT_REGS + 7)  /* S23 */
+#  define REG_S24           (SW_INT_REGS + 8)  /* S24 */
+#  define REG_S25           (SW_INT_REGS + 9)  /* S25 */
+#  define REG_S26           (SW_INT_REGS + 10) /* S26 */
+#  define REG_S27           (SW_INT_REGS + 11) /* S27 */
+#  define REG_S28           (SW_INT_REGS + 12) /* S28 */
+#  define REG_S29           (SW_INT_REGS + 13) /* S29 */
+#  define REG_S30           (SW_INT_REGS + 14) /* S30 */
+#  define REG_S31           (SW_INT_REGS + 15) /* S31 */
+#  define SW_FPU_REGS       (16)
+#else
+#  define SW_FPU_REGS       (0)
+#endif
+
+/* The total number of registers saved by software */
+
+#define SW_XCPT_REGS        (SW_INT_REGS + SW_FPU_REGS)
+#define SW_XCPT_SIZE        (4 * SW_XCPT_REGS)
+
+/* On entry into an IRQ, the hardware automatically saves the following
+ * registers on the stack in this (address) order:
+ */
+
+#define REG_R0              (SW_XCPT_REGS + 0) /* R0 */
+#define REG_R1              (SW_XCPT_REGS + 1) /* R1 */
+#define REG_R2              (SW_XCPT_REGS + 2) /* R2 */
+#define REG_R3              (SW_XCPT_REGS + 3) /* R3 */
+#define REG_R12             (SW_XCPT_REGS + 4) /* R12 */
+#define REG_R14             (SW_XCPT_REGS + 5) /* R14 = LR */
+#define REG_R15             (SW_XCPT_REGS + 6) /* R15 = PC */
+#define REG_XPSR            (SW_XCPT_REGS + 7) /* xPSR */
+#define HW_INT_REGS         (8)
+
+#ifdef CONFIG_ARCH_FPU
+
+/* If the FPU is enabled, the hardware also saves the volatile FP registers.
+ */
+
+#  define REG_S0            (SW_XCPT_REGS + 8)  /* S0 */
+#  define REG_S1            (SW_XCPT_REGS + 9)  /* S1 */
+#  define REG_S2            (SW_XCPT_REGS + 10) /* S2 */
+#  define REG_S3            (SW_XCPT_REGS + 11) /* S3 */
+#  define REG_S4            (SW_XCPT_REGS + 12) /* S4 */
+#  define REG_S5            (SW_XCPT_REGS + 13) /* S5 */
+#  define REG_S6            (SW_XCPT_REGS + 14) /* S6 */
+#  define REG_S7            (SW_XCPT_REGS + 15) /* S7 */
+#  define REG_S8            (SW_XCPT_REGS + 16) /* S8 */
+#  define REG_S9            (SW_XCPT_REGS + 17) /* S9 */
+#  define REG_S10           (SW_XCPT_REGS + 18) /* S10 */
+#  define REG_S11           (SW_XCPT_REGS + 19) /* S11 */
+#  define REG_S12           (SW_XCPT_REGS + 20) /* S12 */
+#  define REG_S13           (SW_XCPT_REGS + 21) /* S13 */
+#  define REG_S14           (SW_XCPT_REGS + 22) /* S14 */
+#  define REG_S15           (SW_XCPT_REGS + 23) /* S15 */
+#  define REG_FPSCR         (SW_XCPT_REGS + 24) /* FPSCR */
+#  define REG_FP_RESERVED   (SW_XCPT_REGS + 25) /* Reserved */
+#  define HW_FPU_REGS       (18)
+#else
+#  define HW_FPU_REGS       (0)
+#endif
+
+#define HW_XCPT_REGS        (HW_INT_REGS + HW_FPU_REGS)
+#define HW_XCPT_SIZE        (4 * HW_XCPT_REGS)
+
+#define XCPTCONTEXT_REGS    (HW_XCPT_REGS + SW_XCPT_REGS)
+#define XCPTCONTEXT_SIZE    (4 * XCPTCONTEXT_REGS)
+
+/****************************************************************************
+ * Public Types
+ ****************************************************************************/
+
+/****************************************************************************
+ * Inline functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#endif /* __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_CMNVECTOR_H */
diff --git a/arch/arm/include/armv8-m/irq_lazyfpu.h b/arch/arm/include/armv8-m/irq_lazyfpu.h
new file mode 100755
index 0000000..88c5e03
--- /dev/null
+++ b/arch/arm/include/armv8-m/irq_lazyfpu.h
@@ -0,0 +1,170 @@
+/****************************************************************************
+ * arch/arm/include/armv8-m/irq_lazyfpu.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_LAZYFPU_H
+#define __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_LAZYFPU_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* IRQ Stack Frame Format: */
+
+/* The following additional registers are stored by the interrupt handling
+ * logic.
+ */
+
+#define REG_R13             (0)  /* R13 = SP at time of interrupt */
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+#  define REG_BASEPRI       (1)  /* BASEPRI */
+#else
+#  define REG_PRIMASK       (1)  /* PRIMASK */
+#endif
+#define REG_R4              (2)  /* R4 */
+#define REG_R5              (3)  /* R5 */
+#define REG_R6              (4)  /* R6 */
+#define REG_R7              (5)  /* R7 */
+#define REG_R8              (6)  /* R8 */
+#define REG_R9              (7)  /* R9 */
+#define REG_R10             (8)  /* R10 */
+#define REG_R11             (9)  /* R11 */
+
+#ifdef CONFIG_BUILD_PROTECTED
+#  define REG_EXC_RETURN    (10) /* EXC_RETURN */
+#  define SW_INT_REGS       (11)
+#else
+#  define SW_INT_REGS       (10)
+#endif
+
+/* If the MCU supports a floating point unit, then it will be necessary
+ * to save the state of the FPU status register and data registers on
+ * each context switch.  These registers are not saved during interrupt
+ * level processing, however. So, as a consequence, floating point
+ * operations may NOT be performed in interrupt handlers.
+ *
+ * The FPU provides an extension register file containing 32 single-
+ * precision registers. These can be viewed as:
+ *
+ * - Sixteen 64-bit doubleword registers, D0-D15
+ * - Thirty-two 32-bit single-word registers, S0-S31
+ *   S<2n> maps to the least significant half of D<n>
+ *   S<2n+1> maps to the most significant half of D<n>.
+ */
+
+#ifdef CONFIG_ARCH_FPU
+#  define REG_D0            (SW_INT_REGS+0)  /* D0 */
+#  define REG_S0            (SW_INT_REGS+0)  /* S0 */
+#  define REG_S1            (SW_INT_REGS+1)  /* S1 */
+#  define REG_D1            (SW_INT_REGS+2)  /* D1 */
+#  define REG_S2            (SW_INT_REGS+2)  /* S2 */
+#  define REG_S3            (SW_INT_REGS+3)  /* S3 */
+#  define REG_D2            (SW_INT_REGS+4)  /* D2 */
+#  define REG_S4            (SW_INT_REGS+4)  /* S4 */
+#  define REG_S5            (SW_INT_REGS+5)  /* S5 */
+#  define REG_D3            (SW_INT_REGS+6)  /* D3 */
+#  define REG_S6            (SW_INT_REGS+6)  /* S6 */
+#  define REG_S7            (SW_INT_REGS+7)  /* S7 */
+#  define REG_D4            (SW_INT_REGS+8)  /* D4 */
+#  define REG_S8            (SW_INT_REGS+8)  /* S8 */
+#  define REG_S9            (SW_INT_REGS+9)  /* S9 */
+#  define REG_D5            (SW_INT_REGS+10) /* D5 */
+#  define REG_S10           (SW_INT_REGS+10) /* S10 */
+#  define REG_S11           (SW_INT_REGS+11) /* S11 */
+#  define REG_D6            (SW_INT_REGS+12) /* D6 */
+#  define REG_S12           (SW_INT_REGS+12) /* S12 */
+#  define REG_S13           (SW_INT_REGS+13) /* S13 */
+#  define REG_D7            (SW_INT_REGS+14) /* D7 */
+#  define REG_S14           (SW_INT_REGS+14) /* S14 */
+#  define REG_S15           (SW_INT_REGS+15) /* S15 */
+#  define REG_D8            (SW_INT_REGS+16) /* D8 */
+#  define REG_S16           (SW_INT_REGS+16) /* S16 */
+#  define REG_S17           (SW_INT_REGS+17) /* S17 */
+#  define REG_D9            (SW_INT_REGS+18) /* D9 */
+#  define REG_S18           (SW_INT_REGS+18) /* S18 */
+#  define REG_S19           (SW_INT_REGS+19) /* S19 */
+#  define REG_D10           (SW_INT_REGS+20) /* D10 */
+#  define REG_S20           (SW_INT_REGS+20) /* S20 */
+#  define REG_S21           (SW_INT_REGS+21) /* S21 */
+#  define REG_D11           (SW_INT_REGS+22) /* D11 */
+#  define REG_S22           (SW_INT_REGS+22) /* S22 */
+#  define REG_S23           (SW_INT_REGS+23) /* S23 */
+#  define REG_D12           (SW_INT_REGS+24) /* D12 */
+#  define REG_S24           (SW_INT_REGS+24) /* S24 */
+#  define REG_S25           (SW_INT_REGS+25) /* S25 */
+#  define REG_D13           (SW_INT_REGS+26) /* D13 */
+#  define REG_S26           (SW_INT_REGS+26) /* S26 */
+#  define REG_S27           (SW_INT_REGS+27) /* S27 */
+#  define REG_D14           (SW_INT_REGS+28) /* D14 */
+#  define REG_S28           (SW_INT_REGS+28) /* S28 */
+#  define REG_S29           (SW_INT_REGS+29) /* S29 */
+#  define REG_D15           (SW_INT_REGS+30) /* D15 */
+#  define REG_S30           (SW_INT_REGS+30) /* S30 */
+#  define REG_S31           (SW_INT_REGS+31) /* S31 */
+#  define REG_FPSCR         (SW_INT_REGS+32) /* Floating point status and control */
+#  define SW_FPU_REGS       (33)
+#else
+#  define SW_FPU_REGS       (0)
+#endif
+
+/* The total number of registers saved by software */
+
+#define SW_XCPT_REGS        (SW_INT_REGS + SW_FPU_REGS)
+#define SW_XCPT_SIZE        (4 * SW_XCPT_REGS)
+
+/* On entry into an IRQ, the hardware automatically saves the following
+ * registers on the stack in this (address) order:
+ */
+
+#define REG_R0              (SW_XCPT_REGS+0) /* R0 */
+#define REG_R1              (SW_XCPT_REGS+1) /* R1 */
+#define REG_R2              (SW_XCPT_REGS+2) /* R2 */
+#define REG_R3              (SW_XCPT_REGS+3) /* R3 */
+#define REG_R12             (SW_XCPT_REGS+4) /* R12 */
+#define REG_R14             (SW_XCPT_REGS+5) /* R14 = LR */
+#define REG_R15             (SW_XCPT_REGS+6) /* R15 = PC */
+#define REG_XPSR            (SW_XCPT_REGS+7) /* xPSR */
+
+#define HW_XCPT_REGS        (8)
+#define HW_XCPT_SIZE        (4 * HW_XCPT_REGS)
+
+#define XCPTCONTEXT_REGS    (HW_XCPT_REGS + SW_XCPT_REGS)
+#define XCPTCONTEXT_SIZE    (HW_XCPT_SIZE + SW_XCPT_SIZE)
+
+/****************************************************************************
+ * Public Types
+ ****************************************************************************/
+
+/****************************************************************************
+ * Inline functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#endif /* __ARCH_ARM_INCLUDE_ARMV8_M_IRQ_LAZYFPU_H */
diff --git a/arch/arm/include/armv8-m/nvicpri.h b/arch/arm/include/armv8-m/nvicpri.h
new file mode 100755
index 0000000..56aafb1
--- /dev/null
+++ b/arch/arm/include/armv8-m/nvicpri.h
@@ -0,0 +1,81 @@
+/************************************************************************************
+ * arch/arm/include/armv8-m/nvicpri.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_INCLUDE_ARMV8_M_NVICPRI_H
+#define __ARCH_ARM_INCLUDE_ARMV8_M_NVICPRI_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+#include <arch/chip/chip.h>
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/* If CONFIG_ARMV8M_USEBASEPRI is selected, then interrupts will be disabled
+ * by setting the BASEPRI register to NVIC_SYSH_DISABLE_PRIORITY so that most
+ * interrupts will not have execution priority.  SVCall must have execution
+ * priority in all cases.
+ *
+ * In the normal cases, interrupts are not nest-able and all interrupts run
+ * at an execution priority between NVIC_SYSH_PRIORITY_MIN and
+ * NVIC_SYSH_PRIORITY_MAX (with NVIC_SYSH_PRIORITY_MAX reserved for SVCall).
+ *
+ * If, in addition, CONFIG_ARCH_HIPRI_INTERRUPT is defined, then special
+ * high priority interrupts are supported.  These are not "nested" in the
+ * normal sense of the word.  These high priority interrupts can interrupt
+ * normal processing but execute outside of OS (although they can "get back
+ * into the game" via a PendSV interrupt).
+ *
+ * In the normal course of things, interrupts must occasionally be disabled
+ * using the up_irq_save() inline function to prevent contention in use of
+ * resources that may be shared between interrupt level and non-interrupt
+ * level logic.  Now the question arises, if we are using
+ * CONFIG_ARCH_HIPRI_INTERRUPT=y, do we disable all interrupts except
+ * SVCall (we cannot disable SVCall interrupts).  Or do we only disable the
+ * "normal" interrupts?
+ *
+ * If we are using the BASEPRI register to disable interrupts, then the
+ * answer is that we must disable ONLY the "normal interrupts".  That
+ * is because we cannot disable SVCALL interrupts and we cannot permit
+ * SVCall interrupts running at a higher priority than the high priority
+ * interrupts (otherwise, they will introduce jitter in the high priority
+ * interrupt response time.)
+ *
+ * Hence, if you need to disable the high priority interrupt, you will have
+ * to disable the interrupt either at the peripheral that generates the
+ * interrupt or at the NVIC.  Disabling global interrupts via the BASEPRI
+ * register cannot affect high priority interrupts.
+ */
+
+/* The high priority interrupt must be highest priority.  This prevents
+ * SVCALL handling from adding jitter to high priority interrupt response.
+ * Disabling interrupts will disable all interrupts EXCEPT SVCALL and the
+ * high priority interrupts.
+ */
+
+#define NVIC_SYSH_MAXNORMAL_PRIORITY  NVIC_SYSH_PRIORITY_DEFAULT
+#define NVIC_SYSH_HIGH_PRIORITY       (NVIC_SYSH_PRIORITY_DEFAULT - 2*NVIC_SYSH_PRIORITY_STEP)
+#define NVIC_SYSH_DISABLE_PRIORITY    NVIC_SYSH_PRIORITY_DEFAULT
+#define NVIC_SYSH_SVCALL_PRIORITY     (NVIC_SYSH_PRIORITY_DEFAULT - 1*NVIC_SYSH_PRIORITY_STEP)
+
+#endif /* __ARCH_ARM_INCLUDE_ARMV8_M_NVICPRI_H */
diff --git a/arch/arm/include/armv8-m/spinlock.h b/arch/arm/include/armv8-m/spinlock.h
new file mode 100755
index 0000000..4713fc2
--- /dev/null
+++ b/arch/arm/include/armv8-m/spinlock.h
@@ -0,0 +1,24 @@
+/****************************************************************************
+ * arch/arm/include/armv8-m/spinlock.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM_INCLUDE_ARMV8_M_SPINLOCK_H
+#define __ARCH_ARM_INCLUDE_ARMV8_M_SPINLOCK_H
+
+#endif /* __ARCH_ARM_INCLUDE_ARMV8_M_SPINLOCK_H */
diff --git a/arch/arm/include/armv8-m/syscall.h b/arch/arm/include/armv8-m/syscall.h
new file mode 100755
index 0000000..03ad475
--- /dev/null
+++ b/arch/arm/include/armv8-m/syscall.h
@@ -0,0 +1,248 @@
+/****************************************************************************
+ * arch/arm/include/armv8-m/syscall.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/* This file should never be included directly but, rather, only indirectly
+ * through include/syscall.h or include/sys/syscall.h
+ */
+
+#ifndef __ARCH_ARM_INCLUDE_ARMV8_M_SYSCALL_H
+#define __ARCH_ARM_INCLUDE_ARMV8_M_SYSCALL_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+#  include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* This is the value used as the argument to the SVC instruction.  It is not
+ * used.
+ */
+
+#define SYS_syscall 0x00
+#define SYS_smhcall 0xab
+
+/* The SYS_signal_handler_return is executed here... its value is not always
+ * available in this context and so is assumed to be 7.
+ */
+
+#ifndef SYS_signal_handler_return
+#  define SYS_signal_handler_return (7)
+#elif SYS_signal_handler_return != 7
+#  error "SYS_signal_handler_return was assumed to be 7"
+#endif
+
+/****************************************************************************
+ * Public Types
+ ****************************************************************************/
+
+/****************************************************************************
+ * Inline functions
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+
+/* SVC call with SYS_ call number and no parameters */
+
+static inline uintptr_t sys_call0(unsigned int nbr)
+{
+  register long reg0 __asm__("r0") = (long)(nbr);
+
+  __asm__ __volatile__
+  (
+    "svc %1"
+    : "=r"(reg0)
+    : "i"(SYS_syscall), "r"(reg0)
+    : "memory"
+  );
+
+  return reg0;
+}
+
+/* SVC call with SYS_ call number and one parameter */
+
+static inline uintptr_t sys_call1(unsigned int nbr, uintptr_t parm1)
+{
+  register long reg0 __asm__("r0") = (long)(nbr);
+  register long reg1 __asm__("r1") = (long)(parm1);
+
+  __asm__ __volatile__
+  (
+    "svc %1"
+    : "=r"(reg0)
+    : "i"(SYS_syscall), "r"(reg0), "r"(reg1)
+    : "memory"
+  );
+
+  return reg0;
+}
+
+/* SVC call with SYS_ call number and two parameters */
+
+static inline uintptr_t sys_call2(unsigned int nbr, uintptr_t parm1,
+                                  uintptr_t parm2)
+{
+  register long reg0 __asm__("r0") = (long)(nbr);
+  register long reg2 __asm__("r2") = (long)(parm2);
+  register long reg1 __asm__("r1") = (long)(parm1);
+
+  __asm__ __volatile__
+  (
+    "svc %1"
+    : "=r"(reg0)
+    : "i"(SYS_syscall), "r"(reg0), "r"(reg1), "r"(reg2)
+    : "memory"
+  );
+
+  return reg0;
+}
+
+/* SVC call with SYS_ call number and three parameters */
+
+static inline uintptr_t sys_call4(unsigned int nbr, uintptr_t parm1,
+                                  uintptr_t parm2, uintptr_t parm3,
+                                  uintptr_t parm4);
+static inline uintptr_t sys_call3(unsigned int nbr, uintptr_t parm1,
+                                  uintptr_t parm2, uintptr_t parm3)
+{
+  return sys_call4(nbr, parm1, parm2, parm3, 0);
+}
+
+/* SVC call with SYS_ call number and four parameters.
+ *
+ * NOTE the nonstandard parameter passing:  parm4 is in R4
+ */
+
+static inline uintptr_t sys_call4(unsigned int nbr, uintptr_t parm1,
+                                  uintptr_t parm2, uintptr_t parm3,
+                                  uintptr_t parm4)
+{
+  register long reg0 __asm__("r0") = (long)(nbr);
+  register long reg4 __asm__("r4") = (long)(parm4);
+  register long reg3 __asm__("r3") = (long)(parm3);
+  register long reg2 __asm__("r2") = (long)(parm2);
+  register long reg1 __asm__("r1") = (long)(parm1);
+
+  __asm__ __volatile__
+  (
+    "svc %1"
+    : "=r"(reg0)
+    : "i"(SYS_syscall), "r"(reg0), "r"(reg1), "r"(reg2),
+      "r"(reg3), "r"(reg4)
+    : "memory"
+  );
+
+  return reg0;
+}
+
+/* SVC call with SYS_ call number and five parameters.
+ *
+ * NOTE the nonstandard parameter passing:  parm4 and parm5 are in R4 and R5
+ */
+
+static inline uintptr_t sys_call6(unsigned int nbr, uintptr_t parm1,
+                                  uintptr_t parm2, uintptr_t parm3,
+                                  uintptr_t parm4, uintptr_t parm5,
+                                  uintptr_t parm6);
+static inline uintptr_t sys_call5(unsigned int nbr, uintptr_t parm1,
+                                  uintptr_t parm2, uintptr_t parm3,
+                                  uintptr_t parm4, uintptr_t parm5)
+{
+  return sys_call6(nbr, parm1, parm2, parm3, parm4, parm5, 0);
+}
+
+/* SVC call with SYS_ call number and six parameters.
+ *
+ * NOTE the nonstandard parameter passing:  parm4-parm6 are in R4-R6
+ */
+
+static inline uintptr_t sys_call6(unsigned int nbr, uintptr_t parm1,
+                                  uintptr_t parm2, uintptr_t parm3,
+                                  uintptr_t parm4, uintptr_t parm5,
+                                  uintptr_t parm6)
+{
+  register long reg0 __asm__("r0") = (long)(nbr);
+  register long reg6 __asm__("r6") = (long)(parm6);
+  register long reg5 __asm__("r5") = (long)(parm5);
+  register long reg4 __asm__("r4") = (long)(parm4);
+  register long reg3 __asm__("r3") = (long)(parm3);
+  register long reg2 __asm__("r2") = (long)(parm2);
+  register long reg1 __asm__("r1") = (long)(parm1);
+
+  __asm__ __volatile__
+  (
+    "svc %1"
+    : "=r"(reg0)
+    : "i"(SYS_syscall), "r"(reg0), "r"(reg1), "r"(reg2),
+      "r"(reg3), "r"(reg4), "r"(reg5), "r"(reg6)
+    : "memory"
+  );
+
+  return reg0;
+}
+
+/* semihosting(SMH) call with call number and one parameter */
+
+static inline long smh_call(unsigned int nbr, void *parm)
+{
+  register long reg0 __asm__("r0") = (long)(nbr);
+  register long reg1 __asm__("r1") = (long)(parm);
+
+  __asm__ __volatile__
+  (
+  "bkpt %1"
+    : "=r"(reg0)
+    : "i"(SYS_smhcall), "r"(reg0), "r"(reg1)
+    : "memory"
+  );
+
+  return reg0;
+}
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C"
+{
+#else
+#define EXTERN extern
+#endif
+
+#undef EXTERN
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ARCH_ARM_INCLUDE_ARMV8_M_SYSCALL_H */
diff --git a/arch/arm/include/irq.h b/arch/arm/include/irq.h
index 6c47d52..97d1556 100644
--- a/arch/arm/include/irq.h
+++ b/arch/arm/include/irq.h
@@ -63,6 +63,8 @@
 #  include <arch/armv7-r/irq.h>
 #elif defined(CONFIG_ARCH_ARMV7M)
 #  include <arch/armv7-m/irq.h>
+#elif defined(CONFIG_ARCH_ARMV8M)
+#  include <arch/armv8-m/irq.h>
 #elif defined(CONFIG_ARCH_CORTEXM0)
 #  include <arch/armv6-m/irq.h>
 #else
diff --git a/arch/arm/include/setjmp.h b/arch/arm/include/setjmp.h
index 2b25067..1202b48 100644
--- a/arch/arm/include/setjmp.h
+++ b/arch/arm/include/setjmp.h
@@ -46,7 +46,7 @@
  * Public Types
  ****************************************************************************/
 
-#ifdef CONFIG_ARCH_ARMV7M
+#if defined(CONFIG_ARCH_ARMV7M) || defined(CONFIG_ARCH_ARMV8M)
 struct setjmp_buf_s
 {
   /* Note: core registers r0-r3 are caller-saved */
diff --git a/arch/arm/include/syscall.h b/arch/arm/include/syscall.h
index 324f889..69d7cae 100644
--- a/arch/arm/include/syscall.h
+++ b/arch/arm/include/syscall.h
@@ -52,6 +52,8 @@
 #  include <arch/armv7-r/syscall.h>
 #elif defined(CONFIG_ARCH_ARMV7M)
 #  include <arch/armv7-m/syscall.h>
+#elif defined(CONFIG_ARCH_ARMV8M)
+#  include <arch/armv8-m/syscall.h>
 #elif defined(CONFIG_ARCH_CORTEXM0)
 #  include <arch/armv6-m/syscall.h>
 #else
diff --git a/arch/arm/include/types.h b/arch/arm/include/types.h
index f2e4560..dda8c40 100644
--- a/arch/arm/include/types.h
+++ b/arch/arm/include/types.h
@@ -104,7 +104,7 @@ typedef unsigned int       _size_t;
  */
 
 #ifdef __thumb2__
-#if defined(CONFIG_ARMV7M_USEBASEPRI) || defined(CONFIG_ARCH_ARMV6M)
+#if defined(CONFIG_ARMV7M_USEBASEPRI) || defined(CONFIG_ARCH_ARMV6M) || defined(CONFIG_ARMV8M_USEBASEPRI)
 typedef unsigned char      irqstate_t;
 #else
 typedef unsigned short     irqstate_t;
diff --git a/arch/arm/src/Makefile b/arch/arm/src/Makefile
index 046304b..c1fd722 100644
--- a/arch/arm/src/Makefile
+++ b/arch/arm/src/Makefile
@@ -43,6 +43,8 @@ else ifeq ($(CONFIG_ARCH_ARMV7R),y)     # ARMv7-R
 ARCH_SUBDIR = armv7-r
 else ifeq ($(CONFIG_ARCH_ARMV7M),y)     # ARMv7-M
 ARCH_SUBDIR = armv7-m
+else ifeq ($(CONFIG_ARCH_ARMV8M),y)     # ARMv8-M
+ARCH_SUBDIR = armv8-m
 else ifeq ($(CONFIG_ARCH_CORTEXM0),y)   # Cortex-M0 is ARMv6-M
 ARCH_SUBDIR = armv6-m
 else                                    # ARM9, ARM7TDMI, etc.
diff --git a/arch/arm/src/armv8-m/Kconfig b/arch/arm/src/armv8-m/Kconfig
new file mode 100755
index 0000000..9d4a45a
--- /dev/null
+++ b/arch/arm/src/armv8-m/Kconfig
@@ -0,0 +1,243 @@
+#
+# For a description of the syntax of this configuration file,
+# see the file kconfig-language.txt in the NuttX tools repository.
+#
+
+comment "ARMV8M Configuration Options"
+
+config ARMV8M_HAVE_ICACHE
+	bool
+	default n
+
+config ARMV8M_HAVE_DCACHE
+	bool
+	default n
+
+config ARMV8M_LAZYFPU
+	bool "Lazy FPU storage"
+	default n
+	depends on ARCH_HAVE_LAZYFPU
+	---help---
+		There are two forms of the common vector logic.  There are pros and
+		cons to each option:
+
+		1) The standard common vector logic exploits features of the ARMv8-M
+		   architecture to save the all of floating registers on entry into
+		   each interrupt and then to restore the floating registers when
+		   the interrupt returns.  The primary advantage to this approach is
+		   that floating point operations are available in interrupt
+		   handling logic.  Since the volatile registers are preserved,
+		   operations on the floating point registers by interrupt handling
+		   logic has no ill effect.  The downside is, of course, that more
+		   stack operations are required on each interrupt to save and store
+		   the floating point registers.  Because of some special
+		   features of the ARMv8-M, this is not as much overhead as you might
+		   expect, but overhead nonetheless.
+
+		2) The lazy FPU common vector logic does not save or restore
+		   floating point registers on entry and exit from the interrupt
+		   handler. Rather, the floating point registers are not restored
+		   until it is absolutely necessary to do so when a context switch
+		   occurs and the interrupt handler will be returning to a different
+		   floating point context.  Since floating point registers are not
+		   protected, floating point operations must not be performed in
+		   interrupt handling logic.  Better interrupt performance is to be
+		   expected, however.
+
+		By default, the "standard" common vector logic is built.  This
+		option selects the alternate lazy FPU common vector logic.
+
+config ARMV8M_USEBASEPRI
+	bool "Use BASEPRI Register"
+	default y if ARCH_HIPRI_INTERRUPT
+	---help---
+		Use the BASEPRI register to enable and disable interrupts. By
+		default, the PRIMASK register is used for this purpose. This
+		usually results in hardfaults when supervisor calls are made.
+		Though, these hardfaults are properly handled by the RTOS, the
+		hardfaults can confuse some debuggers. With the BASEPRI
+		register, these hardfaults, will be avoided. For more details see
+		http://www.nuttx.org/doku.php?id=wiki:nxinternal:svcall
+
+		WARNING:  If CONFIG_ARCH_HIPRI_INTERRUPT is selected, then you
+		MUST select CONFIG_ARMV8M_USEBASEPRI.  The Kconfig dependencies
+		here will permit you to select an invalid configuration because it
+		cannot enforce that requirement.  If you create this invalid
+		configuration, you will encounter some problems that may be
+		very difficult to debug.
+
+config ARMV8M_ICACHE
+	bool "Use I-Cache"
+	default n
+	depends on ARMV8M_HAVE_ICACHE
+	select ARCH_ICACHE
+
+config ARMV8M_DCACHE
+	bool "Use D-Cache"
+	default n
+	depends on ARMV8M_HAVE_DCACHE
+	select ARCH_DCACHE
+
+config ARMV8M_DCACHE_WRITETHROUGH
+	bool "D-Cache Write-Through"
+	default n
+	depends on ARMV8M_DCACHE
+
+config ARMV8M_HAVE_ITCM
+	bool
+	default n
+
+config ARMV8M_HAVE_DTCM
+	bool
+	default n
+
+config ARMV8M_ITCM
+	bool "Use ITCM"
+	default n
+	depends on ARMV8M_HAVE_ITCM
+
+config ARMV8M_DTCM
+	bool "Use DTCM"
+	default n
+	depends on ARMV8M_HAVE_DTCM
+
+choice
+	prompt "Toolchain Selection"
+	default ARMV8M_TOOLCHAIN_GNU_EABIW if TOOLCHAIN_WINDOWS
+	default ARMV8M_TOOLCHAIN_GNU_EABIL if !TOOLCHAIN_WINDOWS
+
+config ARMV8M_TOOLCHAIN_ATOLLIC
+	bool "Atollic Lite/Pro for Windows"
+	depends on TOOLCHAIN_WINDOWS
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_BUILDROOT
+	bool "Buildroot (Cygwin or Linux)"
+	depends on !WINDOWS_NATIVE
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_CODEREDL
+	bool "CodeRed for Linux"
+	depends on HOST_LINUX
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_CODEREDW
+	bool "CodeRed for Windows"
+	depends on TOOLCHAIN_WINDOWS
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_CODESOURCERYL
+	bool "CodeSourcery GNU toolchain under Linux"
+	depends on HOST_LINUX
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_CODESOURCERYW
+	bool "CodeSourcery GNU toolchain under Windows"
+	depends on TOOLCHAIN_WINDOWS
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_DEVKITARM
+	bool "devkitARM GNU toolchain"
+	depends on TOOLCHAIN_WINDOWS
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_GNU_EABIL
+	bool "Generic GNU EABI toolchain under Linux (or other POSIX environment)"
+	depends on !WINDOWS_NATIVE
+	select ARCH_TOOLCHAIN_GNU
+	---help---
+		This option should work for any modern GNU toolchain (GCC 4.5 or newer)
+		configured for arm-none-eabi.
+
+config ARMV8M_TOOLCHAIN_GNU_EABIW
+	bool "Generic GNU EABI toolchain under Windows"
+	depends on TOOLCHAIN_WINDOWS
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_CLANGL
+	bool "Generic Clang toolchain under Linux (or other POSIX environment)"
+	depends on !WINDOWS_NATIVE
+	select ARCH_TOOLCHAIN_GNU
+
+config ARMV8M_TOOLCHAIN_CLANGW
+	bool "Generic Clang toolchain under Windows"
+	depends on TOOLCHAIN_WINDOWS
+	select ARCH_TOOLCHAIN_GNU
+	---help---
+		This option should work for any modern Clang toolchain
+		configured for arm-none-eabi.
+
+config ARMV8M_TOOLCHAIN_RAISONANCE
+	bool "STMicro Raisonance for Windows"
+	depends on TOOLCHAIN_WINDOWS
+	select ARCH_TOOLCHAIN_GNU
+
+endchoice
+
+config ARMV8M_OABI_TOOLCHAIN
+	bool "OABI (vs EABI)"
+	default n
+	depends on ARMV8M_TOOLCHAIN_BUILDROOT
+	---help---
+		Most of the older buildroot toolchains are OABI and are named
+		arm-nuttx-elf- vs. arm-nuttx-eabi-
+
+config ARMV8M_TARGET2_PREL
+	bool "R_ARM_TARGET2 is PC relative"
+	default n if !CXX_EXCEPTION
+	default y if CXX_EXCEPTION
+	depends on ELF
+	---help---
+		Perform a PC relative relocation for relocation type R_ARM_TARGET2
+
+config ARMV8M_HAVE_STACKCHECK
+	bool
+	default n
+
+config ARMV8M_STACKCHECK
+	bool "Check for stack overflow on each function call"
+	default n
+	depends on ARMV8M_HAVE_STACKCHECK
+	---help---
+		This check uses R10 to check for a stack overflow within each
+		function call. This has performance and code size impacts, but it
+		will be able to catch hard to find stack overflows.
+
+		Currently available only for the STM32, SAM3/4 and SAMA5D
+		architectures.  The changes are not complex and patches for
+		other architectures will be accepted.
+
+		This option requires that you are using a GCC toolchain and that
+		you also include -finstrument-functions in your CFLAGS when you
+		compile.  This addition to your CFLAGS should probably be added
+		to the definition of the CFLAGS in your board Make.defs file.
+
+config ARMV8M_ITMSYSLOG
+	bool "ITM SYSLOG support"
+	default n
+	select ARCH_SYSLOG
+	select SYSLOG
+	---help---
+		Enable hooks to support ITM syslog output.  This requires additional
+		MCU support in order to be used.  See arch/arm/src/armv8-m/itm_syslog.h
+		for additional initialization information.
+
+if ARMV8M_ITMSYSLOG
+
+config ARMV8M_ITMSYSLOG_PORT
+	int "ITM SYSLOG Port"
+	default 0
+	range 0 31
+
+config ARMV8M_ITMSYSLOG_SWODIV
+	int "ITM SYSLOG SWO divider"
+	default 15
+	range 1 8192
+
+endif # ARMV8M_ITMSYSLOG
+
+config ARMV8M_SYSTICK
+	bool "SysTick timer driver"
+	depends on TIMER
+	---help---
+		Enable SysTick timer driver.
diff --git a/arch/arm/src/armv8-m/Toolchain.defs b/arch/arm/src/armv8-m/Toolchain.defs
new file mode 100755
index 0000000..0415051
--- /dev/null
+++ b/arch/arm/src/armv8-m/Toolchain.defs
@@ -0,0 +1,274 @@
+############################################################################
+# arch/arm/src/armv8-m/Toolchain.defs
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+# Setup for the selected toolchain
+
+#
+# Handle old-style chip-specific toolchain names in the absence of
+# a new-style toolchain specification, force the selection of a single
+# toolchain and allow the selected toolchain to be overridden by a
+# command-line selection.
+#
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_ATOLLIC) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= ATOLLIC
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_BUILDROOT) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= BUILDROOT
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_CODEREDL) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= CODEREDL
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_CODEREDW) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= CODEREDW
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_CODESOURCERYL) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= CODESOURCERYL
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_CODESOURCERYW) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= CODESOURCERYW
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_DEVKITARM) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= DEVKITARM
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_RAISONANCE) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= RAISONANCE
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_GNU_EABIL) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= GNU_EABIL
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_GNU_EABIW) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= GNU_EABIW
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_CLANGL) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= CLANGL
+endif
+
+ifeq ($(filter y, \
+      $(CONFIG_ARMV8M_TOOLCHAIN_CLANGW) \
+    ),y)
+  CONFIG_ARMV8M_TOOLCHAIN ?= CLANGW
+endif
+
+#
+# Supported toolchains
+#
+# Each toolchain definition should set:
+#
+#  CROSSDEV         The GNU toolchain triple (command prefix)
+#  ARCROSSDEV       If required, an alternative prefix used when
+#                   invoking ar and nm.
+#  ARCHCPUFLAGS     CPU-specific flags selecting the instruction set
+#                   FPU options, etc.
+#  MAXOPTIMIZATION  The maximum optimization level that results in
+#                   reliable code generation.
+#
+
+ifeq ($(CONFIG_DEBUG_CUSTOMOPT),y)
+  MAXOPTIMIZATION := $(CONFIG_DEBUG_OPTLEVEL)
+endif
+
+# Parametrization for ARCHCPUFLAGS
+ifeq ($(CONFIG_ARCH_CORTEXM23),y)
+  TOOLCHAIN_MCPU     := -mcpu=cortex-m23
+  TOOLCHAIN_MARCH    := -march=armv8-m.base
+  TOOLCHAIN_MFLOAT   := -mfloat-abi=soft
+else ifeq ($(CONFIG_ARCH_CORTEXM33),y)
+  TOOLCHAIN_MCPU     := -mcpu=cortex-m33
+  TOOLCHAIN_MARCH    := -march=armv8-m.main+dsp
+  ifeq ($(CONFIG_ARCH_FPU),y)
+    TOOLCHAIN_MFLOAT := -mfpu=fpv5-sp-d16 -mfloat-abi=hard
+  else
+    TOOLCHAIN_MFLOAT := -mfloat-abi=soft
+  endif
+else ifeq ($(CONFIG_ARCH_CORTEXM35P),y)
+  TOOLCHAIN_MCPU     := -mcpu=cortex-m35p
+  TOOLCHAIN_MARCH    := -march=armv8-m.main+dsp
+  ifeq ($(CONFIG_ARCH_FPU),y)
+    TOOLCHAIN_MFLOAT := -mfpu=fpv5-sp-d16 -mfloat-abi=hard
+  else
+    TOOLCHAIN_MFLOAT := -mfloat-abi=soft
+  endif
+endif
+
+# Atollic toolchain under Windows
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),ATOLLIC)
+  CROSSDEV ?= arm-atollic-eabi-
+  ARCROSSDEV ?= arm-atollic-eabi-
+  MAXOPTIMIZATION ?= -Os
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+  ifeq ($(CONFIG_WINDOWS_CYGWIN),y)
+    WINTOOL = y
+  endif
+endif
+
+# NuttX buildroot under Linux or Cygwin
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),BUILDROOT)
+ifeq ($(CONFIG_ARMV8M_OABI_TOOLCHAIN),y)
+  CROSSDEV ?= arm-nuttx-elf-
+  ARCROSSDEV ?= arm-nuttx-elf-
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) $(TOOLCHAIN_MFLOAT)
+else
+  CROSSDEV ?= arm-nuttx-eabi-
+  ARCROSSDEV ?= arm-nuttx-eabi-
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+endif
+  MAXOPTIMIZATION ?= -Os
+endif
+
+# Code Red RedSuite under Linux
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),CODEREDL)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  MAXOPTIMIZATION ?= -Os
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+endif
+
+# Code Red RedSuite under Windows
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),CODEREDW)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  MAXOPTIMIZATION ?= -Os
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+  ifeq ($(CONFIG_WINDOWS_CYGWIN),y)
+    WINTOOL = y
+  endif
+endif
+
+# CodeSourcery under Linux
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),CODESOURCERYL)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  MAXOPTIMIZATION ?= -O2
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+endif
+
+# CodeSourcery under Windows
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),CODESOURCERYW)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  MAXOPTIMIZATION ?= -Os
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+  ifeq ($(CONFIG_WINDOWS_CYGWIN),y)
+    WINTOOL = y
+  endif
+endif
+
+# devkitARM under Windows
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),DEVKITARM)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  ifeq ($(CONFIG_WINDOWS_CYGWIN),y)
+    WINTOOL = y
+  endif
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+endif
+
+# Generic GNU EABI toolchain on OS X, Linux or any typical Posix system
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),GNU_EABIL)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  MAXOPTIMIZATION ?= -Os
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+endif
+
+# Generic GNU EABI toolchain under Windows
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),GNU_EABIW)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  MAXOPTIMIZATION ?= -Os
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+  ifeq ($(CONFIG_WINDOWS_CYGWIN),y)
+    WINTOOL = y
+  endif
+endif
+
+# Clang toolchain on OS X, Linux or any typical Posix system
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),CLANGL)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  MAXOPTIMIZATION ?= -Os
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+endif
+
+# Clang toolchain under Windows
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),CLANGW)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  MAXOPTIMIZATION ?= -Os
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+  ifeq ($(CONFIG_WINDOWS_CYGWIN),y)
+    WINTOOL = y
+  endif
+endif
+
+# Raisonance RIDE7 under Windows
+
+ifeq ($(CONFIG_ARMV8M_TOOLCHAIN),RAISONANCE)
+  CROSSDEV ?= arm-none-eabi-
+  ARCROSSDEV ?= arm-none-eabi-
+  ifeq ($(CONFIG_WINDOWS_CYGWIN),y)
+    WINTOOL = y
+  endif
+  ARCHCPUFLAGS = $(TOOLCHAIN_MCPU) -mthumb $(TOOLCHAIN_MFLOAT)
+endif
diff --git a/arch/arm/src/armv8-m/barriers.h b/arch/arm/src/armv8-m/barriers.h
new file mode 100755
index 0000000..10ffba2
--- /dev/null
+++ b/arch/arm/src/armv8-m/barriers.h
@@ -0,0 +1,42 @@
+/************************************************************************************
+ * arch/arm/src/armv8-m/barriers.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_COMMON_ARMV8_M_BARRIERS_H
+#define __ARCH_ARM_SRC_COMMON_ARMV8_M_BARRIERS_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/* ARMv8-M memory barriers */
+
+#define arm_isb(n) __asm__ __volatile__ ("isb " #n : : : "memory")
+#define arm_dsb(n) __asm__ __volatile__ ("dsb " #n : : : "memory")
+#define arm_dmb(n) __asm__ __volatile__ ("dmb " #n : : : "memory")
+
+#define ARM_DSB()  arm_dsb(15)
+#define ARM_ISB()  arm_isb(15)
+#define ARM_DMB()  arm_dmb(15)
+
+#endif /* __ARCH_ARM_SRC_COMMON_ARMV8_M_BARRIERS_H */
diff --git a/arch/arm/src/armv8-m/dwt.h b/arch/arm/src/armv8-m/dwt.h
new file mode 100755
index 0000000..7df90d7
--- /dev/null
+++ b/arch/arm/src/armv8-m/dwt.h
@@ -0,0 +1,191 @@
+/***************************************************************************************
+ * arch/arm/src/armv8-m/dwt.h
+ *
+ *   Copyright (c) 2009 - 2013 ARM LIMITED
+ *
+ *  All rights reserved.
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions are met:
+ *
+ *  - Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  - Neither the name of ARM nor the names of its contributors may be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *  ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *  POSSIBILITY OF SUCH DAMAGE.
+ *
+ *   Copyright (C) 2014 Pierre-noel Bouteville . All rights reserved.
+ *   Author: Pierre-noel Bouteville <pnb990@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***********************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8_M_DWT_H
+#define __ARCH_ARM_SRC_ARMV8_M_DWT_H
+
+/***********************************************************************************************
+ * Pre-processor Definitions
+ ***********************************************************************************************/
+
+/* Data Watchpoint and Trace Register (DWT) Definitions ****************************************/
+
+/* DWT Register Base Address *******************************************************************/
+
+#define DWT_BASE                     (0xe0001000ul)
+
+/* DWT Register Addresses **********************************************************************/
+
+#define DWT_CTRL                     (DWT_BASE + 0x0000)  /* Control Register */
+#define DWT_CYCCNT                   (DWT_BASE + 0x0004)  /* Cycle Count Register */
+#define DWT_CPICNT                   (DWT_BASE + 0x0008)  /* CPI Count Register */
+#define DWT_EXCCNT                   (DWT_BASE + 0x000c)  /* Exception Overhead Count Register */
+#define DWT_SLEEPCNT                 (DWT_BASE + 0x0010)  /* Sleep Count Register */
+#define DWT_LSUCNT                   (DWT_BASE + 0x0014)  /* LSU Count Register */
+#define DWT_FOLDCNT                  (DWT_BASE + 0x0018)  /* Folded-instruction Count Register */
+#define DWT_PCSR                     (DWT_BASE + 0x001c)  /* Program Counter Sample Register */
+#define DWT_COMP0                    (DWT_BASE + 0x0020)  /* Comparator Register 0 */
+#define DWT_MASK0                    (DWT_BASE + 0x0024)  /* Mask Register 0 */
+#define DWT_FUNCTION0                (DWT_BASE + 0x0028)  /* Function Register 0 */
+#define DWT_COMP1                    (DWT_BASE + 0x0030)  /* Comparator Register 1 */
+#define DWT_MASK1                    (DWT_BASE + 0x0034)  /* Mask Register 1 */
+#define DWT_FUNCTION1                (DWT_BASE + 0x0038)  /* Function Register 1 */
+#define DWT_COMP2                    (DWT_BASE + 0x0040)  /* Comparator Register 2 */
+#define DWT_MASK2                    (DWT_BASE + 0x0044)  /* Mask Register 2 */
+#define DWT_FUNCTION2                (DWT_BASE + 0x0048)  /* Function Register 2 */
+#define DWT_COMP3                    (DWT_BASE + 0x0050)  /* Comparator Register 3 */
+#define DWT_MASK3                    (DWT_BASE + 0x0054)  /* Mask Register 3 */
+#define DWT_FUNCTION3                (DWT_BASE + 0x0058)  /* Function Register 3 */
+
+/* DWT Register Bit Field Definitions **********************************************************/
+
+/* DWT CTRL */
+
+#define DWT_CTRL_NUMCOMP_SHIFT        28
+#define DWT_CTRL_NUMCOMP_MASK         (0xFul << DWT_CTRL_NUMCOMP_SHIFT)
+#define DWT_CTRL_NOTRCPKT_SHIFT       27
+#define DWT_CTRL_NOTRCPKT_MASK        (0x1ul << DWT_CTRL_NOTRCPKT_SHIFT)
+#define DWT_CTRL_NOEXTTRIG_SHIFT      26
+#define DWT_CTRL_NOEXTTRIG_MASK       (0x1ul << DWT_CTRL_NOEXTTRIG_SHIFT)
+#define DWT_CTRL_NOCYCCNT_SHIFT       25
+#define DWT_CTRL_NOCYCCNT_MASK        (0x1ul << DWT_CTRL_NOCYCCNT_SHIFT)
+#define DWT_CTRL_NOPRFCNT_SHIFT       24
+#define DWT_CTRL_NOPRFCNT_MASK        (0x1ul << DWT_CTRL_NOPRFCNT_SHIFT)
+#define DWT_CTRL_CYCEVTENA_SHIFT      22
+#define DWT_CTRL_CYCEVTENA_MASK       (0x1ul << DWT_CTRL_CYCEVTENA_SHIFT)
+#define DWT_CTRL_FOLDEVTENA_SHIFT     21
+#define DWT_CTRL_FOLDEVTENA_MASK      (0x1ul << DWT_CTRL_FOLDEVTENA_SHIFT)
+#define DWT_CTRL_LSUEVTENA_SHIFT      20
+#define DWT_CTRL_LSUEVTENA_MASK       (0x1ul << DWT_CTRL_LSUEVTENA_SHIFT)
+#define DWT_CTRL_SLEEPEVTENA_SHIFT    19
+#define DWT_CTRL_SLEEPEVTENA_MASK     (0x1ul << DWT_CTRL_SLEEPEVTENA_SHIFT)
+#define DWT_CTRL_EXCEVTENA_SHIFT      18
+#define DWT_CTRL_EXCEVTENA_MASK       (0x1ul << DWT_CTRL_EXCEVTENA_SHIFT)
+#define DWT_CTRL_CPIEVTENA_SHIFT      17
+#define DWT_CTRL_CPIEVTENA_MASK       (0x1ul << DWT_CTRL_CPIEVTENA_SHIFT)
+#define DWT_CTRL_EXCTRCENA_SHIFT      16
+#define DWT_CTRL_EXCTRCENA_MASK       (0x1ul << DWT_CTRL_EXCTRCENA_SHIFT)
+#define DWT_CTRL_PCSAMPLENA_SHIFT     12
+#define DWT_CTRL_PCSAMPLENA_MASK      (0x1ul << DWT_CTRL_PCSAMPLENA_SHIFT)
+#define DWT_CTRL_SYNCTAP_SHIFT        10
+#define DWT_CTRL_SYNCTAP_MASK         (0x3ul << DWT_CTRL_SYNCTAP_SHIFT)
+#define DWT_CTRL_CYCTAP_SHIFT         9
+#define DWT_CTRL_CYCTAP_MASK          (0x1ul << DWT_CTRL_CYCTAP_SHIFT)
+#define DWT_CTRL_POSTINIT_SHIFT       5
+#define DWT_CTRL_POSTINIT_MASK        (0xful << DWT_CTRL_POSTINIT_SHIFT)
+#define DWT_CTRL_POSTPRESET_SHIFT     1
+#define DWT_CTRL_POSTPRESET_MASK      (0xful << DWT_CTRL_POSTPRESET_SHIFT)
+#define DWT_CTRL_CYCCNTENA_SHIFT      0
+#define DWT_CTRL_CYCCNTENA_MASK       (0x1ul << DWT_CTRL_CYCCNTENA_SHIFT)
+
+/* DWT CPICNT */
+
+#define DWT_CPICNT_CPICNT_SHIFT       0
+#define DWT_CPICNT_CPICNT_MASK        (0xfful << DWT_CPICNT_CPICNT_SHIFT)
+
+/* DWT EXCCNT */
+
+#define DWT_EXCCNT_EXCCNT_SHIFT       0
+#define DWT_EXCCNT_EXCCNT_MASK        (0xfful << DWT_EXCCNT_EXCCNT_SHIFT)
+
+/* DWT SLEEPCNT */
+
+#define DWT_SLEEPCNT_SLEEPCNT_SHIFT   0
+#define DWT_SLEEPCNT_SLEEPCNT_MASK    (0xfful << DWT_SLEEPCNT_SLEEPCNT_SHIFT)
+
+/* DWT LSUCNT */
+
+#define DWT_LSUCNT_LSUCNT_SHIFT       0
+#define DWT_LSUCNT_LSUCNT_MASK        (0xfful << DWT_LSUCNT_LSUCNT_SHIFT)
+
+/* DWT FOLDCNT */
+
+#define DWT_FOLDCNT_FOLDCNT_SHIFT     0
+#define DWT_FOLDCNT_FOLDCNT_MASK      (0xfful << DWT_FOLDCNT_FOLDCNT_SHIFT)
+
+/* DWT MASK */
+
+#define DWT_MASK_MASK_SHIFT           0
+#define DWT_MASK_MASK_MASK            (0x1ful << DWT_MASK_MASK_SHIFT)
+
+/* DWT FUNCTION */
+
+#define DWT_FUNCTION_MATCHED_SHIFT    24
+#define DWT_FUNCTION_MATCHED_MASK     (0x1ul << DWT_FUNCTION_MATCHED_SHIFT)
+#define DWT_FUNCTION_DATAVADDR1_SHIFT 16
+#define DWT_FUNCTION_DATAVADDR1_MASK  (0xful << DWT_FUNCTION_DATAVADDR1_SHIFT)
+#define DWT_FUNCTION_DATAVADDR0_SHIFT 12
+#define DWT_FUNCTION_DATAVADDR0_MASK  (0xful << DWT_FUNCTION_DATAVADDR0_SHIFT)
+#define DWT_FUNCTION_DATAVSIZE_SHIFT  10
+#define DWT_FUNCTION_DATAVSIZE_MASK   (0x3ul << DWT_FUNCTION_DATAVSIZE_SHIFT)
+#define DWT_FUNCTION_LNK1ENA_SHIFT    9
+#define DWT_FUNCTION_LNK1ENA_MASK     (0x1ul << DWT_FUNCTION_LNK1ENA_SHIFT)
+#define DWT_FUNCTION_DATAVMATCH_SHIFT 8
+#define DWT_FUNCTION_DATAVMATCH_MASK  (0x1ul << DWT_FUNCTION_DATAVMATCH_SHIFT)
+#define DWT_FUNCTION_CYCMATCH_SHIFT   7
+#define DWT_FUNCTION_CYCMATCH_MASK    (0x1ul << DWT_FUNCTION_CYCMATCH_SHIFT)
+#define DWT_FUNCTION_EMITRANGE_SHIFT  5
+#define DWT_FUNCTION_EMITRANGE_MASK   (0x1ul << DWT_FUNCTION_EMITRANGE_SHIFT)
+#define DWT_FUNCTION_FUNCTION_SHIFT   0
+#define DWT_FUNCTION_FUNCTION_MASK    (0xful << DWT_FUNCTION_FUNCTION_SHIFT)
+
+#endif /* __ARCH_ARM_SRC_ARMV8_M_DWT_H */
diff --git a/arch/arm/src/armv8-m/etm.h b/arch/arm/src/armv8-m/etm.h
new file mode 100755
index 0000000..af8d4e7
--- /dev/null
+++ b/arch/arm/src/armv8-m/etm.h
@@ -0,0 +1,916 @@
+/*******************************************************************************************************************************
+ * arch/arm/src/armv8-m/etm.h
+ *
+ *  Copyright 2014 Silicon Laboratories, Inc. http://www.silabs.com
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ *    claim that you wrote the original software.@n
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ *    misrepresented as being the original software.@n
+ * 3. This notice may not be removed or altered from any source distribution.
+ *
+ * DISCLAIMER OF WARRANTY/LIMITATION OF REMEDIES: Silicon Laboratories, Inc.
+ * has no obligation to support this Software. Silicon Laboratories, Inc. is
+ * providing the Software "AS IS", with no express or implied warranties of any
+ * kind, including, but not limited to, any implied warranties of
+ * merchantability or fitness for any particular purpose or warranties against
+ * infringement of any proprietary rights of a third party.
+ *
+ * Silicon Laboratories, Inc. will not be liable for any consequential,
+ * incidental, or special damages, or any other relief, or for any claim by
+ * any third party, arising from your use of this Software.
+ *
+ *   Copyright (C) 2014 Pierre-noel Bouteville . All rights reserved.
+ *   Copyright (C) 2014 Gregory Nutt. All rights reserved.
+ *   Authors: Pierre-noel Bouteville <pnb990@gmail.com>
+ *            Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *******************************************************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8_M_ETM_H
+#define __ARCH_ARM_SRC_ARMV8_M_ETM_H
+
+/*******************************************************************************************************************************
+ * Included Files
+ *******************************************************************************************************************************/
+
+/*******************************************************************************************************************************
+ * Pre-processor Definitions
+ *******************************************************************************************************************************/
+/* ETM Register Base Address ***************************************************************************************************/
+
+#define ETM_BASE                                      (0xe0041000ul)
+
+/* ETM Register Offsets ********************************************************************************************************/
+
+#define ETM_ETMCR_OFFSET                              0x0000 /* Main Control Register  */
+#define ETM_ETMCCR_OFFSET                             0x0004 /* Configuration Code Register  */
+#define ETM_ETMTRIGGER_OFFSET                         0x0008 /* ETM Trigger Event Register  */
+#define ETM_ETMSR_OFFSET                              0x0010 /* ETM Status Register  */
+#define ETM_ETMSCR_OFFSET                             0x0014 /* ETM System Configuration Register  */
+#define ETM_ETMTEEVR_OFFSET                           0x0020 /* ETM TraceEnable Event Register  */
+#define ETM_ETMTECR1_OFFSET                           0x0024 /* ETM Trace control Register  */
+#define ETM_ETMFFLR_OFFSET                            0x002c /* ETM Fifo Full Level Register  */
+#define ETM_ETMCNTRLDVR1_OFFSET                       0x0140 /* Counter Reload Value  */
+#define ETM_ETMSYNCFR_OFFSET                          0x01e0 /* Synchronisation Frequency Register  */
+#define ETM_ETMIDR_OFFSET                             0x01e4 /* ID Register  */
+#define ETM_ETMCCER_OFFSET                            0x01e8 /* Configuration Code Extension Register  */
+#define ETM_ETMTESSEICR_OFFSET                        0x01f0 /* TraceEnable Start/Stop EmbeddedICE Control Register  */
+#define ETM_ETMTSEVR_OFFSET                           0x01f8 /* Timestamp Event Register  */
+#define ETM_ETMTRACEIDR_OFFSET                        0x0200 /* CoreSight Trace ID Register  */
+#define ETM_ETMIDR2_OFFSET                            0x0208 /* ETM ID Register 2  */
+#define ETM_ETMPDSR_OFFSET                            0x0314 /* Device Power-down Status Register  */
+#define ETM_ETMISCIN_OFFSET                           0x0ee0 /* Integration Test Miscellaneous Inputs Register  */
+#define ETM_ITTRIGOUT_OFFSET                          0x0ee8 /* Integration Test Trigger Out Register  */
+#define ETM_ETMITATBCTR2_OFFSET                       0x0ef0 /* ETM Integration Test ATB Control 2 Register  */
+#define ETM_ETMITATBCTR0_OFFSET                       0x0ef8 /* ETM Integration Test ATB Control 0 Register  */
+#define ETM_ETMITCTRL_OFFSET                          0x0f00 /* ETM Integration Control Register  */
+#define ETM_ETMCLAIMSET_OFFSET                        0x0fa0 /* ETM Claim Tag Set Register  */
+#define ETM_ETMCLAIMCLR_OFFSET                        0x0fa4 /* ETM Claim Tag Clear Register  */
+#define ETM_ETMLAR_OFFSET                             0x0fb0 /* ETM Lock Access Register  */
+#define ETM_ETMLSR_OFFSET                             0x0fb4 /* Lock Status Register  */
+#define ETM_ETMAUTHSTATUS_OFFSET                      0x0fb8 /* ETM Authentication Status Register  */
+#define ETM_ETMDEVTYPE_OFFSET                         0x0fcc /* CoreSight Device Type Register  */
+#define ETM_ETMPIDR4_OFFSET                           0x0fd0 /* Peripheral ID4 Register  */
+#define ETM_ETMPIDR5_OFFSET                           0x0fd4 /* Peripheral ID5 Register  */
+#define ETM_ETMPIDR6_OFFSET                           0x0fd8 /* Peripheral ID6 Register  */
+#define ETM_ETMPIDR7_OFFSET                           0x0fdc /* Peripheral ID7 Register  */
+#define ETM_ETMPIDR0_OFFSET                           0x0fe0 /* Peripheral ID0 Register  */
+#define ETM_ETMPIDR1_OFFSET                           0x0fe4 /* Peripheral ID1 Register  */
+#define ETM_ETMPIDR2_OFFSET                           0x0fe8 /* Peripheral ID2 Register  */
+#define ETM_ETMPIDR3_OFFSET                           0x0fec /* Peripheral ID3 Register  */
+#define ETM_ETMCIDR0_OFFSET                           0x0ff0 /* Component ID0 Register  */
+#define ETM_ETMCIDR1_OFFSET                           0x0ff4 /* Component ID1 Register  */
+#define ETM_ETMCIDR2_OFFSET                           0x0ff8 /* Component ID2 Register  */
+#define ETM_ETMCIDR3_OFFSET                           0x0ffc /* Component ID3 Register  */
+
+/* ETM Register Addresses ******************************************************************************************************/
+
+#define ETM_ETMCR                                     (ETM_BASE+ETM_ETMCR_OFFSET)
+#define ETM_ETMCCR                                    (ETM_BASE+ETM_ETMCCR_OFFSET)
+#define ETM_ETMTRIGGER                                (ETM_BASE+ETM_ETMTRIGGER_OFFSET)
+#define ETM_ETMSR                                     (ETM_BASE+ETM_ETMSR_OFFSET)
+#define ETM_ETMSCR                                    (ETM_BASE+ETM_ETMSCR_OFFSET)
+#define ETM_ETMTEEVR                                  (ETM_BASE+ETM_ETMTEEVR_OFFSET)
+#define ETM_ETMTECR1                                  (ETM_BASE+ETM_ETMTECR1_OFFSET)
+#define ETM_ETMFFLR                                   (ETM_BASE+ETM_ETMFFLR_OFFSET)
+#define ETM_ETMCNTRLDVR1                              (ETM_BASE+ETM_ETMCNTRLDVR1_OFFSET)
+#define ETM_ETMSYNCFR                                 (ETM_BASE+ETM_ETMSYNCFR_OFFSET)
+#define ETM_ETMIDR                                    (ETM_BASE+ETM_ETMIDR_OFFSET)
+#define ETM_ETMCCER                                   (ETM_BASE+ETM_ETMCCER_OFFSET)
+#define ETM_ETMTESSEICR                               (ETM_BASE+ETM_ETMTESSEICR_OFFSET)
+#define ETM_ETMTSEVR                                  (ETM_BASE+ETM_ETMTSEVR_OFFSET)
+#define ETM_ETMTRACEIDR                               (ETM_BASE+ETM_ETMTRACEIDR_OFFSET)
+#define ETM_ETMIDR2                                   (ETM_BASE+ETM_ETMIDR2_OFFSET)
+#define ETM_ETMPDSR                                   (ETM_BASE+ETM_ETMPDSR_OFFSET)
+#define ETM_ETMISCIN                                  (ETM_BASE+ETM_ETMISCIN_OFFSET)
+#define ETM_ITTRIGOUT                                 (ETM_BASE+ETM_ITTRIGOUT_OFFSET)
+#define ETM_ETMITATBCTR2                              (ETM_BASE+ETM_ETMITATBCTR2_OFFSET)
+#define ETM_ETMITATBCTR0                              (ETM_BASE+ETM_ETMITATBCTR0_OFFSET)
+#define ETM_ETMITCTRL                                 (ETM_BASE+ETM_ETMITCTRL_OFFSET)
+#define ETM_ETMCLAIMSET                               (ETM_BASE+ETM_ETMCLAIMSET_OFFSET)
+#define ETM_ETMCLAIMCLR                               (ETM_BASE+ETM_ETMCLAIMCLR_OFFSET)
+#define ETM_ETMLAR                                    (ETM_BASE+ETM_ETMLAR_OFFSET)
+#define ETM_ETMLSR                                    (ETM_BASE+ETM_ETMLSR_OFFSET)
+#define ETM_ETMAUTHSTATUS                             (ETM_BASE+ETM_ETMAUTHSTATUS_OFFSET)
+#define ETM_ETMDEVTYPE                                (ETM_BASE+ETM_ETMDEVTYPE_OFFSET)
+#define ETM_ETMPIDR4                                  (ETM_BASE+ETM_ETMPIDR4_OFFSET)
+#define ETM_ETMPIDR5                                  (ETM_BASE+ETM_ETMPIDR5_OFFSET)
+#define ETM_ETMPIDR6                                  (ETM_BASE+ETM_ETMPIDR6_OFFSET)
+#define ETM_ETMPIDR7                                  (ETM_BASE+ETM_ETMPIDR7_OFFSET)
+#define ETM_ETMPIDR0                                  (ETM_BASE+ETM_ETMPIDR0_OFFSET)
+#define ETM_ETMPIDR1                                  (ETM_BASE+ETM_ETMPIDR1_OFFSET)
+#define ETM_ETMPIDR2                                  (ETM_BASE+ETM_ETMPIDR2_OFFSET)
+#define ETM_ETMPIDR3                                  (ETM_BASE+ETM_ETMPIDR3_OFFSET)
+#define ETM_ETMCIDR0                                  (ETM_BASE+ETM_ETMCIDR0_OFFSET)
+#define ETM_ETMCIDR1                                  (ETM_BASE+ETM_ETMCIDR1_OFFSET)
+#define ETM_ETMCIDR2                                  (ETM_BASE+ETM_ETMCIDR2_OFFSET)
+#define ETM_ETMCIDR3                                  (ETM_BASE+ETM_ETMCIDR3_OFFSET)
+
+/* ETM Register Bit Field Definitions ******************************************************************************************/
+
+/* Bit fields for ETM ETMCR */
+
+#define _ETM_ETMCR_RESETVALUE                         0x00000411UL                           /* Default value for ETM_ETMCR */
+#define _ETM_ETMCR_MASK                               0x10632FF1UL                           /* Mask for ETM_ETMCR */
+
+#define ETM_ETMCR_POWERDWN                            (0x1UL << 0)                           /* ETM Control in low power mode */
+#define _ETM_ETMCR_POWERDWN_SHIFT                     0                                      /* Shift value for ETM_POWERDWN */
+#define _ETM_ETMCR_POWERDWN_MASK                      0x1UL                                  /* Bit mask for ETM_POWERDWN */
+#define _ETM_ETMCR_POWERDWN_DEFAULT                   0x00000001UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_POWERDWN_DEFAULT                    (_ETM_ETMCR_POWERDWN_DEFAULT << 0)     /* Shifted mode DEFAULT for ETM_ETMCR */
+#define _ETM_ETMCR_PORTSIZE_SHIFT                     4                                      /* Shift value for ETM_PORTSIZE */
+#define _ETM_ETMCR_PORTSIZE_MASK                      0x70UL                                 /* Bit mask for ETM_PORTSIZE */
+#define _ETM_ETMCR_PORTSIZE_DEFAULT                   0x00000001UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_PORTSIZE_DEFAULT                    (_ETM_ETMCR_PORTSIZE_DEFAULT << 4)     /* Shifted mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_STALL                               (0x1UL << 7)                           /* Stall Processor */
+#define _ETM_ETMCR_STALL_SHIFT                        7                                      /* Shift value for ETM_STALL */
+#define _ETM_ETMCR_STALL_MASK                         0x80UL                                 /* Bit mask for ETM_STALL */
+#define _ETM_ETMCR_STALL_DEFAULT                      0x00000000UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_STALL_DEFAULT                       (_ETM_ETMCR_STALL_DEFAULT << 7)        /* Shifted mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_BRANCHOUTPUT                        (0x1UL << 8)                           /* Branch Output */
+#define _ETM_ETMCR_BRANCHOUTPUT_SHIFT                 8                                      /* Shift value for ETM_BRANCHOUTPUT */
+#define _ETM_ETMCR_BRANCHOUTPUT_MASK                  0x100UL                                /* Bit mask for ETM_BRANCHOUTPUT */
+#define _ETM_ETMCR_BRANCHOUTPUT_DEFAULT               0x00000000UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_BRANCHOUTPUT_DEFAULT                (_ETM_ETMCR_BRANCHOUTPUT_DEFAULT << 8) /* Shifted mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_DBGREQCTRL                          (0x1UL << 9)                           /* Debug Request Control */
+#define _ETM_ETMCR_DBGREQCTRL_SHIFT                   9                                      /* Shift value for ETM_DBGREQCTRL */
+#define _ETM_ETMCR_DBGREQCTRL_MASK                    0x200UL                                /* Bit mask for ETM_DBGREQCTRL */
+#define _ETM_ETMCR_DBGREQCTRL_DEFAULT                 0x00000000UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_DBGREQCTRL_DEFAULT                  (_ETM_ETMCR_DBGREQCTRL_DEFAULT << 9)   /* Shifted mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_ETMPROG                             (0x1UL << 10)                          /* ETM Programming */
+#define _ETM_ETMCR_ETMPROG_SHIFT                      10                                     /* Shift value for ETM_ETMPROG */
+#define _ETM_ETMCR_ETMPROG_MASK                       0x400UL                                /* Bit mask for ETM_ETMPROG */
+#define _ETM_ETMCR_ETMPROG_DEFAULT                    0x00000001UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_ETMPROG_DEFAULT                     (_ETM_ETMCR_ETMPROG_DEFAULT << 10)     /* Shifted mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_ETMPORTSEL                          (0x1UL << 11)                          /* ETM Port Selection */
+#define _ETM_ETMCR_ETMPORTSEL_SHIFT                   11                                     /* Shift value for ETM_ETMPORTSEL */
+#define _ETM_ETMCR_ETMPORTSEL_MASK                    0x800UL                                /* Bit mask for ETM_ETMPORTSEL */
+#define _ETM_ETMCR_ETMPORTSEL_DEFAULT                 0x00000000UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define _ETM_ETMCR_ETMPORTSEL_ETMLOW                  0x00000000UL                           /* Mode ETMLOW for ETM_ETMCR */
+#define _ETM_ETMCR_ETMPORTSEL_ETMHIGH                 0x00000001UL                           /* Mode ETMHIGH for ETM_ETMCR */
+#define ETM_ETMCR_ETMPORTSEL_DEFAULT                  (_ETM_ETMCR_ETMPORTSEL_DEFAULT << 11)  /* Shifted mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_ETMPORTSEL_ETMLOW                   (_ETM_ETMCR_ETMPORTSEL_ETMLOW << 11)   /* Shifted mode ETMLOW for ETM_ETMCR */
+#define ETM_ETMCR_ETMPORTSEL_ETMHIGH                  (_ETM_ETMCR_ETMPORTSEL_ETMHIGH << 11)  /* Shifted mode ETMHIGH for ETM_ETMCR */
+#define ETM_ETMCR_PORTMODE2                           (0x1UL << 13)                          /* Port Mode[2] */
+#define _ETM_ETMCR_PORTMODE2_SHIFT                    13                                     /* Shift value for ETM_PORTMODE2 */
+#define _ETM_ETMCR_PORTMODE2_MASK                     0x2000UL                               /* Bit mask for ETM_PORTMODE2 */
+#define _ETM_ETMCR_PORTMODE2_DEFAULT                  0x00000000UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_PORTMODE2_DEFAULT                   (_ETM_ETMCR_PORTMODE2_DEFAULT << 13)   /* Shifted mode DEFAULT for ETM_ETMCR */
+#define _ETM_ETMCR_PORTMODE_SHIFT                     16                                     /* Shift value for ETM_PORTMODE */
+#define _ETM_ETMCR_PORTMODE_MASK                      0x30000UL                              /* Bit mask for ETM_PORTMODE */
+#define _ETM_ETMCR_PORTMODE_DEFAULT                   0x00000000UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_PORTMODE_DEFAULT                    (_ETM_ETMCR_PORTMODE_DEFAULT << 16)    /* Shifted mode DEFAULT for ETM_ETMCR */
+#define _ETM_ETMCR_EPORTSIZE_SHIFT                    21                                     /* Shift value for ETM_EPORTSIZE */
+#define _ETM_ETMCR_EPORTSIZE_MASK                     0x600000UL                             /* Bit mask for ETM_EPORTSIZE */
+#define _ETM_ETMCR_EPORTSIZE_DEFAULT                  0x00000000UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_EPORTSIZE_DEFAULT                   (_ETM_ETMCR_EPORTSIZE_DEFAULT << 21)   /* Shifted mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_TSTAMPEN                            (0x1UL << 28)                          /* Time Stamp Enable */
+#define _ETM_ETMCR_TSTAMPEN_SHIFT                     28                                     /* Shift value for ETM_TSTAMPEN */
+#define _ETM_ETMCR_TSTAMPEN_MASK                      0x10000000UL                           /* Bit mask for ETM_TSTAMPEN */
+#define _ETM_ETMCR_TSTAMPEN_DEFAULT                   0x00000000UL                           /* Mode DEFAULT for ETM_ETMCR */
+#define ETM_ETMCR_TSTAMPEN_DEFAULT                    (_ETM_ETMCR_TSTAMPEN_DEFAULT << 28)    /* Shifted mode DEFAULT for ETM_ETMCR */
+
+/* Bit fields for ETM ETMCCR */
+
+#define _ETM_ETMCCR_RESETVALUE                        0x8C802000UL                             /* Default value for ETM_ETMCCR */
+#define _ETM_ETMCCR_MASK                              0x8FFFFFFFUL                             /* Mask for ETM_ETMCCR */
+
+#define _ETM_ETMCCR_ADRCMPPAIR_SHIFT                  0                                        /* Shift value for ETM_ADRCMPPAIR */
+#define _ETM_ETMCCR_ADRCMPPAIR_MASK                   0xFUL                                    /* Bit mask for ETM_ADRCMPPAIR */
+#define _ETM_ETMCCR_ADRCMPPAIR_DEFAULT                0x00000000UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_ADRCMPPAIR_DEFAULT                 (_ETM_ETMCCR_ADRCMPPAIR_DEFAULT << 0)    /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define _ETM_ETMCCR_DATACMPNUM_SHIFT                  4                                        /* Shift value for ETM_DATACMPNUM */
+#define _ETM_ETMCCR_DATACMPNUM_MASK                   0xF0UL                                   /* Bit mask for ETM_DATACMPNUM */
+#define _ETM_ETMCCR_DATACMPNUM_DEFAULT                0x00000000UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_DATACMPNUM_DEFAULT                 (_ETM_ETMCCR_DATACMPNUM_DEFAULT << 4)    /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define _ETM_ETMCCR_MMDECCNT_SHIFT                    8                                        /* Shift value for ETM_MMDECCNT */
+#define _ETM_ETMCCR_MMDECCNT_MASK                     0x1F00UL                                 /* Bit mask for ETM_MMDECCNT */
+#define _ETM_ETMCCR_MMDECCNT_DEFAULT                  0x00000000UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_MMDECCNT_DEFAULT                   (_ETM_ETMCCR_MMDECCNT_DEFAULT << 8)      /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define _ETM_ETMCCR_COUNTNUM_SHIFT                    13                                       /* Shift value for ETM_COUNTNUM */
+#define _ETM_ETMCCR_COUNTNUM_MASK                     0xE000UL                                 /* Bit mask for ETM_COUNTNUM */
+#define _ETM_ETMCCR_COUNTNUM_DEFAULT                  0x00000001UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_COUNTNUM_DEFAULT                   (_ETM_ETMCCR_COUNTNUM_DEFAULT << 13)     /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_SEQPRES                            (0x1UL << 16)                            /* Sequencer Present */
+#define _ETM_ETMCCR_SEQPRES_SHIFT                     16                                       /* Shift value for ETM_SEQPRES */
+#define _ETM_ETMCCR_SEQPRES_MASK                      0x10000UL                                /* Bit mask for ETM_SEQPRES */
+#define _ETM_ETMCCR_SEQPRES_DEFAULT                   0x00000000UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_SEQPRES_DEFAULT                    (_ETM_ETMCCR_SEQPRES_DEFAULT << 16)      /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define _ETM_ETMCCR_EXTINPNUM_SHIFT                   17                                       /* Shift value for ETM_EXTINPNUM */
+#define _ETM_ETMCCR_EXTINPNUM_MASK                    0xE0000UL                                /* Bit mask for ETM_EXTINPNUM */
+#define _ETM_ETMCCR_EXTINPNUM_DEFAULT                 0x00000000UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define _ETM_ETMCCR_EXTINPNUM_ZERO                    0x00000000UL                             /* Mode ZERO for ETM_ETMCCR */
+#define _ETM_ETMCCR_EXTINPNUM_ONE                     0x00000001UL                             /* Mode ONE for ETM_ETMCCR */
+#define _ETM_ETMCCR_EXTINPNUM_TWO                     0x00000002UL                             /* Mode TWO for ETM_ETMCCR */
+#define ETM_ETMCCR_EXTINPNUM_DEFAULT                  (_ETM_ETMCCR_EXTINPNUM_DEFAULT << 17)    /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_EXTINPNUM_ZERO                     (_ETM_ETMCCR_EXTINPNUM_ZERO << 17)       /* Shifted mode ZERO for ETM_ETMCCR */
+#define ETM_ETMCCR_EXTINPNUM_ONE                      (_ETM_ETMCCR_EXTINPNUM_ONE << 17)        /* Shifted mode ONE for ETM_ETMCCR */
+#define ETM_ETMCCR_EXTINPNUM_TWO                      (_ETM_ETMCCR_EXTINPNUM_TWO << 17)        /* Shifted mode TWO for ETM_ETMCCR */
+#define _ETM_ETMCCR_EXTOUTNUM_SHIFT                   20                                       /* Shift value for ETM_EXTOUTNUM */
+#define _ETM_ETMCCR_EXTOUTNUM_MASK                    0x700000UL                               /* Bit mask for ETM_EXTOUTNUM */
+#define _ETM_ETMCCR_EXTOUTNUM_DEFAULT                 0x00000000UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_EXTOUTNUM_DEFAULT                  (_ETM_ETMCCR_EXTOUTNUM_DEFAULT << 20)    /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_FIFOFULLPRES                       (0x1UL << 23)                            /* FIFO FULL present */
+#define _ETM_ETMCCR_FIFOFULLPRES_SHIFT                23                                       /* Shift value for ETM_FIFOFULLPRES */
+#define _ETM_ETMCCR_FIFOFULLPRES_MASK                 0x800000UL                               /* Bit mask for ETM_FIFOFULLPRES */
+#define _ETM_ETMCCR_FIFOFULLPRES_DEFAULT              0x00000001UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_FIFOFULLPRES_DEFAULT               (_ETM_ETMCCR_FIFOFULLPRES_DEFAULT << 23) /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define _ETM_ETMCCR_IDCOMPNUM_SHIFT                   24                                       /* Shift value for ETM_IDCOMPNUM */
+#define _ETM_ETMCCR_IDCOMPNUM_MASK                    0x3000000UL                              /* Bit mask for ETM_IDCOMPNUM */
+#define _ETM_ETMCCR_IDCOMPNUM_DEFAULT                 0x00000000UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_IDCOMPNUM_DEFAULT                  (_ETM_ETMCCR_IDCOMPNUM_DEFAULT << 24)    /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_TRACESS                            (0x1UL << 26)                            /* Trace Start/Stop Block Present */
+#define _ETM_ETMCCR_TRACESS_SHIFT                     26                                       /* Shift value for ETM_TRACESS */
+#define _ETM_ETMCCR_TRACESS_MASK                      0x4000000UL                              /* Bit mask for ETM_TRACESS */
+#define _ETM_ETMCCR_TRACESS_DEFAULT                   0x00000001UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_TRACESS_DEFAULT                    (_ETM_ETMCCR_TRACESS_DEFAULT << 26)      /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_MMACCESS                           (0x1UL << 27)                            /* Coprocessor and Memory Access */
+#define _ETM_ETMCCR_MMACCESS_SHIFT                    27                                       /* Shift value for ETM_MMACCESS */
+#define _ETM_ETMCCR_MMACCESS_MASK                     0x8000000UL                              /* Bit mask for ETM_MMACCESS */
+#define _ETM_ETMCCR_MMACCESS_DEFAULT                  0x00000001UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_MMACCESS_DEFAULT                   (_ETM_ETMCCR_MMACCESS_DEFAULT << 27)     /* Shifted mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_ETMID                              (0x1UL << 31)                            /* ETM ID Register Present */
+#define _ETM_ETMCCR_ETMID_SHIFT                       31                                       /* Shift value for ETM_ETMID */
+#define _ETM_ETMCCR_ETMID_MASK                        0x80000000UL                             /* Bit mask for ETM_ETMID */
+#define _ETM_ETMCCR_ETMID_DEFAULT                     0x00000001UL                             /* Mode DEFAULT for ETM_ETMCCR */
+#define ETM_ETMCCR_ETMID_DEFAULT                      (_ETM_ETMCCR_ETMID_DEFAULT << 31)        /* Shifted mode DEFAULT for ETM_ETMCCR */
+
+/* Bit fields for ETM ETMTRIGGER */
+
+#define _ETM_ETMTRIGGER_RESETVALUE                    0x00000000UL                           /* Default value for ETM_ETMTRIGGER */
+#define _ETM_ETMTRIGGER_MASK                          0x0001FFFFUL                           /* Mask for ETM_ETMTRIGGER */
+
+#define _ETM_ETMTRIGGER_RESA_SHIFT                    0                                      /* Shift value for ETM_RESA */
+#define _ETM_ETMTRIGGER_RESA_MASK                     0x7FUL                                 /* Bit mask for ETM_RESA */
+#define _ETM_ETMTRIGGER_RESA_DEFAULT                  0x00000000UL                           /* Mode DEFAULT for ETM_ETMTRIGGER */
+#define ETM_ETMTRIGGER_RESA_DEFAULT                   (_ETM_ETMTRIGGER_RESA_DEFAULT << 0)    /* Shifted mode DEFAULT for ETM_ETMTRIGGER */
+#define _ETM_ETMTRIGGER_RESB_SHIFT                    7                                      /* Shift value for ETM_RESB */
+#define _ETM_ETMTRIGGER_RESB_MASK                     0x3F80UL                               /* Bit mask for ETM_RESB */
+#define _ETM_ETMTRIGGER_RESB_DEFAULT                  0x00000000UL                           /* Mode DEFAULT for ETM_ETMTRIGGER */
+#define ETM_ETMTRIGGER_RESB_DEFAULT                   (_ETM_ETMTRIGGER_RESB_DEFAULT << 7)    /* Shifted mode DEFAULT for ETM_ETMTRIGGER */
+#define _ETM_ETMTRIGGER_ETMFCN_SHIFT                  14                                     /* Shift value for ETM_ETMFCN */
+#define _ETM_ETMTRIGGER_ETMFCN_MASK                   0x1C000UL                              /* Bit mask for ETM_ETMFCN */
+#define _ETM_ETMTRIGGER_ETMFCN_DEFAULT                0x00000000UL                           /* Mode DEFAULT for ETM_ETMTRIGGER */
+#define ETM_ETMTRIGGER_ETMFCN_DEFAULT                 (_ETM_ETMTRIGGER_ETMFCN_DEFAULT << 14) /* Shifted mode DEFAULT for ETM_ETMTRIGGER */
+
+/* Bit fields for ETM ETMSR */
+
+#define _ETM_ETMSR_RESETVALUE                         0x00000002UL                         /* Default value for ETM_ETMSR */
+#define _ETM_ETMSR_MASK                               0x0000000FUL                         /* Mask for ETM_ETMSR */
+
+#define ETM_ETMSR_ETHOF                               (0x1UL << 0)                         /* ETM Overflow */
+#define _ETM_ETMSR_ETHOF_SHIFT                        0                                    /* Shift value for ETM_ETHOF */
+#define _ETM_ETMSR_ETHOF_MASK                         0x1UL                                /* Bit mask for ETM_ETHOF */
+#define _ETM_ETMSR_ETHOF_DEFAULT                      0x00000000UL                         /* Mode DEFAULT for ETM_ETMSR */
+#define ETM_ETMSR_ETHOF_DEFAULT                       (_ETM_ETMSR_ETHOF_DEFAULT << 0)      /* Shifted mode DEFAULT for ETM_ETMSR */
+#define ETM_ETMSR_ETMPROGBIT                          (0x1UL << 1)                         /* ETM Programming Bit Status */
+#define _ETM_ETMSR_ETMPROGBIT_SHIFT                   1                                    /* Shift value for ETM_ETMPROGBIT */
+#define _ETM_ETMSR_ETMPROGBIT_MASK                    0x2UL                                /* Bit mask for ETM_ETMPROGBIT */
+#define _ETM_ETMSR_ETMPROGBIT_DEFAULT                 0x00000001UL                         /* Mode DEFAULT for ETM_ETMSR */
+#define ETM_ETMSR_ETMPROGBIT_DEFAULT                  (_ETM_ETMSR_ETMPROGBIT_DEFAULT << 1) /* Shifted mode DEFAULT for ETM_ETMSR */
+#define ETM_ETMSR_TRACESTAT                           (0x1UL << 2)                         /* Trace Start/Stop Status */
+#define _ETM_ETMSR_TRACESTAT_SHIFT                    2                                    /* Shift value for ETM_TRACESTAT */
+#define _ETM_ETMSR_TRACESTAT_MASK                     0x4UL                                /* Bit mask for ETM_TRACESTAT */
+#define _ETM_ETMSR_TRACESTAT_DEFAULT                  0x00000000UL                         /* Mode DEFAULT for ETM_ETMSR */
+#define ETM_ETMSR_TRACESTAT_DEFAULT                   (_ETM_ETMSR_TRACESTAT_DEFAULT << 2)  /* Shifted mode DEFAULT for ETM_ETMSR */
+#define ETM_ETMSR_TRIGBIT                             (0x1UL << 3)                         /* Trigger Bit */
+#define _ETM_ETMSR_TRIGBIT_SHIFT                      3                                    /* Shift value for ETM_TRIGBIT */
+#define _ETM_ETMSR_TRIGBIT_MASK                       0x8UL                                /* Bit mask for ETM_TRIGBIT */
+#define _ETM_ETMSR_TRIGBIT_DEFAULT                    0x00000000UL                         /* Mode DEFAULT for ETM_ETMSR */
+#define ETM_ETMSR_TRIGBIT_DEFAULT                     (_ETM_ETMSR_TRIGBIT_DEFAULT << 3)    /* Shifted mode DEFAULT for ETM_ETMSR */
+
+/* Bit fields for ETM ETMSCR */
+
+#define _ETM_ETMSCR_RESETVALUE                        0x00020D09UL                            /* Default value for ETM_ETMSCR */
+#define _ETM_ETMSCR_MASK                              0x00027F0FUL                            /* Mask for ETM_ETMSCR */
+
+#define _ETM_ETMSCR_MAXPORTSIZE_SHIFT                 0                                       /* Shift value for ETM_MAXPORTSIZE */
+#define _ETM_ETMSCR_MAXPORTSIZE_MASK                  0x7UL                                   /* Bit mask for ETM_MAXPORTSIZE */
+#define _ETM_ETMSCR_MAXPORTSIZE_DEFAULT               0x00000001UL                            /* Mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_MAXPORTSIZE_DEFAULT                (_ETM_ETMSCR_MAXPORTSIZE_DEFAULT << 0)  /* Shifted mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_Reserved                           (0x1UL << 3)                            /* Reserved */
+#define _ETM_ETMSCR_Reserved_SHIFT                    3                                       /* Shift value for ETM_Reserved */
+#define _ETM_ETMSCR_Reserved_MASK                     0x8UL                                   /* Bit mask for ETM_Reserved */
+#define _ETM_ETMSCR_Reserved_DEFAULT                  0x00000001UL                            /* Mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_Reserved_DEFAULT                   (_ETM_ETMSCR_Reserved_DEFAULT << 3)     /* Shifted mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_FIFOFULL                           (0x1UL << 8)                            /* FIFO FULL Supported */
+#define _ETM_ETMSCR_FIFOFULL_SHIFT                    8                                       /* Shift value for ETM_FIFOFULL */
+#define _ETM_ETMSCR_FIFOFULL_MASK                     0x100UL                                 /* Bit mask for ETM_FIFOFULL */
+#define _ETM_ETMSCR_FIFOFULL_DEFAULT                  0x00000001UL                            /* Mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_FIFOFULL_DEFAULT                   (_ETM_ETMSCR_FIFOFULL_DEFAULT << 8)     /* Shifted mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_MAXPORTSIZE3                       (0x1UL << 9)                            /* Max Port Size[3] */
+#define _ETM_ETMSCR_MAXPORTSIZE3_SHIFT                9                                       /* Shift value for ETM_MAXPORTSIZE3 */
+#define _ETM_ETMSCR_MAXPORTSIZE3_MASK                 0x200UL                                 /* Bit mask for ETM_MAXPORTSIZE3 */
+#define _ETM_ETMSCR_MAXPORTSIZE3_DEFAULT              0x00000000UL                            /* Mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_MAXPORTSIZE3_DEFAULT               (_ETM_ETMSCR_MAXPORTSIZE3_DEFAULT << 9) /* Shifted mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_PORTSIZE                           (0x1UL << 10)                           /* Port Size Supported */
+#define _ETM_ETMSCR_PORTSIZE_SHIFT                    10                                      /* Shift value for ETM_PORTSIZE */
+#define _ETM_ETMSCR_PORTSIZE_MASK                     0x400UL                                 /* Bit mask for ETM_PORTSIZE */
+#define _ETM_ETMSCR_PORTSIZE_DEFAULT                  0x00000001UL                            /* Mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_PORTSIZE_DEFAULT                   (_ETM_ETMSCR_PORTSIZE_DEFAULT << 10)    /* Shifted mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_PORTMODE                           (0x1UL << 11)                           /* Port Mode Supported */
+#define _ETM_ETMSCR_PORTMODE_SHIFT                    11                                      /* Shift value for ETM_PORTMODE */
+#define _ETM_ETMSCR_PORTMODE_MASK                     0x800UL                                 /* Bit mask for ETM_PORTMODE */
+#define _ETM_ETMSCR_PORTMODE_DEFAULT                  0x00000001UL                            /* Mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_PORTMODE_DEFAULT                   (_ETM_ETMSCR_PORTMODE_DEFAULT << 11)    /* Shifted mode DEFAULT for ETM_ETMSCR */
+#define _ETM_ETMSCR_PROCNUM_SHIFT                     12                                      /* Shift value for ETM_PROCNUM */
+#define _ETM_ETMSCR_PROCNUM_MASK                      0x7000UL                                /* Bit mask for ETM_PROCNUM */
+#define _ETM_ETMSCR_PROCNUM_DEFAULT                   0x00000000UL                            /* Mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_PROCNUM_DEFAULT                    (_ETM_ETMSCR_PROCNUM_DEFAULT << 12)     /* Shifted mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_NOFETCHCOMP                        (0x1UL << 17)                           /* No Fetch Comparison */
+#define _ETM_ETMSCR_NOFETCHCOMP_SHIFT                 17                                      /* Shift value for ETM_NOFETCHCOMP */
+#define _ETM_ETMSCR_NOFETCHCOMP_MASK                  0x20000UL                               /* Bit mask for ETM_NOFETCHCOMP */
+#define _ETM_ETMSCR_NOFETCHCOMP_DEFAULT               0x00000001UL                            /* Mode DEFAULT for ETM_ETMSCR */
+#define ETM_ETMSCR_NOFETCHCOMP_DEFAULT                (_ETM_ETMSCR_NOFETCHCOMP_DEFAULT << 17) /* Shifted mode DEFAULT for ETM_ETMSCR */
+
+/* Bit fields for ETM ETMTEEVR */
+
+#define _ETM_ETMTEEVR_RESETVALUE                      0x00000000UL                           /* Default value for ETM_ETMTEEVR */
+#define _ETM_ETMTEEVR_MASK                            0x0001FFFFUL                           /* Mask for ETM_ETMTEEVR */
+
+#define _ETM_ETMTEEVR_RESA_SHIFT                      0                                      /* Shift value for ETM_RESA */
+#define _ETM_ETMTEEVR_RESA_MASK                       0x7FUL                                 /* Bit mask for ETM_RESA */
+#define _ETM_ETMTEEVR_RESA_DEFAULT                    0x00000000UL                           /* Mode DEFAULT for ETM_ETMTEEVR */
+#define ETM_ETMTEEVR_RESA_DEFAULT                     (_ETM_ETMTEEVR_RESA_DEFAULT << 0)      /* Shifted mode DEFAULT for ETM_ETMTEEVR */
+#define _ETM_ETMTEEVR_RESB_SHIFT                      7                                      /* Shift value for ETM_RESB */
+#define _ETM_ETMTEEVR_RESB_MASK                       0x3F80UL                               /* Bit mask for ETM_RESB */
+#define _ETM_ETMTEEVR_RESB_DEFAULT                    0x00000000UL                           /* Mode DEFAULT for ETM_ETMTEEVR */
+#define ETM_ETMTEEVR_RESB_DEFAULT                     (_ETM_ETMTEEVR_RESB_DEFAULT << 7)      /* Shifted mode DEFAULT for ETM_ETMTEEVR */
+#define _ETM_ETMTEEVR_ETMFCNEN_SHIFT                  14                                     /* Shift value for ETM_ETMFCNEN */
+#define _ETM_ETMTEEVR_ETMFCNEN_MASK                   0x1C000UL                              /* Bit mask for ETM_ETMFCNEN */
+#define _ETM_ETMTEEVR_ETMFCNEN_DEFAULT                0x00000000UL                           /* Mode DEFAULT for ETM_ETMTEEVR */
+#define ETM_ETMTEEVR_ETMFCNEN_DEFAULT                 (_ETM_ETMTEEVR_ETMFCNEN_DEFAULT << 14) /* Shifted mode DEFAULT for ETM_ETMTEEVR */
+
+/* Bit fields for ETM ETMTECR1 */
+
+#define _ETM_ETMTECR1_RESETVALUE                      0x00000000UL                           /* Default value for ETM_ETMTECR1 */
+#define _ETM_ETMTECR1_MASK                            0x03FFFFFFUL                           /* Mask for ETM_ETMTECR1 */
+
+#define _ETM_ETMTECR1_ADRCMP_SHIFT                    0                                      /* Shift value for ETM_ADRCMP */
+#define _ETM_ETMTECR1_ADRCMP_MASK                     0xFFUL                                 /* Bit mask for ETM_ADRCMP */
+#define _ETM_ETMTECR1_ADRCMP_DEFAULT                  0x00000000UL                           /* Mode DEFAULT for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_ADRCMP_DEFAULT                   (_ETM_ETMTECR1_ADRCMP_DEFAULT << 0)    /* Shifted mode DEFAULT for ETM_ETMTECR1 */
+#define _ETM_ETMTECR1_MEMMAP_SHIFT                    8                                      /* Shift value for ETM_MEMMAP */
+#define _ETM_ETMTECR1_MEMMAP_MASK                     0xFFFF00UL                             /* Bit mask for ETM_MEMMAP */
+#define _ETM_ETMTECR1_MEMMAP_DEFAULT                  0x00000000UL                           /* Mode DEFAULT for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_MEMMAP_DEFAULT                   (_ETM_ETMTECR1_MEMMAP_DEFAULT << 8)    /* Shifted mode DEFAULT for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_INCEXCTL                         (0x1UL << 24)                          /* Trace Include/Exclude Flag */
+#define _ETM_ETMTECR1_INCEXCTL_SHIFT                  24                                     /* Shift value for ETM_INCEXCTL */
+#define _ETM_ETMTECR1_INCEXCTL_MASK                   0x1000000UL                            /* Bit mask for ETM_INCEXCTL */
+#define _ETM_ETMTECR1_INCEXCTL_DEFAULT                0x00000000UL                           /* Mode DEFAULT for ETM_ETMTECR1 */
+#define _ETM_ETMTECR1_INCEXCTL_INC                    0x00000000UL                           /* Mode INC for ETM_ETMTECR1 */
+#define _ETM_ETMTECR1_INCEXCTL_EXC                    0x00000001UL                           /* Mode EXC for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_INCEXCTL_DEFAULT                 (_ETM_ETMTECR1_INCEXCTL_DEFAULT << 24) /* Shifted mode DEFAULT for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_INCEXCTL_INC                     (_ETM_ETMTECR1_INCEXCTL_INC << 24)     /* Shifted mode INC for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_INCEXCTL_EXC                     (_ETM_ETMTECR1_INCEXCTL_EXC << 24)     /* Shifted mode EXC for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_TCE                              (0x1UL << 25)                          /* Trace Control Enable */
+#define _ETM_ETMTECR1_TCE_SHIFT                       25                                     /* Shift value for ETM_TCE */
+#define _ETM_ETMTECR1_TCE_MASK                        0x2000000UL                            /* Bit mask for ETM_TCE */
+#define _ETM_ETMTECR1_TCE_DEFAULT                     0x00000000UL                           /* Mode DEFAULT for ETM_ETMTECR1 */
+#define _ETM_ETMTECR1_TCE_EN                          0x00000000UL                           /* Mode EN for ETM_ETMTECR1 */
+#define _ETM_ETMTECR1_TCE_DIS                         0x00000001UL                           /* Mode DIS for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_TCE_DEFAULT                      (_ETM_ETMTECR1_TCE_DEFAULT << 25)      /* Shifted mode DEFAULT for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_TCE_EN                           (_ETM_ETMTECR1_TCE_EN << 25)           /* Shifted mode EN for ETM_ETMTECR1 */
+#define ETM_ETMTECR1_TCE_DIS                          (_ETM_ETMTECR1_TCE_DIS << 25)          /* Shifted mode DIS for ETM_ETMTECR1 */
+
+/* Bit fields for ETM ETMFFLR */
+
+#define _ETM_ETMFFLR_RESETVALUE                       0x00000000UL                        /* Default value for ETM_ETMFFLR */
+#define _ETM_ETMFFLR_MASK                             0x000000FFUL                        /* Mask for ETM_ETMFFLR */
+
+#define _ETM_ETMFFLR_BYTENUM_SHIFT                    0                                   /* Shift value for ETM_BYTENUM */
+#define _ETM_ETMFFLR_BYTENUM_MASK                     0xFFUL                              /* Bit mask for ETM_BYTENUM */
+#define _ETM_ETMFFLR_BYTENUM_DEFAULT                  0x00000000UL                        /* Mode DEFAULT for ETM_ETMFFLR */
+#define ETM_ETMFFLR_BYTENUM_DEFAULT                   (_ETM_ETMFFLR_BYTENUM_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMFFLR */
+
+/* Bit fields for ETM ETMCNTRLDVR1 */
+
+#define _ETM_ETMCNTRLDVR1_RESETVALUE                  0x00000000UL                           /* Default value for ETM_ETMCNTRLDVR1 */
+#define _ETM_ETMCNTRLDVR1_MASK                        0x0000FFFFUL                           /* Mask for ETM_ETMCNTRLDVR1 */
+
+#define _ETM_ETMCNTRLDVR1_COUNT_SHIFT                 0                                      /* Shift value for ETM_COUNT */
+#define _ETM_ETMCNTRLDVR1_COUNT_MASK                  0xFFFFUL                               /* Bit mask for ETM_COUNT */
+#define _ETM_ETMCNTRLDVR1_COUNT_DEFAULT               0x00000000UL                           /* Mode DEFAULT for ETM_ETMCNTRLDVR1 */
+#define ETM_ETMCNTRLDVR1_COUNT_DEFAULT                (_ETM_ETMCNTRLDVR1_COUNT_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMCNTRLDVR1 */
+
+/* Bit fields for ETM ETMSYNCFR */
+
+#define _ETM_ETMSYNCFR_RESETVALUE                     0x00000400UL                       /* Default value for ETM_ETMSYNCFR */
+#define _ETM_ETMSYNCFR_MASK                           0x00000FFFUL                       /* Mask for ETM_ETMSYNCFR */
+
+#define _ETM_ETMSYNCFR_FREQ_SHIFT                     0                                  /* Shift value for ETM_FREQ */
+#define _ETM_ETMSYNCFR_FREQ_MASK                      0xFFFUL                            /* Bit mask for ETM_FREQ */
+#define _ETM_ETMSYNCFR_FREQ_DEFAULT                   0x00000400UL                       /* Mode DEFAULT for ETM_ETMSYNCFR */
+#define ETM_ETMSYNCFR_FREQ_DEFAULT                    (_ETM_ETMSYNCFR_FREQ_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMSYNCFR */
+
+/* Bit fields for ETM ETMIDR */
+
+#define _ETM_ETMIDR_RESETVALUE                        0x4114F253UL                         /* Default value for ETM_ETMIDR */
+#define _ETM_ETMIDR_MASK                              0xFF1DFFFFUL                         /* Mask for ETM_ETMIDR */
+
+#define _ETM_ETMIDR_IMPVER_SHIFT                      0                                    /* Shift value for ETM_IMPVER */
+#define _ETM_ETMIDR_IMPVER_MASK                       0xFUL                                /* Bit mask for ETM_IMPVER */
+#define _ETM_ETMIDR_IMPVER_DEFAULT                    0x00000003UL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_IMPVER_DEFAULT                     (_ETM_ETMIDR_IMPVER_DEFAULT << 0)    /* Shifted mode DEFAULT for ETM_ETMIDR */
+#define _ETM_ETMIDR_ETMMINVER_SHIFT                   4                                    /* Shift value for ETM_ETMMINVER */
+#define _ETM_ETMIDR_ETMMINVER_MASK                    0xF0UL                               /* Bit mask for ETM_ETMMINVER */
+#define _ETM_ETMIDR_ETMMINVER_DEFAULT                 0x00000005UL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_ETMMINVER_DEFAULT                  (_ETM_ETMIDR_ETMMINVER_DEFAULT << 4) /* Shifted mode DEFAULT for ETM_ETMIDR */
+#define _ETM_ETMIDR_ETMMAJVER_SHIFT                   8                                    /* Shift value for ETM_ETMMAJVER */
+#define _ETM_ETMIDR_ETMMAJVER_MASK                    0xF00UL                              /* Bit mask for ETM_ETMMAJVER */
+#define _ETM_ETMIDR_ETMMAJVER_DEFAULT                 0x00000002UL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_ETMMAJVER_DEFAULT                  (_ETM_ETMIDR_ETMMAJVER_DEFAULT << 8) /* Shifted mode DEFAULT for ETM_ETMIDR */
+#define _ETM_ETMIDR_PROCFAM_SHIFT                     12                                   /* Shift value for ETM_PROCFAM */
+#define _ETM_ETMIDR_PROCFAM_MASK                      0xF000UL                             /* Bit mask for ETM_PROCFAM */
+#define _ETM_ETMIDR_PROCFAM_DEFAULT                   0x0000000FUL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_PROCFAM_DEFAULT                    (_ETM_ETMIDR_PROCFAM_DEFAULT << 12)  /* Shifted mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_LPCF                               (0x1UL << 16)                        /* Load PC First */
+#define _ETM_ETMIDR_LPCF_SHIFT                        16                                   /* Shift value for ETM_LPCF */
+#define _ETM_ETMIDR_LPCF_MASK                         0x10000UL                            /* Bit mask for ETM_LPCF */
+#define _ETM_ETMIDR_LPCF_DEFAULT                      0x00000000UL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_LPCF_DEFAULT                       (_ETM_ETMIDR_LPCF_DEFAULT << 16)     /* Shifted mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_THUMBT                             (0x1UL << 18)                        /* 32-bit Thumb Instruction Tracing */
+#define _ETM_ETMIDR_THUMBT_SHIFT                      18                                   /* Shift value for ETM_THUMBT */
+#define _ETM_ETMIDR_THUMBT_MASK                       0x40000UL                            /* Bit mask for ETM_THUMBT */
+#define _ETM_ETMIDR_THUMBT_DEFAULT                    0x00000001UL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_THUMBT_DEFAULT                     (_ETM_ETMIDR_THUMBT_DEFAULT << 18)   /* Shifted mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_SECEXT                             (0x1UL << 19)                        /* Security Extension Support */
+#define _ETM_ETMIDR_SECEXT_SHIFT                      19                                   /* Shift value for ETM_SECEXT */
+#define _ETM_ETMIDR_SECEXT_MASK                       0x80000UL                            /* Bit mask for ETM_SECEXT */
+#define _ETM_ETMIDR_SECEXT_DEFAULT                    0x00000000UL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_SECEXT_DEFAULT                     (_ETM_ETMIDR_SECEXT_DEFAULT << 19)   /* Shifted mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_BPE                                (0x1UL << 20)                        /* Branch Packet Encoding */
+#define _ETM_ETMIDR_BPE_SHIFT                         20                                   /* Shift value for ETM_BPE */
+#define _ETM_ETMIDR_BPE_MASK                          0x100000UL                           /* Bit mask for ETM_BPE */
+#define _ETM_ETMIDR_BPE_DEFAULT                       0x00000001UL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_BPE_DEFAULT                        (_ETM_ETMIDR_BPE_DEFAULT << 20)      /* Shifted mode DEFAULT for ETM_ETMIDR */
+#define _ETM_ETMIDR_IMPCODE_SHIFT                     24                                   /* Shift value for ETM_IMPCODE */
+#define _ETM_ETMIDR_IMPCODE_MASK                      0xFF000000UL                         /* Bit mask for ETM_IMPCODE */
+#define _ETM_ETMIDR_IMPCODE_DEFAULT                   0x00000041UL                         /* Mode DEFAULT for ETM_ETMIDR */
+#define ETM_ETMIDR_IMPCODE_DEFAULT                    (_ETM_ETMIDR_IMPCODE_DEFAULT << 24)  /* Shifted mode DEFAULT for ETM_ETMIDR */
+
+/* Bit fields for ETM ETMCCER */
+
+#define _ETM_ETMCCER_RESETVALUE                       0x18541800UL                           /* Default value for ETM_ETMCCER */
+#define _ETM_ETMCCER_MASK                             0x387FFFFBUL                           /* Mask for ETM_ETMCCER */
+
+#define _ETM_ETMCCER_EXTINPSEL_SHIFT                  0                                      /* Shift value for ETM_EXTINPSEL */
+#define _ETM_ETMCCER_EXTINPSEL_MASK                   0x3UL                                  /* Bit mask for ETM_EXTINPSEL */
+#define _ETM_ETMCCER_EXTINPSEL_DEFAULT                0x00000000UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_EXTINPSEL_DEFAULT                 (_ETM_ETMCCER_EXTINPSEL_DEFAULT << 0)  /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define _ETM_ETMCCER_EXTINPBUS_SHIFT                  3                                      /* Shift value for ETM_EXTINPBUS */
+#define _ETM_ETMCCER_EXTINPBUS_MASK                   0x7F8UL                                /* Bit mask for ETM_EXTINPBUS */
+#define _ETM_ETMCCER_EXTINPBUS_DEFAULT                0x00000000UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_EXTINPBUS_DEFAULT                 (_ETM_ETMCCER_EXTINPBUS_DEFAULT << 3)  /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_READREGS                          (0x1UL << 11)                          /* Readable Registers */
+#define _ETM_ETMCCER_READREGS_SHIFT                   11                                     /* Shift value for ETM_READREGS */
+#define _ETM_ETMCCER_READREGS_MASK                    0x800UL                                /* Bit mask for ETM_READREGS */
+#define _ETM_ETMCCER_READREGS_DEFAULT                 0x00000001UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_READREGS_DEFAULT                  (_ETM_ETMCCER_READREGS_DEFAULT << 11)  /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_DADDRCMP                          (0x1UL << 12)                          /* Data Address comparisons */
+#define _ETM_ETMCCER_DADDRCMP_SHIFT                   12                                     /* Shift value for ETM_DADDRCMP */
+#define _ETM_ETMCCER_DADDRCMP_MASK                    0x1000UL                               /* Bit mask for ETM_DADDRCMP */
+#define _ETM_ETMCCER_DADDRCMP_DEFAULT                 0x00000001UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_DADDRCMP_DEFAULT                  (_ETM_ETMCCER_DADDRCMP_DEFAULT << 12)  /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define _ETM_ETMCCER_INSTRES_SHIFT                    13                                     /* Shift value for ETM_INSTRES */
+#define _ETM_ETMCCER_INSTRES_MASK                     0xE000UL                               /* Bit mask for ETM_INSTRES */
+#define _ETM_ETMCCER_INSTRES_DEFAULT                  0x00000000UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_INSTRES_DEFAULT                   (_ETM_ETMCCER_INSTRES_DEFAULT << 13)   /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define _ETM_ETMCCER_EICEWPNT_SHIFT                   16                                     /* Shift value for ETM_EICEWPNT */
+#define _ETM_ETMCCER_EICEWPNT_MASK                    0xF0000UL                              /* Bit mask for ETM_EICEWPNT */
+#define _ETM_ETMCCER_EICEWPNT_DEFAULT                 0x00000004UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_EICEWPNT_DEFAULT                  (_ETM_ETMCCER_EICEWPNT_DEFAULT << 16)  /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_TEICEWPNT                         (0x1UL << 20)                          /* Trace Start/Stop Block Uses EmbeddedICE watchpoint inputs */
+#define _ETM_ETMCCER_TEICEWPNT_SHIFT                  20                                     /* Shift value for ETM_TEICEWPNT */
+#define _ETM_ETMCCER_TEICEWPNT_MASK                   0x100000UL                             /* Bit mask for ETM_TEICEWPNT */
+#define _ETM_ETMCCER_TEICEWPNT_DEFAULT                0x00000001UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_TEICEWPNT_DEFAULT                 (_ETM_ETMCCER_TEICEWPNT_DEFAULT << 20) /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_EICEIMP                           (0x1UL << 21)                          /* EmbeddedICE Behavior control Implemented */
+#define _ETM_ETMCCER_EICEIMP_SHIFT                    21                                     /* Shift value for ETM_EICEIMP */
+#define _ETM_ETMCCER_EICEIMP_MASK                     0x200000UL                             /* Bit mask for ETM_EICEIMP */
+#define _ETM_ETMCCER_EICEIMP_DEFAULT                  0x00000000UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_EICEIMP_DEFAULT                   (_ETM_ETMCCER_EICEIMP_DEFAULT << 21)   /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_TIMP                              (0x1UL << 22)                          /* Timestamping Implemented */
+#define _ETM_ETMCCER_TIMP_SHIFT                       22                                     /* Shift value for ETM_TIMP */
+#define _ETM_ETMCCER_TIMP_MASK                        0x400000UL                             /* Bit mask for ETM_TIMP */
+#define _ETM_ETMCCER_TIMP_DEFAULT                     0x00000001UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_TIMP_DEFAULT                      (_ETM_ETMCCER_TIMP_DEFAULT << 22)      /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_RFCNT                             (0x1UL << 27)                          /* Reduced Function Counter */
+#define _ETM_ETMCCER_RFCNT_SHIFT                      27                                     /* Shift value for ETM_RFCNT */
+#define _ETM_ETMCCER_RFCNT_MASK                       0x8000000UL                            /* Bit mask for ETM_RFCNT */
+#define _ETM_ETMCCER_RFCNT_DEFAULT                    0x00000001UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_RFCNT_DEFAULT                     (_ETM_ETMCCER_RFCNT_DEFAULT << 27)     /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_TENC                              (0x1UL << 28)                          /* Timestamp Encoding */
+#define _ETM_ETMCCER_TENC_SHIFT                       28                                     /* Shift value for ETM_TENC */
+#define _ETM_ETMCCER_TENC_MASK                        0x10000000UL                           /* Bit mask for ETM_TENC */
+#define _ETM_ETMCCER_TENC_DEFAULT                     0x00000001UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_TENC_DEFAULT                      (_ETM_ETMCCER_TENC_DEFAULT << 28)      /* Shifted mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_TSIZE                             (0x1UL << 29)                          /* Timestamp Size */
+#define _ETM_ETMCCER_TSIZE_SHIFT                      29                                     /* Shift value for ETM_TSIZE */
+#define _ETM_ETMCCER_TSIZE_MASK                       0x20000000UL                           /* Bit mask for ETM_TSIZE */
+#define _ETM_ETMCCER_TSIZE_DEFAULT                    0x00000000UL                           /* Mode DEFAULT for ETM_ETMCCER */
+#define ETM_ETMCCER_TSIZE_DEFAULT                     (_ETM_ETMCCER_TSIZE_DEFAULT << 29)     /* Shifted mode DEFAULT for ETM_ETMCCER */
+
+/* Bit fields for ETM ETMTESSEICR */
+
+#define _ETM_ETMTESSEICR_RESETVALUE                   0x00000000UL                              /* Default value for ETM_ETMTESSEICR */
+#define _ETM_ETMTESSEICR_MASK                         0x000F000FUL                              /* Mask for ETM_ETMTESSEICR */
+
+#define _ETM_ETMTESSEICR_STARTRSEL_SHIFT              0                                         /* Shift value for ETM_STARTRSEL */
+#define _ETM_ETMTESSEICR_STARTRSEL_MASK               0xFUL                                     /* Bit mask for ETM_STARTRSEL */
+#define _ETM_ETMTESSEICR_STARTRSEL_DEFAULT            0x00000000UL                              /* Mode DEFAULT for ETM_ETMTESSEICR */
+#define ETM_ETMTESSEICR_STARTRSEL_DEFAULT             (_ETM_ETMTESSEICR_STARTRSEL_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMTESSEICR */
+#define _ETM_ETMTESSEICR_STOPRSEL_SHIFT               16                                        /* Shift value for ETM_STOPRSEL */
+#define _ETM_ETMTESSEICR_STOPRSEL_MASK                0xF0000UL                                 /* Bit mask for ETM_STOPRSEL */
+#define _ETM_ETMTESSEICR_STOPRSEL_DEFAULT             0x00000000UL                              /* Mode DEFAULT for ETM_ETMTESSEICR */
+#define ETM_ETMTESSEICR_STOPRSEL_DEFAULT              (_ETM_ETMTESSEICR_STOPRSEL_DEFAULT << 16) /* Shifted mode DEFAULT for ETM_ETMTESSEICR */
+
+/* Bit fields for ETM ETMTSEVR */
+
+#define _ETM_ETMTSEVR_RESETVALUE                      0x00000000UL                            /* Default value for ETM_ETMTSEVR */
+#define _ETM_ETMTSEVR_MASK                            0x0001FFFFUL                            /* Mask for ETM_ETMTSEVR */
+
+#define _ETM_ETMTSEVR_RESAEVT_SHIFT                   0                                       /* Shift value for ETM_RESAEVT */
+#define _ETM_ETMTSEVR_RESAEVT_MASK                    0x7FUL                                  /* Bit mask for ETM_RESAEVT */
+#define _ETM_ETMTSEVR_RESAEVT_DEFAULT                 0x00000000UL                            /* Mode DEFAULT for ETM_ETMTSEVR */
+#define ETM_ETMTSEVR_RESAEVT_DEFAULT                  (_ETM_ETMTSEVR_RESAEVT_DEFAULT << 0)    /* Shifted mode DEFAULT for ETM_ETMTSEVR */
+#define _ETM_ETMTSEVR_RESBEVT_SHIFT                   7                                       /* Shift value for ETM_RESBEVT */
+#define _ETM_ETMTSEVR_RESBEVT_MASK                    0x3F80UL                                /* Bit mask for ETM_RESBEVT */
+#define _ETM_ETMTSEVR_RESBEVT_DEFAULT                 0x00000000UL                            /* Mode DEFAULT for ETM_ETMTSEVR */
+#define ETM_ETMTSEVR_RESBEVT_DEFAULT                  (_ETM_ETMTSEVR_RESBEVT_DEFAULT << 7)    /* Shifted mode DEFAULT for ETM_ETMTSEVR */
+#define _ETM_ETMTSEVR_ETMFCNEVT_SHIFT                 14                                      /* Shift value for ETM_ETMFCNEVT */
+#define _ETM_ETMTSEVR_ETMFCNEVT_MASK                  0x1C000UL                               /* Bit mask for ETM_ETMFCNEVT */
+#define _ETM_ETMTSEVR_ETMFCNEVT_DEFAULT               0x00000000UL                            /* Mode DEFAULT for ETM_ETMTSEVR */
+#define ETM_ETMTSEVR_ETMFCNEVT_DEFAULT                (_ETM_ETMTSEVR_ETMFCNEVT_DEFAULT << 14) /* Shifted mode DEFAULT for ETM_ETMTSEVR */
+
+/* Bit fields for ETM ETMTRACEIDR */
+
+#define _ETM_ETMTRACEIDR_RESETVALUE                   0x00000000UL                            /* Default value for ETM_ETMTRACEIDR */
+#define _ETM_ETMTRACEIDR_MASK                         0x0000007FUL                            /* Mask for ETM_ETMTRACEIDR */
+
+#define _ETM_ETMTRACEIDR_TRACEID_SHIFT                0                                       /* Shift value for ETM_TRACEID */
+#define _ETM_ETMTRACEIDR_TRACEID_MASK                 0x7FUL                                  /* Bit mask for ETM_TRACEID */
+#define _ETM_ETMTRACEIDR_TRACEID_DEFAULT              0x00000000UL                            /* Mode DEFAULT for ETM_ETMTRACEIDR */
+#define ETM_ETMTRACEIDR_TRACEID_DEFAULT               (_ETM_ETMTRACEIDR_TRACEID_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMTRACEIDR */
+
+/* Bit fields for ETM ETMIDR2 */
+
+#define _ETM_ETMIDR2_RESETVALUE                       0x00000000UL                    /* Default value for ETM_ETMIDR2 */
+#define _ETM_ETMIDR2_MASK                             0x00000003UL                    /* Mask for ETM_ETMIDR2 */
+
+#define ETM_ETMIDR2_RFE                               (0x1UL << 0)                    /* RFE Transfer Order */
+#define _ETM_ETMIDR2_RFE_SHIFT                        0                               /* Shift value for ETM_RFE */
+#define _ETM_ETMIDR2_RFE_MASK                         0x1UL                           /* Bit mask for ETM_RFE */
+#define _ETM_ETMIDR2_RFE_DEFAULT                      0x00000000UL                    /* Mode DEFAULT for ETM_ETMIDR2 */
+#define _ETM_ETMIDR2_RFE_PC                           0x00000000UL                    /* Mode PC for ETM_ETMIDR2 */
+#define _ETM_ETMIDR2_RFE_CPSR                         0x00000001UL                    /* Mode CPSR for ETM_ETMIDR2 */
+#define ETM_ETMIDR2_RFE_DEFAULT                       (_ETM_ETMIDR2_RFE_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMIDR2 */
+#define ETM_ETMIDR2_RFE_PC                            (_ETM_ETMIDR2_RFE_PC << 0)      /* Shifted mode PC for ETM_ETMIDR2 */
+#define ETM_ETMIDR2_RFE_CPSR                          (_ETM_ETMIDR2_RFE_CPSR << 0)    /* Shifted mode CPSR for ETM_ETMIDR2 */
+#define ETM_ETMIDR2_SWP                               (0x1UL << 1)                    /* SWP Transfer Order */
+#define _ETM_ETMIDR2_SWP_SHIFT                        1                               /* Shift value for ETM_SWP */
+#define _ETM_ETMIDR2_SWP_MASK                         0x2UL                           /* Bit mask for ETM_SWP */
+#define _ETM_ETMIDR2_SWP_DEFAULT                      0x00000000UL                    /* Mode DEFAULT for ETM_ETMIDR2 */
+#define _ETM_ETMIDR2_SWP_LOAD                         0x00000000UL                    /* Mode LOAD for ETM_ETMIDR2 */
+#define _ETM_ETMIDR2_SWP_STORE                        0x00000001UL                    /* Mode STORE for ETM_ETMIDR2 */
+#define ETM_ETMIDR2_SWP_DEFAULT                       (_ETM_ETMIDR2_SWP_DEFAULT << 1) /* Shifted mode DEFAULT for ETM_ETMIDR2 */
+#define ETM_ETMIDR2_SWP_LOAD                          (_ETM_ETMIDR2_SWP_LOAD << 1)    /* Shifted mode LOAD for ETM_ETMIDR2 */
+#define ETM_ETMIDR2_SWP_STORE                         (_ETM_ETMIDR2_SWP_STORE << 1)   /* Shifted mode STORE for ETM_ETMIDR2 */
+
+/* Bit fields for ETM ETMPDSR */
+
+#define _ETM_ETMPDSR_RESETVALUE                       0x00000001UL                      /* Default value for ETM_ETMPDSR */
+#define _ETM_ETMPDSR_MASK                             0x00000001UL                      /* Mask for ETM_ETMPDSR */
+
+#define ETM_ETMPDSR_ETMUP                             (0x1UL << 0)                      /* ETM Powered Up */
+#define _ETM_ETMPDSR_ETMUP_SHIFT                      0                                 /* Shift value for ETM_ETMUP */
+#define _ETM_ETMPDSR_ETMUP_MASK                       0x1UL                             /* Bit mask for ETM_ETMUP */
+#define _ETM_ETMPDSR_ETMUP_DEFAULT                    0x00000001UL                      /* Mode DEFAULT for ETM_ETMPDSR */
+#define ETM_ETMPDSR_ETMUP_DEFAULT                     (_ETM_ETMPDSR_ETMUP_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMPDSR */
+
+/* Bit fields for ETM ETMISCIN */
+
+#define _ETM_ETMISCIN_RESETVALUE                      0x00000000UL                          /* Default value for ETM_ETMISCIN */
+#define _ETM_ETMISCIN_MASK                            0x00000013UL                          /* Mask for ETM_ETMISCIN */
+
+#define _ETM_ETMISCIN_EXTIN_SHIFT                     0                                     /* Shift value for ETM_EXTIN */
+#define _ETM_ETMISCIN_EXTIN_MASK                      0x3UL                                 /* Bit mask for ETM_EXTIN */
+#define _ETM_ETMISCIN_EXTIN_DEFAULT                   0x00000000UL                          /* Mode DEFAULT for ETM_ETMISCIN */
+#define ETM_ETMISCIN_EXTIN_DEFAULT                    (_ETM_ETMISCIN_EXTIN_DEFAULT << 0)    /* Shifted mode DEFAULT for ETM_ETMISCIN */
+#define ETM_ETMISCIN_COREHALT                         (0x1UL << 4)                          /* Core Halt */
+#define _ETM_ETMISCIN_COREHALT_SHIFT                  4                                     /* Shift value for ETM_COREHALT */
+#define _ETM_ETMISCIN_COREHALT_MASK                   0x10UL                                /* Bit mask for ETM_COREHALT */
+#define _ETM_ETMISCIN_COREHALT_DEFAULT                0x00000000UL                          /* Mode DEFAULT for ETM_ETMISCIN */
+#define ETM_ETMISCIN_COREHALT_DEFAULT                 (_ETM_ETMISCIN_COREHALT_DEFAULT << 4) /* Shifted mode DEFAULT for ETM_ETMISCIN */
+
+/* Bit fields for ETM ITTRIGOUT */
+
+#define _ETM_ITTRIGOUT_RESETVALUE                     0x00000000UL                             /* Default value for ETM_ITTRIGOUT */
+#define _ETM_ITTRIGOUT_MASK                           0x00000001UL                             /* Mask for ETM_ITTRIGOUT */
+
+#define ETM_ITTRIGOUT_TRIGGEROUT                      (0x1UL << 0)                             /* Trigger output value */
+#define _ETM_ITTRIGOUT_TRIGGEROUT_SHIFT               0                                        /* Shift value for ETM_TRIGGEROUT */
+#define _ETM_ITTRIGOUT_TRIGGEROUT_MASK                0x1UL                                    /* Bit mask for ETM_TRIGGEROUT */
+#define _ETM_ITTRIGOUT_TRIGGEROUT_DEFAULT             0x00000000UL                             /* Mode DEFAULT for ETM_ITTRIGOUT */
+#define ETM_ITTRIGOUT_TRIGGEROUT_DEFAULT              (_ETM_ITTRIGOUT_TRIGGEROUT_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ITTRIGOUT */
+
+/* Bit fields for ETM ETMITATBCTR2 */
+
+#define _ETM_ETMITATBCTR2_RESETVALUE                  0x00000001UL                             /* Default value for ETM_ETMITATBCTR2 */
+#define _ETM_ETMITATBCTR2_MASK                        0x00000001UL                             /* Mask for ETM_ETMITATBCTR2 */
+
+#define ETM_ETMITATBCTR2_ATREADY                      (0x1UL << 0)                             /* ATREADY Input Value */
+#define _ETM_ETMITATBCTR2_ATREADY_SHIFT               0                                        /* Shift value for ETM_ATREADY */
+#define _ETM_ETMITATBCTR2_ATREADY_MASK                0x1UL                                    /* Bit mask for ETM_ATREADY */
+#define _ETM_ETMITATBCTR2_ATREADY_DEFAULT             0x00000001UL                             /* Mode DEFAULT for ETM_ETMITATBCTR2 */
+#define ETM_ETMITATBCTR2_ATREADY_DEFAULT              (_ETM_ETMITATBCTR2_ATREADY_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMITATBCTR2 */
+
+/* Bit fields for ETM ETMITATBCTR0 */
+
+#define _ETM_ETMITATBCTR0_RESETVALUE                  0x00000000UL                             /* Default value for ETM_ETMITATBCTR0 */
+#define _ETM_ETMITATBCTR0_MASK                        0x00000001UL                             /* Mask for ETM_ETMITATBCTR0 */
+
+#define ETM_ETMITATBCTR0_ATVALID                      (0x1UL << 0)                             /* ATVALID Output Value */
+#define _ETM_ETMITATBCTR0_ATVALID_SHIFT               0                                        /* Shift value for ETM_ATVALID */
+#define _ETM_ETMITATBCTR0_ATVALID_MASK                0x1UL                                    /* Bit mask for ETM_ATVALID */
+#define _ETM_ETMITATBCTR0_ATVALID_DEFAULT             0x00000000UL                             /* Mode DEFAULT for ETM_ETMITATBCTR0 */
+#define ETM_ETMITATBCTR0_ATVALID_DEFAULT              (_ETM_ETMITATBCTR0_ATVALID_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMITATBCTR0 */
+
+/* Bit fields for ETM ETMITCTRL */
+
+#define _ETM_ETMITCTRL_RESETVALUE                     0x00000000UL                       /* Default value for ETM_ETMITCTRL */
+#define _ETM_ETMITCTRL_MASK                           0x00000001UL                       /* Mask for ETM_ETMITCTRL */
+
+#define ETM_ETMITCTRL_ITEN                            (0x1UL << 0)                       /* Integration Mode Enable */
+#define _ETM_ETMITCTRL_ITEN_SHIFT                     0                                  /* Shift value for ETM_ITEN */
+#define _ETM_ETMITCTRL_ITEN_MASK                      0x1UL                              /* Bit mask for ETM_ITEN */
+#define _ETM_ETMITCTRL_ITEN_DEFAULT                   0x00000000UL                       /* Mode DEFAULT for ETM_ETMITCTRL */
+#define ETM_ETMITCTRL_ITEN_DEFAULT                    (_ETM_ETMITCTRL_ITEN_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMITCTRL */
+
+/* Bit fields for ETM ETMCLAIMSET */
+
+#define _ETM_ETMCLAIMSET_RESETVALUE                   0x0000000FUL                           /* Default value for ETM_ETMCLAIMSET */
+#define _ETM_ETMCLAIMSET_MASK                         0x000000FFUL                           /* Mask for ETM_ETMCLAIMSET */
+
+#define _ETM_ETMCLAIMSET_SETTAG_SHIFT                 0                                      /* Shift value for ETM_SETTAG */
+#define _ETM_ETMCLAIMSET_SETTAG_MASK                  0xFFUL                                 /* Bit mask for ETM_SETTAG */
+#define _ETM_ETMCLAIMSET_SETTAG_DEFAULT               0x0000000FUL                           /* Mode DEFAULT for ETM_ETMCLAIMSET */
+#define ETM_ETMCLAIMSET_SETTAG_DEFAULT                (_ETM_ETMCLAIMSET_SETTAG_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMCLAIMSET */
+
+/* Bit fields for ETM ETMCLAIMCLR */
+
+#define _ETM_ETMCLAIMCLR_RESETVALUE                   0x00000000UL                           /* Default value for ETM_ETMCLAIMCLR */
+#define _ETM_ETMCLAIMCLR_MASK                         0x00000001UL                           /* Mask for ETM_ETMCLAIMCLR */
+
+#define ETM_ETMCLAIMCLR_CLRTAG                        (0x1UL << 0)                           /* Tag Bits */
+#define _ETM_ETMCLAIMCLR_CLRTAG_SHIFT                 0                                      /* Shift value for ETM_CLRTAG */
+#define _ETM_ETMCLAIMCLR_CLRTAG_MASK                  0x1UL                                  /* Bit mask for ETM_CLRTAG */
+#define _ETM_ETMCLAIMCLR_CLRTAG_DEFAULT               0x00000000UL                           /* Mode DEFAULT for ETM_ETMCLAIMCLR */
+#define ETM_ETMCLAIMCLR_CLRTAG_DEFAULT                (_ETM_ETMCLAIMCLR_CLRTAG_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMCLAIMCLR */
+
+/* Bit fields for ETM ETMLAR */
+
+#define _ETM_ETMLAR_RESETVALUE                        0x00000000UL                   /* Default value for ETM_ETMLAR */
+#define _ETM_ETMLAR_MASK                              0x00000001UL                   /* Mask for ETM_ETMLAR */
+
+#define ETM_ETMLAR_KEY                                (0x1UL << 0)                   /* Key Value */
+#define _ETM_ETMLAR_KEY_SHIFT                         0                              /* Shift value for ETM_KEY */
+#define _ETM_ETMLAR_KEY_MASK                          0x1UL                          /* Bit mask for ETM_KEY */
+#define _ETM_ETMLAR_KEY_DEFAULT                       0x00000000UL                   /* Mode DEFAULT for ETM_ETMLAR */
+#define ETM_ETMLAR_KEY_DEFAULT                        (_ETM_ETMLAR_KEY_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMLAR */
+
+/* Bit fields for ETM ETMLSR */
+
+#define _ETM_ETMLSR_RESETVALUE                        0x00000003UL                       /* Default value for ETM_ETMLSR */
+#define _ETM_ETMLSR_MASK                              0x00000003UL                       /* Mask for ETM_ETMLSR */
+
+#define ETM_ETMLSR_LOCKIMP                            (0x1UL << 0)                       /* ETM Locking Implemented */
+#define _ETM_ETMLSR_LOCKIMP_SHIFT                     0                                  /* Shift value for ETM_LOCKIMP */
+#define _ETM_ETMLSR_LOCKIMP_MASK                      0x1UL                              /* Bit mask for ETM_LOCKIMP */
+#define _ETM_ETMLSR_LOCKIMP_DEFAULT                   0x00000001UL                       /* Mode DEFAULT for ETM_ETMLSR */
+#define ETM_ETMLSR_LOCKIMP_DEFAULT                    (_ETM_ETMLSR_LOCKIMP_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMLSR */
+#define ETM_ETMLSR_LOCKED                             (0x1UL << 1)                       /* ETM locked */
+#define _ETM_ETMLSR_LOCKED_SHIFT                      1                                  /* Shift value for ETM_LOCKED */
+#define _ETM_ETMLSR_LOCKED_MASK                       0x2UL                              /* Bit mask for ETM_LOCKED */
+#define _ETM_ETMLSR_LOCKED_DEFAULT                    0x00000001UL                       /* Mode DEFAULT for ETM_ETMLSR */
+#define ETM_ETMLSR_LOCKED_DEFAULT                     (_ETM_ETMLSR_LOCKED_DEFAULT << 1)  /* Shifted mode DEFAULT for ETM_ETMLSR */
+
+/* Bit fields for ETM ETMAUTHSTATUS */
+
+#define _ETM_ETMAUTHSTATUS_RESETVALUE                 0x000000C0UL                                      /* Default value for ETM_ETMAUTHSTATUS */
+#define _ETM_ETMAUTHSTATUS_MASK                       0x000000FFUL                                      /* Mask for ETM_ETMAUTHSTATUS */
+
+#define _ETM_ETMAUTHSTATUS_NONSECINVDBG_SHIFT         0                                                 /* Shift value for ETM_NONSECINVDBG */
+#define _ETM_ETMAUTHSTATUS_NONSECINVDBG_MASK          0x3UL                                             /* Bit mask for ETM_NONSECINVDBG */
+#define _ETM_ETMAUTHSTATUS_NONSECINVDBG_DEFAULT       0x00000000UL                                      /* Mode DEFAULT for ETM_ETMAUTHSTATUS */
+#define ETM_ETMAUTHSTATUS_NONSECINVDBG_DEFAULT        (_ETM_ETMAUTHSTATUS_NONSECINVDBG_DEFAULT << 0)    /* Shifted mode DEFAULT for ETM_ETMAUTHSTATUS */
+#define _ETM_ETMAUTHSTATUS_NONSECNONINVDBG_SHIFT      2                                                 /* Shift value for ETM_NONSECNONINVDBG */
+#define _ETM_ETMAUTHSTATUS_NONSECNONINVDBG_MASK       0xCUL                                             /* Bit mask for ETM_NONSECNONINVDBG */
+#define _ETM_ETMAUTHSTATUS_NONSECNONINVDBG_DEFAULT    0x00000000UL                                      /* Mode DEFAULT for ETM_ETMAUTHSTATUS */
+#define _ETM_ETMAUTHSTATUS_NONSECNONINVDBG_DISABLE    0x00000002UL                                      /* Mode DISABLE for ETM_ETMAUTHSTATUS */
+#define _ETM_ETMAUTHSTATUS_NONSECNONINVDBG_ENABLE     0x00000003UL                                      /* Mode ENABLE for ETM_ETMAUTHSTATUS */
+#define ETM_ETMAUTHSTATUS_NONSECNONINVDBG_DEFAULT     (_ETM_ETMAUTHSTATUS_NONSECNONINVDBG_DEFAULT << 2) /* Shifted mode DEFAULT for ETM_ETMAUTHSTATUS */
+#define ETM_ETMAUTHSTATUS_NONSECNONINVDBG_DISABLE     (_ETM_ETMAUTHSTATUS_NONSECNONINVDBG_DISABLE << 2) /* Shifted mode DISABLE for ETM_ETMAUTHSTATUS */
+#define ETM_ETMAUTHSTATUS_NONSECNONINVDBG_ENABLE      (_ETM_ETMAUTHSTATUS_NONSECNONINVDBG_ENABLE << 2)  /* Shifted mode ENABLE for ETM_ETMAUTHSTATUS */
+#define _ETM_ETMAUTHSTATUS_SECINVDBG_SHIFT            4                                                 /* Shift value for ETM_SECINVDBG */
+#define _ETM_ETMAUTHSTATUS_SECINVDBG_MASK             0x30UL                                            /* Bit mask for ETM_SECINVDBG */
+#define _ETM_ETMAUTHSTATUS_SECINVDBG_DEFAULT          0x00000000UL                                      /* Mode DEFAULT for ETM_ETMAUTHSTATUS */
+#define ETM_ETMAUTHSTATUS_SECINVDBG_DEFAULT           (_ETM_ETMAUTHSTATUS_SECINVDBG_DEFAULT << 4)       /* Shifted mode DEFAULT for ETM_ETMAUTHSTATUS */
+#define _ETM_ETMAUTHSTATUS_SECNONINVDBG_SHIFT         6                                                 /* Shift value for ETM_SECNONINVDBG */
+#define _ETM_ETMAUTHSTATUS_SECNONINVDBG_MASK          0xC0UL                                            /* Bit mask for ETM_SECNONINVDBG */
+#define _ETM_ETMAUTHSTATUS_SECNONINVDBG_DEFAULT       0x00000003UL                                      /* Mode DEFAULT for ETM_ETMAUTHSTATUS */
+#define ETM_ETMAUTHSTATUS_SECNONINVDBG_DEFAULT        (_ETM_ETMAUTHSTATUS_SECNONINVDBG_DEFAULT << 6)    /* Shifted mode DEFAULT for ETM_ETMAUTHSTATUS */
+
+/* Bit fields for ETM ETMDEVTYPE */
+
+#define _ETM_ETMDEVTYPE_RESETVALUE                    0x00000013UL                             /* Default value for ETM_ETMDEVTYPE */
+#define _ETM_ETMDEVTYPE_MASK                          0x000000FFUL                             /* Mask for ETM_ETMDEVTYPE */
+
+#define _ETM_ETMDEVTYPE_TRACESRC_SHIFT                0                                        /* Shift value for ETM_TRACESRC */
+#define _ETM_ETMDEVTYPE_TRACESRC_MASK                 0xFUL                                    /* Bit mask for ETM_TRACESRC */
+#define _ETM_ETMDEVTYPE_TRACESRC_DEFAULT              0x00000003UL                             /* Mode DEFAULT for ETM_ETMDEVTYPE */
+#define ETM_ETMDEVTYPE_TRACESRC_DEFAULT               (_ETM_ETMDEVTYPE_TRACESRC_DEFAULT << 0)  /* Shifted mode DEFAULT for ETM_ETMDEVTYPE */
+#define _ETM_ETMDEVTYPE_PROCTRACE_SHIFT               4                                        /* Shift value for ETM_PROCTRACE */
+#define _ETM_ETMDEVTYPE_PROCTRACE_MASK                0xF0UL                                   /* Bit mask for ETM_PROCTRACE */
+#define _ETM_ETMDEVTYPE_PROCTRACE_DEFAULT             0x00000001UL                             /* Mode DEFAULT for ETM_ETMDEVTYPE */
+#define ETM_ETMDEVTYPE_PROCTRACE_DEFAULT              (_ETM_ETMDEVTYPE_PROCTRACE_DEFAULT << 4) /* Shifted mode DEFAULT for ETM_ETMDEVTYPE */
+
+/* Bit fields for ETM ETMPIDR4 */
+
+#define _ETM_ETMPIDR4_RESETVALUE                      0x00000004UL                          /* Default value for ETM_ETMPIDR4 */
+#define _ETM_ETMPIDR4_MASK                            0x000000FFUL                          /* Mask for ETM_ETMPIDR4 */
+
+#define _ETM_ETMPIDR4_CONTCODE_SHIFT                  0                                     /* Shift value for ETM_CONTCODE */
+#define _ETM_ETMPIDR4_CONTCODE_MASK                   0xFUL                                 /* Bit mask for ETM_CONTCODE */
+#define _ETM_ETMPIDR4_CONTCODE_DEFAULT                0x00000004UL                          /* Mode DEFAULT for ETM_ETMPIDR4 */
+#define ETM_ETMPIDR4_CONTCODE_DEFAULT                 (_ETM_ETMPIDR4_CONTCODE_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMPIDR4 */
+#define _ETM_ETMPIDR4_COUNT_SHIFT                     4                                     /* Shift value for ETM_COUNT */
+#define _ETM_ETMPIDR4_COUNT_MASK                      0xF0UL                                /* Bit mask for ETM_COUNT */
+#define _ETM_ETMPIDR4_COUNT_DEFAULT                   0x00000000UL                          /* Mode DEFAULT for ETM_ETMPIDR4 */
+#define ETM_ETMPIDR4_COUNT_DEFAULT                    (_ETM_ETMPIDR4_COUNT_DEFAULT << 4)    /* Shifted mode DEFAULT for ETM_ETMPIDR4 */
+
+/* Bit fields for ETM ETMPIDR5 */
+
+#define _ETM_ETMPIDR5_RESETVALUE                      0x00000000UL /* Default value for ETM_ETMPIDR5 */
+#define _ETM_ETMPIDR5_MASK                            0x00000000UL /* Mask for ETM_ETMPIDR5 */
+
+/* Bit fields for ETM ETMPIDR6 */
+
+#define _ETM_ETMPIDR6_RESETVALUE                      0x00000000UL /* Default value for ETM_ETMPIDR6 */
+#define _ETM_ETMPIDR6_MASK                            0x00000000UL /* Mask for ETM_ETMPIDR6 */
+
+/* Bit fields for ETM ETMPIDR7 */
+
+#define _ETM_ETMPIDR7_RESETVALUE                      0x00000000UL /* Default value for ETM_ETMPIDR7 */
+#define _ETM_ETMPIDR7_MASK                            0x00000000UL /* Mask for ETM_ETMPIDR7 */
+
+/* Bit fields for ETM ETMPIDR0 */
+
+#define _ETM_ETMPIDR0_RESETVALUE                      0x00000024UL                         /* Default value for ETM_ETMPIDR0 */
+#define _ETM_ETMPIDR0_MASK                            0x000000FFUL                         /* Mask for ETM_ETMPIDR0 */
+
+#define _ETM_ETMPIDR0_PARTNUM_SHIFT                   0                                    /* Shift value for ETM_PARTNUM */
+#define _ETM_ETMPIDR0_PARTNUM_MASK                    0xFFUL                               /* Bit mask for ETM_PARTNUM */
+#define _ETM_ETMPIDR0_PARTNUM_DEFAULT                 0x00000024UL                         /* Mode DEFAULT for ETM_ETMPIDR0 */
+#define ETM_ETMPIDR0_PARTNUM_DEFAULT                  (_ETM_ETMPIDR0_PARTNUM_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMPIDR0 */
+
+/* Bit fields for ETM ETMPIDR1 */
+
+#define _ETM_ETMPIDR1_RESETVALUE                      0x000000B9UL                         /* Default value for ETM_ETMPIDR1 */
+#define _ETM_ETMPIDR1_MASK                            0x000000FFUL                         /* Mask for ETM_ETMPIDR1 */
+
+#define _ETM_ETMPIDR1_PARTNUM_SHIFT                   0                                    /* Shift value for ETM_PARTNUM */
+#define _ETM_ETMPIDR1_PARTNUM_MASK                    0xFUL                                /* Bit mask for ETM_PARTNUM */
+#define _ETM_ETMPIDR1_PARTNUM_DEFAULT                 0x00000009UL                         /* Mode DEFAULT for ETM_ETMPIDR1 */
+#define ETM_ETMPIDR1_PARTNUM_DEFAULT                  (_ETM_ETMPIDR1_PARTNUM_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMPIDR1 */
+#define _ETM_ETMPIDR1_IDCODE_SHIFT                    4                                    /* Shift value for ETM_IDCODE */
+#define _ETM_ETMPIDR1_IDCODE_MASK                     0xF0UL                               /* Bit mask for ETM_IDCODE */
+#define _ETM_ETMPIDR1_IDCODE_DEFAULT                  0x0000000BUL                         /* Mode DEFAULT for ETM_ETMPIDR1 */
+#define ETM_ETMPIDR1_IDCODE_DEFAULT                   (_ETM_ETMPIDR1_IDCODE_DEFAULT << 4)  /* Shifted mode DEFAULT for ETM_ETMPIDR1 */
+
+/* Bit fields for ETM ETMPIDR2 */
+
+#define _ETM_ETMPIDR2_RESETVALUE                      0x0000003BUL                         /* Default value for ETM_ETMPIDR2 */
+#define _ETM_ETMPIDR2_MASK                            0x000000FFUL                         /* Mask for ETM_ETMPIDR2 */
+
+#define _ETM_ETMPIDR2_IDCODE_SHIFT                    0                                    /* Shift value for ETM_IDCODE */
+#define _ETM_ETMPIDR2_IDCODE_MASK                     0x7UL                                /* Bit mask for ETM_IDCODE */
+#define _ETM_ETMPIDR2_IDCODE_DEFAULT                  0x00000003UL                         /* Mode DEFAULT for ETM_ETMPIDR2 */
+#define ETM_ETMPIDR2_IDCODE_DEFAULT                   (_ETM_ETMPIDR2_IDCODE_DEFAULT << 0)  /* Shifted mode DEFAULT for ETM_ETMPIDR2 */
+#define ETM_ETMPIDR2_ALWAYS1                          (0x1UL << 3)                         /* Always 1 */
+#define _ETM_ETMPIDR2_ALWAYS1_SHIFT                   3                                    /* Shift value for ETM_ALWAYS1 */
+#define _ETM_ETMPIDR2_ALWAYS1_MASK                    0x8UL                                /* Bit mask for ETM_ALWAYS1 */
+#define _ETM_ETMPIDR2_ALWAYS1_DEFAULT                 0x00000001UL                         /* Mode DEFAULT for ETM_ETMPIDR2 */
+#define ETM_ETMPIDR2_ALWAYS1_DEFAULT                  (_ETM_ETMPIDR2_ALWAYS1_DEFAULT << 3) /* Shifted mode DEFAULT for ETM_ETMPIDR2 */
+#define _ETM_ETMPIDR2_REV_SHIFT                       4                                    /* Shift value for ETM_REV */
+#define _ETM_ETMPIDR2_REV_MASK                        0xF0UL                               /* Bit mask for ETM_REV */
+#define _ETM_ETMPIDR2_REV_DEFAULT                     0x00000003UL                         /* Mode DEFAULT for ETM_ETMPIDR2 */
+#define ETM_ETMPIDR2_REV_DEFAULT                      (_ETM_ETMPIDR2_REV_DEFAULT << 4)     /* Shifted mode DEFAULT for ETM_ETMPIDR2 */
+
+/* Bit fields for ETM ETMPIDR3 */
+
+#define _ETM_ETMPIDR3_RESETVALUE                      0x00000000UL                         /* Default value for ETM_ETMPIDR3 */
+#define _ETM_ETMPIDR3_MASK                            0x000000FFUL                         /* Mask for ETM_ETMPIDR3 */
+
+#define _ETM_ETMPIDR3_CUSTMOD_SHIFT                   0                                    /* Shift value for ETM_CUSTMOD */
+#define _ETM_ETMPIDR3_CUSTMOD_MASK                    0xFUL                                /* Bit mask for ETM_CUSTMOD */
+#define _ETM_ETMPIDR3_CUSTMOD_DEFAULT                 0x00000000UL                         /* Mode DEFAULT for ETM_ETMPIDR3 */
+#define ETM_ETMPIDR3_CUSTMOD_DEFAULT                  (_ETM_ETMPIDR3_CUSTMOD_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMPIDR3 */
+#define _ETM_ETMPIDR3_REVAND_SHIFT                    4                                    /* Shift value for ETM_REVAND */
+#define _ETM_ETMPIDR3_REVAND_MASK                     0xF0UL                               /* Bit mask for ETM_REVAND */
+#define _ETM_ETMPIDR3_REVAND_DEFAULT                  0x00000000UL                         /* Mode DEFAULT for ETM_ETMPIDR3 */
+#define ETM_ETMPIDR3_REVAND_DEFAULT                   (_ETM_ETMPIDR3_REVAND_DEFAULT << 4)  /* Shifted mode DEFAULT for ETM_ETMPIDR3 */
+
+/* Bit fields for ETM ETMCIDR0 */
+
+#define _ETM_ETMCIDR0_RESETVALUE                      0x0000000DUL                        /* Default value for ETM_ETMCIDR0 */
+#define _ETM_ETMCIDR0_MASK                            0x000000FFUL                        /* Mask for ETM_ETMCIDR0 */
+
+#define _ETM_ETMCIDR0_PREAMB_SHIFT                    0                                   /* Shift value for ETM_PREAMB */
+#define _ETM_ETMCIDR0_PREAMB_MASK                     0xFFUL                              /* Bit mask for ETM_PREAMB */
+#define _ETM_ETMCIDR0_PREAMB_DEFAULT                  0x0000000DUL                        /* Mode DEFAULT for ETM_ETMCIDR0 */
+#define ETM_ETMCIDR0_PREAMB_DEFAULT                   (_ETM_ETMCIDR0_PREAMB_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMCIDR0 */
+
+/* Bit fields for ETM ETMCIDR1 */
+
+#define _ETM_ETMCIDR1_RESETVALUE                      0x00000090UL                        /* Default value for ETM_ETMCIDR1 */
+#define _ETM_ETMCIDR1_MASK                            0x000000FFUL                        /* Mask for ETM_ETMCIDR1 */
+
+#define _ETM_ETMCIDR1_PREAMB_SHIFT                    0                                   /* Shift value for ETM_PREAMB */
+#define _ETM_ETMCIDR1_PREAMB_MASK                     0xFFUL                              /* Bit mask for ETM_PREAMB */
+#define _ETM_ETMCIDR1_PREAMB_DEFAULT                  0x00000090UL                        /* Mode DEFAULT for ETM_ETMCIDR1 */
+#define ETM_ETMCIDR1_PREAMB_DEFAULT                   (_ETM_ETMCIDR1_PREAMB_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMCIDR1 */
+
+/* Bit fields for ETM ETMCIDR2 */
+
+#define _ETM_ETMCIDR2_RESETVALUE                      0x00000005UL                        /* Default value for ETM_ETMCIDR2 */
+#define _ETM_ETMCIDR2_MASK                            0x000000FFUL                        /* Mask for ETM_ETMCIDR2 */
+
+#define _ETM_ETMCIDR2_PREAMB_SHIFT                    0                                   /* Shift value for ETM_PREAMB */
+#define _ETM_ETMCIDR2_PREAMB_MASK                     0xFFUL                              /* Bit mask for ETM_PREAMB */
+#define _ETM_ETMCIDR2_PREAMB_DEFAULT                  0x00000005UL                        /* Mode DEFAULT for ETM_ETMCIDR2 */
+#define ETM_ETMCIDR2_PREAMB_DEFAULT                   (_ETM_ETMCIDR2_PREAMB_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMCIDR2 */
+
+/* Bit fields for ETM ETMCIDR3 */
+
+#define _ETM_ETMCIDR3_RESETVALUE                      0x000000B1UL                        /* Default value for ETM_ETMCIDR3 */
+#define _ETM_ETMCIDR3_MASK                            0x000000FFUL                        /* Mask for ETM_ETMCIDR3 */
+
+#define _ETM_ETMCIDR3_PREAMB_SHIFT                    0                                   /* Shift value for ETM_PREAMB */
+#define _ETM_ETMCIDR3_PREAMB_MASK                     0xFFUL                              /* Bit mask for ETM_PREAMB */
+#define _ETM_ETMCIDR3_PREAMB_DEFAULT                  0x000000B1UL                        /* Mode DEFAULT for ETM_ETMCIDR3 */
+#define ETM_ETMCIDR3_PREAMB_DEFAULT                   (_ETM_ETMCIDR3_PREAMB_DEFAULT << 0) /* Shifted mode DEFAULT for ETM_ETMCIDR3 */
+
+#endif /* __ARCH_ARM_SRC_ARMV8_M_ETM_H */
diff --git a/arch/arm/src/armv8-m/exc_return.h b/arch/arm/src/armv8-m/exc_return.h
new file mode 100755
index 0000000..061d333
--- /dev/null
+++ b/arch/arm/src/armv8-m/exc_return.h
@@ -0,0 +1,104 @@
+/************************************************************************************
+ * arch/arm/src/armv8-m/exc_return.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8_M_EXC_RETURN_H
+#define __ARCH_ARM_SRC_ARMV8_M_EXC_RETURN_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+#include <nuttx/config.h>
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/* The processor saves an EXC_RETURN value to the LR on exception entry. The
+ * exception mechanism relies on this value to detect when the processor has
+ * completed an exception handler.
+ *
+ * Bits [31:28] of an EXC_RETURN value are always 1.  When the processor loads a
+ * value matching this pattern to the PC it detects that the operation is not
+ * a normal branch operation and instead, that the exception is complete.
+ * Therefore, it starts the exception return sequence.
+ *
+ * Bits[4:0] of the EXC_RETURN value indicate the required return stack and eventual
+ * processor mode.  The remaining bits of the EXC_RETURN value should be set to 1.
+ */
+
+/* EXC_RETURN_BASE: Bits that are always set in an EXC_RETURN value. */
+
+#define EXC_RETURN_BASE          0xffffffe1
+
+/* EXC_RETURN_PROCESS_STACK: The exception saved (and will restore) the hardware
+ * context using the process stack pointer (if not set, the context was saved
+ * using the main stack pointer)
+ */
+
+#define EXC_RETURN_PROCESS_STACK (1 << 2)
+
+/* EXC_RETURN_THREAD_MODE: The exception will return to thread mode (if not set,
+ * return stays in handler mode)
+ */
+
+#define EXC_RETURN_THREAD_MODE   (1 << 3)
+
+/* EXC_RETURN_STD_CONTEXT: The state saved on the stack does not include the
+ * volatile FP registers and FPSCR.  If this bit is clear, the state does include
+ * these registers.
+ */
+
+#define EXC_RETURN_STD_CONTEXT   (1 << 4)
+
+/* EXC_RETURN_HANDLER: Return to handler mode. Exception return gets state from
+ * the main stack. Execution uses MSP after return.
+ */
+
+#define EXC_RETURN_HANDLER       0xfffffff1
+
+/* EXC_RETURN_PRIVTHR: Return to privileged thread mode. Exception return gets
+ * state from the main stack. Execution uses MSP after return.
+ */
+
+#if !defined(CONFIG_ARMV8M_LAZYFPU) && defined(CONFIG_ARCH_FPU)
+#  define EXC_RETURN_PRIVTHR     (EXC_RETURN_BASE | EXC_RETURN_THREAD_MODE)
+#else
+#  define EXC_RETURN_PRIVTHR     (EXC_RETURN_BASE | EXC_RETURN_STD_CONTEXT | \
+                                  EXC_RETURN_THREAD_MODE)
+#endif
+
+/* EXC_RETURN_UNPRIVTHR: Return to unprivileged thread mode. Exception return gets
+ * state from the process stack. Execution uses PSP after return.
+ */
+
+#if !defined(CONFIG_ARMV8M_LAZYFPU) && defined(CONFIG_ARCH_FPU)
+#  define EXC_RETURN_UNPRIVTHR   (EXC_RETURN_BASE | EXC_RETURN_THREAD_MODE | \
+                                  EXC_RETURN_PROCESS_STACK)
+#else
+#  define EXC_RETURN_UNPRIVTHR   (EXC_RETURN_BASE | EXC_RETURN_STD_CONTEXT | \
+                                  EXC_RETURN_THREAD_MODE | EXC_RETURN_PROCESS_STACK)
+#endif
+
+/************************************************************************************
+ * Inline Functions
+ ************************************************************************************/
+
+#endif /* __ARCH_ARM_SRC_ARMV8_M_EXC_RETURN_H */
diff --git a/arch/arm/src/armv8-m/fpb.h b/arch/arm/src/armv8-m/fpb.h
new file mode 100755
index 0000000..afe5113
--- /dev/null
+++ b/arch/arm/src/armv8-m/fpb.h
@@ -0,0 +1,167 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/fpb.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8_M_FPB_H
+#define __ARCH_ARM_SRC_ARMV8_M_FPB_H
+
+/******************************************************************************
+ * Pre-processor Definitions
+ ******************************************************************************/
+
+/* Flash Patch and Breakpoint Unit FPB ***************************************/
+
+/* FPB Register Base Address *************************************************/
+
+#define FPB_BASE                  0xe0002000
+
+/* FPB Register Offsets *******************************************************/
+
+#define FPB_CTRL_OFFSET       0x0000  /* Control */
+#define FPB_REMAP_OFFSET      0x0004  /* Remap */
+#define FPB_COMP0_OFFSET      0x0008  /* Comparator 0 */
+#define FPB_COMP1_OFFSET      0x000c  /* Comparator 1 */
+#define FPB_COMP2_OFFSET      0x0010  /* Comparator 2 */
+#define FPB_COMP3_OFFSET      0x0014  /* Comparator 3 */
+#define FPB_COMP4_OFFSET      0x0018  /* Comparator 4 */
+#define FPB_COMP5_OFFSET      0x001C  /* Comparator 5 */
+#define FPB_COMP6_OFFSET      0x0020  /* Comparator 6 */
+#define FPB_COMP7_OFFSET      0x0024  /* Comparator 7 */
+
+/* FPB Register Addresses *****************************************************/
+
+#define FPB_CTRL              (FPB_BASE + FPB_CTRL_OFFSET)
+#define FPB_REMAP             (FPB_BASE + FPB_REMAP_OFFSET)
+#define FPB_COMP0             (FPB_BASE + FPB_COMP0_OFFSET)
+#define FPB_COMP1             (FPB_BASE + FPB_COMP1_OFFSET)
+#define FPB_COMP2             (FPB_BASE + FPB_COMP2_OFFSET)
+#define FPB_COMP3             (FPB_BASE + FPB_COMP3_OFFSET)
+#define FPB_COMP4             (FPB_BASE + FPB_COMP4_OFFSET)
+#define FPB_COMP5             (FPB_BASE + FPB_COMP5_OFFSET)
+#define FPB_COMP6             (FPB_BASE + FPB_COMP6_OFFSET)
+#define FPB_COMP7             (FPB_BASE + FPB_COMP7_OFFSET)
+
+/* FPB Register Bitfield Definitions ******************************************/
+
+/* FPB_CTRL */
+
+/* NUM_CODE2
+ *
+ * Number of full banks of code comparators, sixteen comparators per bank.
+ * Where less than sixteen code comparators are provided, the bank count is
+ * zero, and the number present indicated by NUM_CODE1. This read only field
+ * contains 3'b000 to indicate 0 banks for Cortex-M processor.
+ */
+
+#define FPB_CTRL_NUM_CODE2_SHIFT  12
+#define FPB_CTRL_NUM_CODE2_MASK   0x00003000
+
+/* NUM_LIT
+ *
+ * Number of literal slots field.
+ *
+ * 0: No literal slots
+ * 2: Two literal slots
+ */
+
+#define FPB_CTRL_NUM_LIT_SHIFT    8
+#define FPB_CTRL_NUM_LIT_MASK     0x00000f00
+
+/* NUM_CODE1
+ *
+ * Number of code slots field.
+ *
+ * 0: No code slots
+ * 2: Two code slots
+ * 6: Six code slots
+ */
+
+#define FPB_CTRL_NUM_CODE1_SHIFT  4
+#define FPB_CTRL_NUM_CODE1_MASK   0x000000f0
+
+/* KEY
+ *
+ * Key field. In order to write to this register, this bit-field must be
+ * written to '1'. This bit always reads 0.
+ */
+
+#define FPB_CTRL_KEY_SHIFT        1
+#define FPB_CTRL_KEY_MASK         0x00000002
+#  define FPB_CTRL_KEY            0x00000002
+
+/* ENABLE
+ *
+ * Flash patch unit enable bit
+ *
+ * 0: Flash patch unit disabled
+ * 1: Flash patch unit enabled
+ */
+
+#define FPB_CTRL_ENABLE_SHIFT     0
+#define FPB_CTRL_ENABLE_MASK      0x00000001
+#  define FPB_CTRL_ENABLE         0x00000001
+
+/* FPB_REMAP */
+
+/* REMAP
+ *
+ * Remap base address field.
+ */
+
+#define FPB_REMAP_REMAP_SHIFT     5
+#define FPB_REMAP_REMAP_MASK      0x1fffffe0
+
+/* FPB_COMP0 - FPB_COMP7 */
+
+/* REPLACE
+ *
+ * This selects what happens when the COMP address is matched. Address
+ * remapping only takes place for the 0x0 setting.
+ *
+ * 0: Remap to remap address. See REMAP.REMAP
+ * 1: Set BKPT on lower halfword, upper is unaffected
+ * 2: Set BKPT on upper halfword, lower is unaffected
+ * 3: Set BKPT on both lower and upper halfwords.
+ */
+
+#define FPB_COMP0_REPLACE_SHIFT   30
+#define FPB_COMP0_REPLACE_MASK    0xc0000000
+
+/* COMP
+ *
+ * Comparison address.
+ */
+
+#define FPB_COMP0_COMP_SHIFT      2
+#define FPB_COMP0_COMP_MASK       0x1ffffffc
+
+/* ENABLE
+ *
+ * Compare and remap enable comparator. CTRL.ENABLE must also be set to
+ * enable comparisons.
+ *
+ * 0: Compare and remap for comparator 0 disabled
+ * 1: Compare and remap for comparator 0 enabled
+ */
+
+#define FPB_COMP0_ENABLE_MASK     0x00000001
+#define FPB_COMP0_ENABLE_SHIFT    0
+#  define FPB_COMP0_ENABLE        0x00000001
+
+#endif /* __ARCH_ARM_SRC_ARMV8_M_FPB_H */
diff --git a/arch/arm/src/armv8-m/itm.h b/arch/arm/src/armv8-m/itm.h
new file mode 100755
index 0000000..f3a9217
--- /dev/null
+++ b/arch/arm/src/armv8-m/itm.h
@@ -0,0 +1,184 @@
+/***********************************************************************************************
+ * arch/arm/src/armv8-m/itm.h
+ *
+ *   Copyright (c) 2009 - 2013 ARM LIMITED
+ *
+ *  All rights reserved.
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions are met:
+ *
+ *  - Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  - Neither the name of ARM nor the names of its contributors may be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *  ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *  POSSIBILITY OF SUCH DAMAGE.
+ *
+ *   Copyright (C) 2014 Pierre-noel Bouteville . All rights reserved.
+ *   Author: Pierre-noel Bouteville <pnb990@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***********************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8_M_ITM_H
+#define __ARCH_ARM_SRC_ARMV8_M_ITM_H
+
+/***********************************************************************************************
+ * Included Files
+ ***********************************************************************************************/
+
+#include <stdint.h>
+
+/***********************************************************************************************
+ * Pre-processor Definitions
+ ***********************************************************************************************/
+
+/* Instrumentation Trace Macrocell Register (ITM) Definitions **********************************/
+/* ITM Register Base Address *******************************************************************/
+
+#define ITM_BASE                 (0xe0000000ul)
+
+/* ITM Register Addresses **********************************************************************/
+
+#define ITM_PORT(i)              (ITM_BASE + (i * 4)) /* Stimulus Port 32-bit */
+#define ITM_TER                  (ITM_BASE + 0x0e00)  /* Trace Enable Register */
+#define ITM_TPR                  (ITM_BASE + 0x0e40)  /* Trace Privilege Register */
+#define ITM_TCR                  (ITM_BASE + 0x0e80)  /* Trace Control Register */
+#define ITM_IWR                  (ITM_BASE + 0x0ef8)  /* Integration Write Register */
+#define ITM_IRR                  (ITM_BASE + 0x0efc)  /* Integration Read Register */
+#define ITM_IMCR                 (ITM_BASE + 0x0f00)  /* Integration Mode Control Register */
+#define ITM_LAR                  (ITM_BASE + 0x0fb0)  /* Lock Access Register */
+#define ITM_LSR                  (ITM_BASE + 0x0fb4)  /* Lock Status Register */
+#define ITM_PID4                 (ITM_BASE + 0x0fd0)  /* Peripheral Identification Register #4 */
+#define ITM_PID5                 (ITM_BASE + 0x0fd4)  /* Peripheral Identification Register #5 */
+#define ITM_PID6                 (ITM_BASE + 0x0fd8)  /* Peripheral Identification Register #6 */
+#define ITM_PID7                 (ITM_BASE + 0x0fdc)  /* Peripheral Identification Register #7 */
+#define ITM_PID0                 (ITM_BASE + 0x0fe0)  /* Peripheral Identification Register #0 */
+#define ITM_PID1                 (ITM_BASE + 0x0fe4)  /* Peripheral Identification Register #1 */
+#define ITM_PID2                 (ITM_BASE + 0x0fe8)  /* Peripheral Identification Register #2 */
+#define ITM_PID3                 (ITM_BASE + 0x0fec)  /* Peripheral Identification Register #3 */
+#define ITM_CID0                 (ITM_BASE + 0x0ff0)  /* Component  Identification Register #0 */
+#define ITM_CID1                 (ITM_BASE + 0x0ff4)  /* Component  Identification Register #1 */
+#define ITM_CID2                 (ITM_BASE + 0x0ff8)  /* Component  Identification Register #2 */
+#define ITM_CID3                 (ITM_BASE + 0x0ffc)  /* Component  Identification Register #3 */
+
+/* ITM Register Bit Field Definitions **********************************************************/
+
+/* ITM TPR */
+
+#define ITM_TPR_PRIVMASK_SHIFT   0
+#define ITM_TPR_PRIVMASK_MASK    (0xful << ITM_TPR_PRIVMASK_SHIFT)
+
+/* ITM TCR */
+
+#define ITM_TCR_BUSY_SHIFT       23
+#define ITM_TCR_BUSY_MASK        (1ul << ITM_TCR_BUSY_SHIFT)
+#define ITM_TCR_TraceBusID_SHIFT 16
+#define ITM_TCR_TraceBusID_MASK  (0x7ful << ITM_TCR_TraceBusID_SHIFT)
+#define ITM_TCR_GTSFREQ_SHIFT    10
+#define ITM_TCR_GTSFREQ_MASK     (3ul << ITM_TCR_GTSFREQ_SHIFT)
+#define ITM_TCR_TSPrescale_SHIFT 8
+#define ITM_TCR_TSPrescale_MASK  (3ul << ITM_TCR_TSPrescale_SHIFT)
+#define ITM_TCR_SWOENA_SHIFT     4
+#define ITM_TCR_SWOENA_MASK      (1ul << ITM_TCR_SWOENA_SHIFT)
+#define ITM_TCR_DWTENA_SHIFT     3
+#define ITM_TCR_DWTENA_MASK      (1ul << ITM_TCR_DWTENA_SHIFT)
+#define ITM_TCR_SYNCENA_SHIFT    2
+#define ITM_TCR_SYNCENA_MASK     (1ul << ITM_TCR_SYNCENA_SHIFT)
+#define ITM_TCR_TSENA_SHIFT      1
+#define ITM_TCR_TSENA_MASK       (1ul << ITM_TCR_TSENA_SHIFT)
+#define ITM_TCR_ITMENA_SHIFT     0
+#define ITM_TCR_ITMENA_MASK      (1ul << ITM_TCR_ITMENA_SHIFT)
+
+/* ITM IWR */
+
+#define ITM_IWR_ATVALIDM_SHIFT   0
+#define ITM_IWR_ATVALIDM_MASK    (1ul << ITM_IWR_ATVALIDM_SHIFT)
+
+/* ITM IRR */
+
+#define ITM_IRR_ATREADYM_SHIFT   0
+#define ITM_IRR_ATREADYM_MASK    (1ul << ITM_IRR_ATREADYM_SHIFT)
+
+/* ITM IMCR */
+
+#define ITM_IMCR_INTEGRATION_SHIFT 0
+#define ITM_IMCR_INTEGRATION_MASK  (1ul << ITM_IMCR_INTEGRATION_SHIFT)
+
+/* ITM LSR */
+
+#define ITM_LSR_ByteAcc_SHIFT    2
+#define ITM_LSR_ByteAcc_MASK     (1ul << ITM_LSR_ByteAcc_SHIFT)
+#define ITM_LSR_Access_SHIFT     1
+#define ITM_LSR_Access_MASK      (1ul << ITM_LSR_Access_SHIFT)
+#define ITM_LSR_Present_SHIFT    0
+#define ITM_LSR_Present_MASK     (1ul << ITM_LSR_Present_SHIFT)
+
+/* Value identifying g_itm_rxbuffer is ready for next character. */
+
+#define ITM_RXBUFFER_EMPTY       0x5aa55aa5
+
+/***********************************************************************************************
+ * Public Data
+ ***********************************************************************************************/
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+extern volatile int32_t g_itm_rxbuffer; /* External variable to receive characters. */
+
+/***********************************************************************************************
+ * Public Function Prototypes
+ ***********************************************************************************************/
+
+uint32_t itm_sendchar(uint32_t ch);
+int32_t itm_receivechar(void);
+int32_t itm_checkchar(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __ARCH_ARM_SRC_ARMV8_M_ITM_H */
diff --git a/arch/arm/include/irq.h b/arch/arm/src/armv8-m/itm_syslog.h
old mode 100644
new mode 100755
similarity index 63%
copy from arch/arm/include/irq.h
copy to arch/arm/src/armv8-m/itm_syslog.h
index 6c47d52..88f70e3
--- a/arch/arm/include/irq.h
+++ b/arch/arm/src/armv8-m/itm_syslog.h
@@ -1,9 +1,10 @@
 /****************************************************************************
- * arch/arm/include/irq.h
+ * arch/arm/src/armv8-m/itm_syslog.h
  *
- *   Copyright (C) 2007-2009, 2011, 2015, 2019 Gregory Nutt. All rights
- *     reserved.
- *   Author: Gregory Nutt <gnutt@nuttx.org>
+ *   Copyright (C) 2014 Pierre-noel Bouteville . All rights reserved.
+ *   Copyright (C) 2014 Gregory Nutt. All rights reserved.
+ *   Authors: Pierre-noel Bouteville <pnb990@gmail.com>
+ *            Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -34,39 +35,32 @@
  *
  ****************************************************************************/
 
-/* This file should never be included directly but, rather, only indirectly
- * through nuttx/irq.h
- */
-
-#ifndef __ARCH_ARM_INCLUDE_IRQ_H
-#define __ARCH_ARM_INCLUDE_IRQ_H
+#ifndef __ARCH_ARM_SRC_ARMV8_M_ITM_SYSLOG_H
+#define __ARCH_ARM_SRC_ARMV8_M_ITM_SYSLOG_H
 
 /****************************************************************************
- * Included Files
+ * Public Functions
  ****************************************************************************/
 
-/* Include NuttX-specific IRQ definitions */
-
-#include <nuttx/irq.h>
-
-/* Include chip-specific IRQ definitions (including IRQ numbers) */
-
-#include <arch/chip/irq.h>
-
-/* Include ARM architecture-specific IRQ definitions (including register
- * save structure and up_irq_save()/up_irq_restore() functions)
- */
+/****************************************************************************
+ * Name: itm_syslog_initialize
+ *
+ * Description:
+ *   Performs ARM-specific initialize for the ITM SYSLOG functions.
+ *   Additional, board specific logic may be required to:
+ *
+ *   - Enable/configured serial wire output pins
+ *   - Enable debug clocking.
+ *
+ *   Those operations must be performed by MCU-specific logic before this
+ *   function is called.
+ *
+ ****************************************************************************/
 
-#if defined(CONFIG_ARCH_ARMV7A)
-#  include <arch/armv7-a/irq.h>
-#elif defined(CONFIG_ARCH_ARMV7R)
-#  include <arch/armv7-r/irq.h>
-#elif defined(CONFIG_ARCH_ARMV7M)
-#  include <arch/armv7-m/irq.h>
-#elif defined(CONFIG_ARCH_CORTEXM0)
-#  include <arch/armv6-m/irq.h>
+#ifdef CONFIG_ARMV8M_ITMSYSLOG
+void itm_syslog_initialize(void);
 #else
-#  include <arch/arm/irq.h>
+#  define itm_syslog_initialize()
 #endif
 
-#endif /* __ARCH_ARM_INCLUDE_IRQ_H */
+#endif /* __ARCH_ARM_SRC_ARMV8_M_ITM_SYSLOG_H */
diff --git a/arch/arm/src/armv8-m/mpu.h b/arch/arm/src/armv8-m/mpu.h
new file mode 100755
index 0000000..30dfc19
--- /dev/null
+++ b/arch/arm/src/armv8-m/mpu.h
@@ -0,0 +1,446 @@
+/*********************************************************************************************
+ * arch/arm/src/armv8-m/mpu.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ *********************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8M_MPU_H
+#define __ARCH_ARM_SRC_ARMV8M_MPU_H
+
+/*********************************************************************************************
+ * Included Files
+ *********************************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+#  include <sys/types.h>
+#  include <stdint.h>
+#  include <stdbool.h>
+#  include <assert.h>
+#  include <debug.h>
+
+#  include "up_arch.h"
+#endif
+
+/*********************************************************************************************
+ * Pre-processor Definitions
+ *********************************************************************************************/
+
+/* MPU Register Addresses */
+
+#define MPU_TYPE                0xe000ed90 /* MPU Type Register */
+#define MPU_CTRL                0xe000ed94 /* MPU Control Register */
+#define MPU_RNR                 0xe000ed98 /* MPU Region Number Register */
+#define MPU_RBAR                0xe000ed9c /* MPU Region Base Address Register */
+#define MPU_RASR                0xe000eda0 /* MPU Region Attribute and Size Register */
+
+#define MPU_RBAR_A1             0xe000eda4 /* MPU alias registers */
+#define MPU_RASR_A1             0xe000eda8
+#define MPU_RBAR_A2             0xe000edac
+#define MPU_RASR_A2             0xe000edb0
+#define MPU_RBAR_A3             0xe000edb4
+#define MPU_RASR_A3             0xe000edb8
+
+/* MPU Type Register Bit Definitions */
+
+#define MPU_TYPE_SEPARATE       (1 << 0) /* Bit 0: 0:unified or 1:separate memory maps */
+#define MPU_TYPE_DREGION_SHIFT  (8)      /* Bits 8-15: Number MPU data regions */
+#define MPU_TYPE_DREGION_MASK   (0xff << MPU_TYPE_DREGION_SHIFT)
+#define MPU_TYPE_IREGION_SHIFT  (16)     /* Bits 16-23: Number MPU instruction regions */
+#define MPU_TYPE_IREGION_MASK   (0xff << MPU_TYPE_IREGION_SHIFT)
+
+/* MPU Control Register Bit Definitions */
+
+#define MPU_CTRL_ENABLE         (1 << 0)  /* Bit 0: Enable the MPU */
+#define MPU_CTRL_HFNMIENA       (1 << 1)  /* Bit 1: Enable MPU during hard fault, NMI, and FAULTMASK */
+#define MPU_CTRL_PRIVDEFENA     (1 << 2)  /* Bit 2: Enable privileged access to default memory map */
+
+/* MPU Region Number Register Bit Definitions */
+
+#if defined(CONFIG_ARM_MPU_NREGIONS) && defined(CONFIG_ARM_MPU)
+#  if CONFIG_ARM_MPU_NREGIONS <= 8
+#    define MPU_RNR_MASK            (0x00000007)
+#  elif CONFIG_ARM_MPU_NREGIONS <= 16
+#    define MPU_RNR_MASK            (0x0000000f)
+#  elif CONFIG_ARM_MPU_NREGIONS <= 32
+#    define MPU_RNR_MASK            (0x0000001f)
+#  else
+#    error "FIXME: Unsupported number of MPU regions"
+#  endif
+#endif
+
+/* MPU Region Base Address Register Bit Definitions */
+
+#define MPU_RBAR_REGION_SHIFT   (0)                         /* Bits 0-3: MPU region */
+#define MPU_RBAR_REGION_MASK    (15 << MPU_RBAR_REGION_SHIFT)
+#define MPU_RBAR_VALID          (1 << 4)                    /* Bit 4: MPU Region Number valid */
+#define MPU_RBAR_ADDR_MASK      0xffffffe0                  /* Bits N-31:  Region base address */
+
+/* MPU Region Attributes and Size Register Bit Definitions */
+
+#define MPU_RASR_ENABLE         (1 << 0)                   /* Bit 0: Region enable */
+#define MPU_RASR_SIZE_SHIFT     (1)                        /* Bits 1-5: Size of the MPU protection region */
+#define MPU_RASR_SIZE_MASK      (31 << MPU_RASR_SIZE_SHIFT)
+#  define MPU_RASR_SIZE_LOG2(n) ((n-1) << MPU_RASR_SIZE_SHIFT)
+#define MPU_RASR_SRD_SHIFT      (8)                        /* Bits 8-15: Subregion disable */
+#define MPU_RASR_SRD_MASK       (0xff << MPU_RASR_SRD_SHIFT)
+#  define MPU_RASR_SRD_0        (0x01 << MPU_RASR_SRD_SHIFT)
+#  define MPU_RASR_SRD_1        (0x02 << MPU_RASR_SRD_SHIFT)
+#  define MPU_RASR_SRD_2        (0x04 << MPU_RASR_SRD_SHIFT)
+#  define MPU_RASR_SRD_3        (0x08 << MPU_RASR_SRD_SHIFT)
+#  define MPU_RASR_SRD_4        (0x10 << MPU_RASR_SRD_SHIFT)
+#  define MPU_RASR_SRD_5        (0x20 << MPU_RASR_SRD_SHIFT)
+#  define MPU_RASR_SRD_6        (0x40 << MPU_RASR_SRD_SHIFT)
+#  define MPU_RASR_SRD_7        (0x80 << MPU_RASR_SRD_SHIFT)
+#define MPU_RASR_ATTR_SHIFT     (16)                       /* Bits 16-31: MPU Region Attribute field */
+#define MPU_RASR_ATTR_MASK      (0xffff << MPU_RASR_ATTR_SHIFT)
+#  define MPU_RASR_B            (1 << 16)                  /* Bit 16: Bufferable */
+#  define MPU_RASR_C            (1 << 17)                  /* Bit 17: Cacheable */
+#  define MPU_RASR_S            (1 << 18)                  /* Bit 18: Shareable */
+#  define MPU_RASR_TEX_SHIFT    (19)                       /* Bits 19-21: TEX Address Permission */
+#  define MPU_RASR_TEX_MASK     (7 << MPU_RASR_TEX_SHIFT)
+#    define MPU_RASR_TEX_SO     (0 << MPU_RASR_TEX_SHIFT) /* Strongly Ordered */
+#    define MPU_RASR_TEX_NOR    (1 << MPU_RASR_TEX_SHIFT) /* Normal           */
+#    define MPU_RASR_TEX_DEV    (2 << MPU_RASR_TEX_SHIFT) /* Device           */
+#    define MPU_RASR_TEX_BB(bb) ((4|(bb)) << MPU_RASR_TEX_SHIFT)
+#      define MPU_RASR_CP_NC    (0)                       /* Non-cacheable */
+#      define MPU_RASR_CP_WBRA  (1)                       /* Write back, write and Read- Allocate */
+#      define MPU_RASR_CP_WT    (2)                       /* Write through, no Write-Allocate */
+#      define MPU_RASR_CP_WB    (4)                       /* Write back, no Write-Allocate */
+#  define MPU_RASR_AP_SHIFT     (24)                      /* Bits 24-26: Access permission */
+#  define MPU_RASR_AP_MASK      (7 << MPU_RASR_AP_SHIFT)
+#    define MPU_RASR_AP_NONO    (0 << MPU_RASR_AP_SHIFT)  /* P:None U:None */
+#    define MPU_RASR_AP_RWNO    (1 << MPU_RASR_AP_SHIFT)  /* P:RW   U:None */
+#    define MPU_RASR_AP_RWRO    (2 << MPU_RASR_AP_SHIFT)  /* P:RW   U:RO   */
+#    define MPU_RASR_AP_RWRW    (3 << MPU_RASR_AP_SHIFT)  /* P:RW   U:RW   */
+#    define MPU_RASR_AP_RONO    (5 << MPU_RASR_AP_SHIFT)  /* P:RO   U:None */
+#    define MPU_RASR_AP_RORO    (6 << MPU_RASR_AP_SHIFT)  /* P:RO   U:RO   */
+#  define MPU_RASR_XN           (1 << 28)                 /* Bit 28: Instruction access disable */
+
+#ifdef CONFIG_ARM_MPU
+
+/*********************************************************************************************
+ * Public Function Prototypes
+ *********************************************************************************************/
+
+#ifndef __ASSEMBLY__
+#undef EXTERN
+#if defined(__cplusplus)
+#define EXTERN extern "C"
+extern "C"
+{
+#else
+#define EXTERN extern
+#endif
+
+/*********************************************************************************************
+ * Name: mpu_allocregion
+ *
+ * Description:
+ *  Allocate the next region
+ *
+ *********************************************************************************************/
+
+unsigned int mpu_allocregion(void);
+
+/*********************************************************************************************
+ * Name: mpu_log2regionceil
+ *
+ * Description:
+ *   Determine the smallest value of l2size (log base 2 size) such that the
+ *   following is true:
+ *
+ *   size <= (1 << l2size)
+ *
+ *********************************************************************************************/
+
+uint8_t mpu_log2regionceil(size_t size);
+
+/*********************************************************************************************
+ * Name: mpu_log2regionfloor
+ *
+ * Description:
+ *   Determine the largest value of l2size (log base 2 size) such that the
+ *   following is true:
+ *
+ *   size >= (1 << l2size)
+ *
+ *********************************************************************************************/
+
+uint8_t mpu_log2regionfloor(size_t size);
+
+/*********************************************************************************************
+ * Name: mpu_subregion
+ *
+ * Description:
+ *   Given (1) the offset to the beginning of valid data, (2) the size of the
+ *   memory to be mapped and (3) the log2 size of the mapping to use,
+ *   determine the minimal sub-region set to span that memory region.
+ *
+ * Assumption:
+ *   l2size has the same properties as the return value from
+ *   mpu_log2regionceil()
+ *
+ *********************************************************************************************/
+
+uint32_t mpu_subregion(uintptr_t base, size_t size, uint8_t l2size);
+
+/*********************************************************************************************
+ * Name: mpu_control
+ *
+ * Description:
+ *   Configure and enable (or disable) the MPU
+ *
+ *********************************************************************************************/
+
+void mpu_control(bool enable, bool hfnmiena, bool privdefena);
+
+/*********************************************************************************************
+ * Name: mpu_configure_region
+ *
+ * Description:
+ *   Configure an MPU region with the given base, size, and attribute flags
+ *
+ *********************************************************************************************/
+
+void mpu_configure_region(uintptr_t base, size_t size,
+                                        uint32_t flags);
+
+/*********************************************************************************************
+ * Inline Functions
+ *********************************************************************************************/
+
+/*********************************************************************************************
+ * Name: mpu_showtype
+ *
+ * Description:
+ *   Show the characteristics of the MPU
+ *
+ *********************************************************************************************/
+
+#ifdef CONFIG_DEBUG_SCHED_INFO
+#  define mpu_showtype() \
+    do \
+      { \
+        uint32_t regval = getreg32(MPU_TYPE); \
+        sinfo("%s MPU Regions: data=%d instr=%d\n", \
+          (regval & MPU_TYPE_SEPARATE) != 0 ? "Separate" : "Unified", \
+          (regval & MPU_TYPE_DREGION_MASK) >> MPU_TYPE_DREGION_SHIFT, \
+          (regval & MPU_TYPE_IREGION_MASK) >> MPU_TYPE_IREGION_SHIFT); \
+    } while (0)
+#else
+#  define mpu_showtype() do { } while (0)
+#endif
+
+/*********************************************************************************************
+ * Name: mpu_priv_stronglyordered
+ *
+ * Description:
+ *   Configure a region for privileged, strongly ordered memory
+ *
+ *********************************************************************************************/
+
+#define mpu_priv_stronglyordered(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size, \
+                           MPU_RASR_TEX_SO   | /* Ordered            */ \
+                                               /* Not Cacheable      */ \
+                                               /* Not Bufferable     */ \
+                           MPU_RASR_S        | /* Shareable          */ \
+                           MPU_RASR_AP_RWNO    /* P:RW   U:None      */ \
+                                               /* Instruction access */); \
+    } while (0)
+
+/*********************************************************************************************
+ * Name: mpu_user_flash
+ *
+ * Description:
+ *   Configure a region for user program flash
+ *
+ *********************************************************************************************/
+
+#define mpu_user_flash(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size, \
+                           MPU_RASR_TEX_SO   | /* Ordered            */ \
+                           MPU_RASR_C        | /* Cacheable          */ \
+                                               /* Not Bufferable     */ \
+                                               /* Not Shareable      */ \
+                           MPU_RASR_AP_RORO    /* P:RO   U:RO        */ \
+                                               /* Instruction access */); \
+    } while (0)
+
+/*********************************************************************************************
+ * Name: mpu_priv_flash
+ *
+ * Description:
+ *   Configure a region for privileged program flash
+ *
+ *********************************************************************************************/
+
+#define mpu_priv_flash(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size, \
+                           MPU_RASR_TEX_SO   | /* Ordered            */ \
+                           MPU_RASR_C        | /* Cacheable          */ \
+                                               /* Not Bufferable     */ \
+                                               /* Not Shareable      */ \
+                           MPU_RASR_AP_RONO    /* P:RO   U:None      */ \
+                                               /* Instruction access */); \
+    } while (0)
+
+/*********************************************************************************************
+ * Name: mpu_user_intsram
+ *
+ * Description:
+ *   Configure a region as user internal SRAM
+ *
+ *********************************************************************************************/
+
+#define mpu_user_intsram(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size, \
+                           MPU_RASR_TEX_SO   | /* Ordered            */ \
+                           MPU_RASR_C        | /* Cacheable          */ \
+                                               /* Not Bufferable     */ \
+                           MPU_RASR_S        | /* Shareable          */ \
+                           MPU_RASR_AP_RWRW    /* P:RW   U:RW        */ \
+                                               /* Instruction access */); \
+    } while (0)
+
+/*********************************************************************************************
+ * Name: mpu_priv_intsram
+ *
+ * Description:
+ *   Configure a region as privileged internal SRAM
+ *
+ *********************************************************************************************/
+
+#define mpu_priv_intsram(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size,\
+                           MPU_RASR_TEX_SO   | /* Ordered            */ \
+                           MPU_RASR_C        | /* Cacheable          */ \
+                                               /* Not Bufferable     */ \
+                           MPU_RASR_S        | /* Shareable          */ \
+                           MPU_RASR_AP_RWNO    /* P:RW   U:None      */ \
+                                               /* Instruction access */); \
+    } while (0)
+
+/*********************************************************************************************
+ * Name: mpu_user_extsram
+ *
+ * Description:
+ *   Configure a region as user external SRAM
+ *
+ *********************************************************************************************/
+
+#define mpu_user_extsram(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size, \
+                           MPU_RASR_TEX_SO   | /* Ordered            */ \
+                           MPU_RASR_C        | /* Cacheable          */ \
+                           MPU_RASR_B        | /* Bufferable         */ \
+                           MPU_RASR_S        | /* Shareable          */ \
+                           MPU_RASR_AP_RWRW    /* P:RW   U:RW        */ \
+                                               /* Instruction access */); \
+    } while (0)
+
+/*********************************************************************************************
+ * Name: mpu_priv_extsram
+ *
+ * Description:
+ *   Configure a region as privileged external SRAM
+ *
+ *********************************************************************************************/
+
+#define mpu_priv_extsram(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size, \
+                           MPU_RASR_TEX_SO   | /* Ordered            */ \
+                           MPU_RASR_C        | /* Cacheable          */ \
+                           MPU_RASR_B        | /* Bufferable         */ \
+                           MPU_RASR_S        | /* Shareable          */ \
+                           MPU_RASR_AP_RWNO    /* P:RW   U:None      */ \
+                                               /* Instruction access */); \
+    } while (0)
+
+/*********************************************************************************************
+ * Name: mpu_peripheral
+ *
+ * Description:
+ *   Configure a region as privileged peripheral address space
+ *
+ *********************************************************************************************/
+
+#define mpu_peripheral(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size, \
+                           MPU_RASR_TEX_DEV  | /* Device                */ \
+                                               /* Not Cacheable         */ \
+                           MPU_RASR_B        | /* Bufferable            */ \
+                           MPU_RASR_S        | /* Shareable             */ \
+                           MPU_RASR_AP_RWNO  | /* P:RW   U:None         */ \
+                           MPU_RASR_XN         /* No Instruction access */); \
+    } while (0)
+
+/*********************************************************************************************
+ * Name: mpu_user_peripheral
+ *
+ * Description:
+ *   Configure a region as user peripheral address space
+ *
+ *********************************************************************************************/
+
+#define mpu_user_peripheral(base, size) \
+  do \
+    { \
+      /* Then configure the region */ \
+      mpu_configure_region(base, size, \
+                           MPU_RASR_TEX_DEV  | /* Device                */ \
+                                               /* Not Cacheable         */ \
+                           MPU_RASR_B        | /* Bufferable            */ \
+                           MPU_RASR_S        | /* Shareable             */ \
+                           MPU_RASR_AP_RWRW  | /* P:RW     U:RW         */ \
+                           MPU_RASR_XN         /* No Instruction access */); \
+    } while (0)
+
+#undef EXTERN
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_ARM_MPU */
+#endif /* __ARCH_ARM_SRC_ARMV8M_MPU_H */
diff --git a/arch/arm/src/armv8-m/nvic.h b/arch/arm/src/armv8-m/nvic.h
new file mode 100755
index 0000000..bfcb69d
--- /dev/null
+++ b/arch/arm/src/armv8-m/nvic.h
@@ -0,0 +1,696 @@
+/********************************************************************************************
+ * arch/arm/src/armv8-m/nvic.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ********************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_COMMON_ARMV8_M_NVIC_H
+#define __ARCH_ARM_SRC_COMMON_ARMV8_M_NVIC_H
+
+/********************************************************************************************
+ * Included Files
+ ********************************************************************************************/
+
+#include <nuttx/config.h>
+
+/********************************************************************************************
+ * Pre-processor Definitions
+ ********************************************************************************************/
+
+/* Exception/interrupt vector numbers *******************************************************/
+
+                                               /* Vector  0: Reset stack pointer value */
+                                               /* Vector  1: Reset */
+#define NVIC_IRQ_NMI                    (2)    /* Vector  2: Non-Maskable Interrupt (NMI) */
+#define NVIC_IRQ_HARDFAULT              (3)    /* Vector  3: Hard fault */
+#define NVIC_IRQ_MEMFAULT               (4)    /* Vector  4: Memory management (MPU) */
+#define NVIC_IRQ_BUSFAULT               (5)    /* Vector  5: Bus fault */
+#define NVIC_IRQ_USAGEFAULT             (6)    /* Vector  6: Usage fault */
+                                               /* Vectors 7-10: Reserved */
+#define NVIC_IRQ_SVCALL                 (11)   /* Vector 11: SVC call */
+#define NVIC_IRQ_DBGMONITOR             (12)   /* Vector 12: Debug Monitor */
+                                               /* Vector 13: Reserved */
+#define NVIC_IRQ_PENDSV                 (14)   /* Vector 14: Pendable system service request */
+#define NVIC_IRQ_SYSTICK                (15)   /* Vector 15: System tick */
+
+/* External interrupts (vectors >= 16).  These definitions are chip-specific */
+
+#define NVIC_IRQ_FIRST                  (16)    /* Vector number of the first interrupt */
+
+/* NVIC base address ************************************************************************/
+
+#define ARMV8M_NVIC_BASE                0xe000e000
+
+/* NVIC register offsets ********************************************************************/
+
+#define NVIC_ICTR_OFFSET                0x0004 /* Interrupt controller type register */
+#define NVIC_SYSTICK_CTRL_OFFSET        0x0010 /* SysTick control and status register */
+#define NVIC_SYSTICK_RELOAD_OFFSET      0x0014 /* SysTick reload value register */
+#define NVIC_SYSTICK_CURRENT_OFFSET     0x0018 /* SysTick current value register */
+#define NVIC_SYSTICK_CALIB_OFFSET       0x001c /* SysTick calibration value register */
+
+#define NVIC_IRQ_ENABLE_OFFSET(n)       (0x0100 + 4*((n) >> 5))
+#define NVIC_IRQ0_31_ENABLE_OFFSET      0x0100 /* IRQ 0-31 set enable register */
+#define NVIC_IRQ32_63_ENABLE_OFFSET     0x0104 /* IRQ 32-63 set enable register */
+#define NVIC_IRQ64_95_ENABLE_OFFSET     0x0108 /* IRQ 64-95 set enable register */
+#define NVIC_IRQ96_127_ENABLE_OFFSET    0x010c /* IRQ 96-127 set enable register */
+#define NVIC_IRQ128_159_ENABLE_OFFSET   0x0110 /* IRQ 128-159 set enable register */
+#define NVIC_IRQ160_191_ENABLE_OFFSET   0x0114 /* IRQ 160-191 set enable register */
+#define NVIC_IRQ192_223_ENABLE_OFFSET   0x0118 /* IRQ 192-223 set enable register */
+#define NVIC_IRQ224_239_ENABLE_OFFSET   0x011c /* IRQ 224-239 set enable register */
+
+#define NVIC_IRQ_CLEAR_OFFSET(n)        (0x0180 + 4*((n) >> 5))
+#define NVIC_IRQ0_31_CLEAR_OFFSET       0x0180 /* IRQ 0-31 clear enable register */
+#define NVIC_IRQ32_63_CLEAR_OFFSET      0x0184 /* IRQ 32-63 clear enable register */
+#define NVIC_IRQ64_95_CLEAR_OFFSET      0x0188 /* IRQ 64-95 clear enable register */
+#define NVIC_IRQ96_127_CLEAR_OFFSET     0x018c /* IRQ 96-127 clear enable register */
+#define NVIC_IRQ128_159_CLEAR_OFFSET    0x0190 /* IRQ 128-159 clear enable register */
+#define NVIC_IRQ160_191_CLEAR_OFFSET    0x0194 /* IRQ 160-191 clear enable register */
+#define NVIC_IRQ192_223_CLEAR_OFFSET    0x0198 /* IRQ 192-223 clear enable register */
+#define NVIC_IRQ224_239_CLEAR_OFFSET    0x019c /* IRQ 224-239 clear enable register */
+
+#define NVIC_IRQ_PEND_OFFSET(n)         (0x0200 + 4*((n) >> 5))
+#define NVIC_IRQ0_31_PEND_OFFSET        0x0200 /* IRQ 0-31 set pending register */
+#define NVIC_IRQ32_63_PEND_OFFSET       0x0204 /* IRQ 32-63 set pending register */
+#define NVIC_IRQ64_95_PEND_OFFSET       0x0208 /* IRQ 64-95 set pending register */
+#define NVIC_IRQ96_127_PEND_OFFSET      0x020c /* IRQ 96-127 set pending register */
+#define NVIC_IRQ128_159_PEND_OFFSET     0x0210 /* IRQ 128-159 set pending register */
+#define NVIC_IRQ160_191_PEND_OFFSET     0x0214 /* IRQ 160-191 set pending register */
+#define NVIC_IRQ192_223_PEND_OFFSET     0x0218 /* IRQ 192-223 set pending register */
+#define NVIC_IRQ224_239_PEND_OFFSET     0x021c /* IRQ 224-239 set pending register */
+
+#define NVIC_IRQ_CLRPEND_OFFSET(n)      (0x0280 + 4*((n) >> 5))
+#define NVIC_IRQ0_31_CLRPEND_OFFSET     0x0280 /* IRQ 0-31 clear pending register */
+#define NVIC_IRQ32_63_CLRPEND_OFFSET    0x0284 /* IRQ 32-63 clear pending register */
+#define NVIC_IRQ64_95_CLRPEND_OFFSET    0x0288 /* IRQ 64-95 clear pending register */
+#define NVIC_IRQ96_127_CLRPEND_OFFSET   0x028c /* IRQ 96-127 clear pending register */
+#define NVIC_IRQ128_159_CLRPEND_OFFSET  0x0290 /* IRQ 128-159 clear pending register */
+#define NVIC_IRQ160_191_CLRPEND_OFFSET  0x0294 /* IRQ 160-191 clear pending register */
+#define NVIC_IRQ192_223_CLRPEND_OFFSET  0x0298 /* IRQ 192-223 clear pending register */
+#define NVIC_IRQ224_239_CLRPEND_OFFSET  0x029c /* IRQ 224-239 clear pending register */
+
+#define NVIC_IRQ_ACTIVE_OFFSET(n)       (0x0300 + 4*((n) >> 5))
+#define NVIC_IRQ0_31_ACTIVE_OFFSET      0x0300 /* IRQ 0-31 active bit register */
+#define NVIC_IRQ32_63_ACTIVE_OFFSET     0x0304 /* IRQ 32-63 active bit register */
+#define NVIC_IRQ64_95_ACTIVE_OFFSET     0x0308 /* IRQ 64-95 active bit register */
+#define NVIC_IRQ96_127_ACTIVE_OFFSET    0x030c /* IRQ 96-127 active bit register */
+#define NVIC_IRQ128_159_ACTIVE_OFFSET   0x0310 /* IRQ 128-159 active bit register */
+#define NVIC_IRQ160_191_ACTIVE_OFFSET   0x0314 /* IRQ 160-191 active bit register */
+#define NVIC_IRQ192_223_ACTIVE_OFFSET   0x0318 /* IRQ 192-223 active bit register */
+#define NVIC_IRQ224_239_ACTIVE_OFFSET   0x031c /* IRQ 224-239 active bit register */
+
+#define NVIC_IRQ_PRIORITY_OFFSET(n)     (0x0400 + 4*((n) >> 2))
+#define NVIC_IRQ0_3_PRIORITY_OFFSET     0x0400 /* IRQ 0-3 priority register */
+#define NVIC_IRQ4_7_PRIORITY_OFFSET     0x0404 /* IRQ 4-7 priority register */
+#define NVIC_IRQ8_11_PRIORITY_OFFSET    0x0408 /* IRQ 8-11 priority register */
+#define NVIC_IRQ12_15_PRIORITY_OFFSET   0x040c /* IRQ 12-15 priority register */
+#define NVIC_IRQ16_19_PRIORITY_OFFSET   0x0410 /* IRQ 16-19 priority register */
+#define NVIC_IRQ20_23_PRIORITY_OFFSET   0x0414 /* IRQ 20-23 priority register */
+#define NVIC_IRQ24_27_PRIORITY_OFFSET   0x0418 /* IRQ 24-27 priority register */
+#define NVIC_IRQ28_31_PRIORITY_OFFSET   0x041c /* IRQ 28-31 priority register */
+#define NVIC_IRQ32_35_PRIORITY_OFFSET   0x0420 /* IRQ 32-35 priority register */
+#define NVIC_IRQ36_39_PRIORITY_OFFSET   0x0424 /* IRQ 36-39 priority register */
+#define NVIC_IRQ40_43_PRIORITY_OFFSET   0x0428 /* IRQ 40-43 priority register */
+#define NVIC_IRQ44_47_PRIORITY_OFFSET   0x042c /* IRQ 44-47 priority register */
+#define NVIC_IRQ48_51_PRIORITY_OFFSET   0x0430 /* IRQ 48-51 priority register */
+#define NVIC_IRQ52_55_PRIORITY_OFFSET   0x0434 /* IRQ 52-55 priority register */
+#define NVIC_IRQ56_59_PRIORITY_OFFSET   0x0438 /* IRQ 56-59 priority register */
+#define NVIC_IRQ60_63_PRIORITY_OFFSET   0x043c /* IRQ 60-63 priority register */
+#define NVIC_IRQ64_67_PRIORITY_OFFSET   0x0440 /* IRQ 64-67 priority register */
+#define NVIC_IRQ68_71_PRIORITY_OFFSET   0x0444 /* IRQ 68-71 priority register */
+#define NVIC_IRQ72_75_PRIORITY_OFFSET   0x0448 /* IRQ 72-75 priority register */
+#define NVIC_IRQ76_79_PRIORITY_OFFSET   0x044c /* IRQ 76-79 priority register */
+#define NVIC_IRQ80_83_PRIORITY_OFFSET   0x0450 /* IRQ 80-83 priority register */
+#define NVIC_IRQ84_87_PRIORITY_OFFSET   0x0454 /* IRQ 84-87 priority register */
+#define NVIC_IRQ88_91_PRIORITY_OFFSET   0x0458 /* IRQ 88-91 priority register */
+#define NVIC_IRQ92_95_PRIORITY_OFFSET   0x045c /* IRQ 92-95 priority register */
+#define NVIC_IRQ96_99_PRIORITY_OFFSET   0x0460 /* IRQ 96-99 priority register */
+#define NVIC_IRQ100_103_PRIORITY_OFFSET 0x0464 /* IRQ 100-103 priority register */
+#define NVIC_IRQ104_107_PRIORITY_OFFSET 0x0468 /* IRQ 104-107 priority register */
+#define NVIC_IRQ108_111_PRIORITY_OFFSET 0x046c /* IRQ 108-111 priority register */
+#define NVIC_IRQ112_115_PRIORITY_OFFSET 0x0470 /* IRQ 112-115 priority register */
+#define NVIC_IRQ116_119_PRIORITY_OFFSET 0x0474 /* IRQ 116-119 priority register */
+#define NVIC_IRQ120_123_PRIORITY_OFFSET 0x0478 /* IRQ 120-123 priority register */
+#define NVIC_IRQ124_127_PRIORITY_OFFSET 0x047c /* IRQ 124-127 priority register */
+#define NVIC_IRQ128_131_PRIORITY_OFFSET 0x0480 /* IRQ 128-131 priority register */
+#define NVIC_IRQ132_135_PRIORITY_OFFSET 0x0484 /* IRQ 132-135 priority register */
+#define NVIC_IRQ136_139_PRIORITY_OFFSET 0x0488 /* IRQ 136-139 priority register */
+#define NVIC_IRQ140_143_PRIORITY_OFFSET 0x048c /* IRQ 140-143 priority register */
+#define NVIC_IRQ144_147_PRIORITY_OFFSET 0x0490 /* IRQ 144-147 priority register */
+#define NVIC_IRQ148_151_PRIORITY_OFFSET 0x0494 /* IRQ 148-151 priority register */
+#define NVIC_IRQ152_155_PRIORITY_OFFSET 0x0498 /* IRQ 152-155 priority register */
+#define NVIC_IRQ156_159_PRIORITY_OFFSET 0x049c /* IRQ 156-159 priority register */
+#define NVIC_IRQ160_163_PRIORITY_OFFSET 0x04a0 /* IRQ 160-163 priority register */
+#define NVIC_IRQ164_167_PRIORITY_OFFSET 0x04a4 /* IRQ 164-167 priority register */
+#define NVIC_IRQ168_171_PRIORITY_OFFSET 0x04a8 /* IRQ 168-171 priority register */
+#define NVIC_IRQ172_175_PRIORITY_OFFSET 0x04ac /* IRQ 172-175 priority register */
+#define NVIC_IRQ176_179_PRIORITY_OFFSET 0x04b0 /* IRQ 176-179 priority register */
+#define NVIC_IRQ180_183_PRIORITY_OFFSET 0x04b4 /* IRQ 180-183 priority register */
+#define NVIC_IRQ184_187_PRIORITY_OFFSET 0x04b8 /* IRQ 184-187 priority register */
+#define NVIC_IRQ188_191_PRIORITY_OFFSET 0x04bc /* IRQ 188-191 priority register */
+#define NVIC_IRQ192_195_PRIORITY_OFFSET 0x04c0 /* IRQ 192-195 priority register */
+#define NVIC_IRQ196_199_PRIORITY_OFFSET 0x04c4 /* IRQ 196-199 priority register */
+#define NVIC_IRQ200_203_PRIORITY_OFFSET 0x04c8 /* IRQ 200-203 priority register */
+#define NVIC_IRQ204_207_PRIORITY_OFFSET 0x04cc /* IRQ 204-207 priority register */
+#define NVIC_IRQ208_211_PRIORITY_OFFSET 0x04d0 /* IRQ 208-211 priority register */
+#define NVIC_IRQ212_215_PRIORITY_OFFSET 0x04d4 /* IRQ 212-215 priority register */
+#define NVIC_IRQ216_219_PRIORITY_OFFSET 0x04d8 /* IRQ 216-219 priority register */
+#define NVIC_IRQ220_223_PRIORITY_OFFSET 0x04dc /* IRQ 220-223 priority register */
+#define NVIC_IRQ224_227_PRIORITY_OFFSET 0x04e0 /* IRQ 224-227 priority register */
+#define NVIC_IRQ228_231_PRIORITY_OFFSET 0x04e4 /* IRQ 228-231 priority register */
+#define NVIC_IRQ232_235_PRIORITY_OFFSET 0x04e8 /* IRQ 232-235 priority register */
+#define NVIC_IRQ236_239_PRIORITY_OFFSET 0x04ec /* IRQ 236-239 priority register */
+
+/* System Control Block (SCB) */
+
+#define NVIC_CPUID_BASE_OFFSET          0x0d00 /* CPUID base register */
+#define NVIC_INTCTRL_OFFSET             0x0d04 /* Interrupt control state register */
+#define NVIC_VECTAB_OFFSET              0x0d08 /* Vector table offset register */
+#define NVIC_AIRCR_OFFSET               0x0d0c /* Application interrupt/reset control register */
+#define NVIC_SYSCON_OFFSET              0x0d10 /* System control register */
+#define NVIC_CFGCON_OFFSET              0x0d14 /* Configuration control register */
+#define NVIC_SYSH_PRIORITY_OFFSET(n)    (0x0d14 + 4*((n) >> 2))
+#define NVIC_SYSH4_7_PRIORITY_OFFSET    0x0d18 /* System handlers 4-7 priority register */
+#define NVIC_SYSH8_11_PRIORITY_OFFSET   0x0d1c /* System handler 8-11 priority register */
+#define NVIC_SYSH12_15_PRIORITY_OFFSET  0x0d20 /* System handler 12-15 priority register */
+#define NVIC_SYSHCON_OFFSET             0x0d24 /* System handler control and state register */
+#define NVIC_CFAULTS_OFFSET             0x0d28 /* Configurable fault status register */
+#define NVIC_HFAULTS_OFFSET             0x0d2c /* Hard fault status register */
+#define NVIC_DFAULTS_OFFSET             0x0d30 /* Debug fault status register */
+#define NVIC_MEMMANAGE_ADDR_OFFSET      0x0d34 /* Mem manage address register */
+#define NVIC_BFAULT_ADDR_OFFSET         0x0d38 /* Bus fault address register */
+#define NVIC_AFAULTS_OFFSET             0x0d3c /* Auxiliary fault status register */
+#define NVIC_PFR0_OFFSET                0x0d40 /* Processor feature register 0 */
+#define NVIC_PFR1_OFFSET                0x0d44 /* Processor feature register 1 */
+#define NVIC_DFR0_OFFSET                0x0d48 /* Debug feature register 0 */
+#define NVIC_AFR0_OFFSET                0x0d4c /* Auxiliary feature register 0 */
+#define NVIC_MMFR0_OFFSET               0x0d50 /* Memory model feature register 0 */
+#define NVIC_MMFR1_OFFSET               0x0d54 /* Memory model feature register 1 */
+#define NVIC_MMFR2_OFFSET               0x0d58 /* Memory model feature register 2 */
+#define NVIC_MMFR3_OFFSET               0x0d5c /* Memory model feature register 3 */
+#define NVIC_ISAR0_OFFSET               0x0d60 /* ISA feature register 0 */
+#define NVIC_ISAR1_OFFSET               0x0d64 /* ISA feature register 1 */
+#define NVIC_ISAR2_OFFSET               0x0d68 /* ISA feature register 2 */
+#define NVIC_ISAR3_OFFSET               0x0d6c /* ISA feature register 3 */
+#define NVIC_ISAR4_OFFSET               0x0d70 /* ISA feature register 4 */
+#define NVIC_CLIDR_OFFSET               0x0d78 /* Cache Level ID register (Cortex-M7) */
+#define NVIC_CTR_OFFSET                 0x0d7c /* Cache Type register (Cortex-M7) */
+#define NVIC_CCSIDR_OFFSET              0x0d80 /* Cache Size ID Register (Cortex-M7) */
+#define NVIC_CSSELR_OFFSET              0x0d84 /* Cache Size Selection Register (Cortex-M7) */
+#define NVIC_CPACR_OFFSET               0x0d88 /* Coprocessor Access Control Register */
+#define NVIC_DHCSR_OFFSET               0x0df0 /* Debug Halting Control and Status Register */
+#define NVIC_DCRSR_OFFSET               0x0df4 /* Debug Core Register Selector Register */
+#define NVIC_DCRDR_OFFSET               0x0df8 /* Debug Core Register Data Register */
+#define NVIC_DEMCR_OFFSET               0x0dfc /* Debug Exception and Monitor Control Register */
+#define NVIC_STIR_OFFSET                0x0f00 /* Software trigger interrupt register */
+#define NVIC_FPCCR_OFFSET               0x0f34 /* Floating-point Context Control Register */
+#define NVIC_FPCAR_OFFSET               0x0f38 /* Floating-point Context Address Register */
+#define NVIC_FPDSCR_OFFSET              0x0f3c /* Floating-point Default Status Control Register */
+#define NVIC_MVFR0_OFFSET               0x0f40 /* Media and VFP Feature Register 0 */
+#define NVIC_MVFR1_OFFSET               0x0f44 /* Media and VFP Feature Register 1 */
+#define NVIC_MVFR2_OFFSET               0x0f48 /* Media and VFP Feature Register 2 */
+#define NVIC_ICIALLU_OFFSET             0x0f50 /* I-Cache Invalidate All to PoU (Cortex-M7) */
+#define NVIC_ICIMVAU_OFFSET             0x0f58 /* I-Cache Invalidate by MVA to PoU (Cortex-M7) */
+#define NVIC_DCIMVAC_OFFSET             0x0f5c /* D-Cache Invalidate by MVA to PoC (Cortex-M7) */
+#define NVIC_DCISW_OFFSET               0x0f60 /* D-Cache Invalidate by Set-way (Cortex-M7) */
+#define NVIC_DCCMVAU_OFFSET             0x0f64 /* D-Cache Clean by MVA to PoU (Cortex-M7) */
+#define NVIC_DCCMVAC_OFFSET             0x0f68 /* D-Cache Clean by MVA to PoC (Cortex-M7) */
+#define NVIC_DCCSW_OFFSET               0x0f6c /* D-Cache Clean by Set-way (Cortex-M7) */
+#define NVIC_DCCIMVAC_OFFSET            0x0f70 /* D-Cache Clean and Invalidate by MVA to PoC (Cortex-M7) */
+#define NVIC_DCCISW_OFFSET              0x0f74 /* D-Cache Clean and Invalidate by Set-way (Cortex-M7) */
+#define NVIC_BPIALL_OFFSET              0x0f78 /* Branch predictor invalidate all (Cortex-M7) */
+#define NVIC_ITCMCR_OFFSET              0x0f90 /* Instruction Tightly-Coupled Memory Control Register */
+#define NVIC_DTCMCR_OFFSET              0x0f94 /* Data Tightly-Coupled Memory Control Registers */
+#define NVIC_AHBPCR_OFFSET              0x0f98 /* AHBP Control Register */
+#define NVIC_CACR_OFFSET                0x0f9c /* L1 Cache Control Register */
+#define NVIC_AHBSCR_OFFSET              0x0fa0 /* AHB Slave Control Register */
+#define NVIC_ABFSR_OFFSET               0x0fa8 /* Auxiliary Bus Fault Status */
+#define NVIC_PID4_OFFSET                0x0fd0 /* Peripheral identification register (PID4) */
+#define NVIC_PID5_OFFSET                0x0fd4 /* Peripheral identification register (PID5) */
+#define NVIC_PID6_OFFSET                0x0fd8 /* Peripheral identification register (PID6) */
+#define NVIC_PID7_OFFSET                0x0fdc /* Peripheral identification register (PID7) */
+#define NVIC_PID0_OFFSET                0x0fe0 /* Peripheral identification register bits 7:0 (PID0) */
+#define NVIC_PID1_OFFSET                0x0fe4 /* Peripheral identification register bits 15:8 (PID1) */
+#define NVIC_PID2_OFFSET                0x0fe8 /* Peripheral identification register bits 23:16 (PID2) */
+#define NVIC_PID3_OFFSET                0x0fec /* Peripheral identification register bits 31:24 (PID3) */
+#define NVIC_CID0_OFFSET                0x0ff0 /* Component identification register bits 7:0 (CID0) */
+#define NVIC_CID1_OFFSET                0x0ff4 /* Component identification register bits 15:8 (CID1) */
+#define NVIC_CID2_OFFSET                0x0ff8 /* Component identification register bits 23:16 (CID2) */
+#define NVIC_CID3_OFFSET                0x0ffc /* Component identification register bits 31:24 (CID3) */
+
+/* NVIC register addresses ******************************************************************/
+
+#define NVIC_ICTR                       (ARMV8M_NVIC_BASE + NVIC_ICTR_OFFSET)
+#define NVIC_SYSTICK_CTRL               (ARMV8M_NVIC_BASE + NVIC_SYSTICK_CTRL_OFFSET)
+#define NVIC_SYSTICK_RELOAD             (ARMV8M_NVIC_BASE + NVIC_SYSTICK_RELOAD_OFFSET)
+#define NVIC_SYSTICK_CURRENT            (ARMV8M_NVIC_BASE + NVIC_SYSTICK_CURRENT_OFFSET)
+#define NVIC_SYSTICK_CALIB              (ARMV8M_NVIC_BASE + NVIC_SYSTICK_CALIB_OFFSET)
+
+#define NVIC_IRQ_ENABLE(n)              (ARMV8M_NVIC_BASE + NVIC_IRQ_ENABLE_OFFSET(n))
+#define NVIC_IRQ0_31_ENABLE             (ARMV8M_NVIC_BASE + NVIC_IRQ0_31_ENABLE_OFFSET)
+#define NVIC_IRQ32_63_ENABLE            (ARMV8M_NVIC_BASE + NVIC_IRQ32_63_ENABLE_OFFSET)
+#define NVIC_IRQ64_95_ENABLE            (ARMV8M_NVIC_BASE + NVIC_IRQ64_95_ENABLE_OFFSET)
+#define NVIC_IRQ96_127_ENABLE           (ARMV8M_NVIC_BASE + NVIC_IRQ96_127_ENABLE_OFFSET)
+#define NVIC_IRQ128_159_ENABLE          (ARMV8M_NVIC_BASE + NVIC_IRQ128_159_ENABLE_OFFSET)
+#define NVIC_IRQ160_191_ENABLE          (ARMV8M_NVIC_BASE + NVIC_IRQ160_191_ENABLE_OFFSET)
+#define NVIC_IRQ192_223_ENABLE          (ARMV8M_NVIC_BASE + NVIC_IRQ192_223_ENABLE_OFFSET)
+#define NVIC_IRQ224_239_ENABLE          (ARMV8M_NVIC_BASE + NVIC_IRQ224_239_ENABLE_OFFSET)
+
+#define NVIC_IRQ_CLEAR(n)               (ARMV8M_NVIC_BASE + NVIC_IRQ_CLEAR_OFFSET(n))
+#define NVIC_IRQ0_31_CLEAR              (ARMV8M_NVIC_BASE + NVIC_IRQ0_31_CLEAR_OFFSET)
+#define NVIC_IRQ32_63_CLEAR             (ARMV8M_NVIC_BASE + NVIC_IRQ32_63_CLEAR_OFFSET)
+#define NVIC_IRQ64_95_CLEAR             (ARMV8M_NVIC_BASE + NVIC_IRQ64_95_CLEAR_OFFSET)
+#define NVIC_IRQ96_127_CLEAR            (ARMV8M_NVIC_BASE + NVIC_IRQ96_127_CLEAR_OFFSET)
+#define NVIC_IRQ128_159_CLEAR           (ARMV8M_NVIC_BASE + NVIC_IRQ128_159_CLEAR_OFFSET)
+#define NVIC_IRQ160_191_CLEAR           (ARMV8M_NVIC_BASE + NVIC_IRQ160_191_CLEAR_OFFSET)
+#define NVIC_IRQ192_223_CLEAR           (ARMV8M_NVIC_BASE + NVIC_IRQ192_223_CLEAR_OFFSET)
+#define NVIC_IRQ224_239_CLEAR           (ARMV8M_NVIC_BASE + NVIC_IRQ224_239_CLEAR_OFFSET)
+
+#define NVIC_IRQ_PEND(n)                (ARMV8M_NVIC_BASE + NVIC_IRQ_PEND_OFFSET(n))
+#define NVIC_IRQ0_31_PEND               (ARMV8M_NVIC_BASE + NVIC_IRQ0_31_PEND_OFFSET)
+#define NVIC_IRQ32_63_PEND              (ARMV8M_NVIC_BASE + NVIC_IRQ32_63_PEND_OFFSET)
+#define NVIC_IRQ64_95_PEND              (ARMV8M_NVIC_BASE + NVIC_IRQ64_95_PEND_OFFSET)
+#define NVIC_IRQ96_127_PEND             (ARMV8M_NVIC_BASE + NVIC_IRQ96_127_PEND_OFFSET)
+#define NVIC_IRQ128_159_PEND            (ARMV8M_NVIC_BASE + NVIC_IRQ128_159_PEND_OFFSET)
+#define NVIC_IRQ160_191_PEND            (ARMV8M_NVIC_BASE + NVIC_IRQ160_191_PEND_OFFSET)
+#define NVIC_IRQ192_223_PEND            (ARMV8M_NVIC_BASE + NVIC_IRQ192_223_PEND_OFFSET)
+#define NVIC_IRQ224_239_PEND            (ARMV8M_NVIC_BASE + NVIC_IRQ224_239_PEND_OFFSET)
+
+#define NVIC_IRQ_CLRPEND(n)             (ARMV8M_NVIC_BASE + NVIC_IRQ_CLRPEND_OFFSET(n))
+#define NVIC_IRQ0_31_CLRPEND            (ARMV8M_NVIC_BASE + NVIC_IRQ0_31_CLRPEND_OFFSET)
+#define NVIC_IRQ32_63_CLRPEND           (ARMV8M_NVIC_BASE + NVIC_IRQ32_63_CLRPEND_OFFSET)
+#define NVIC_IRQ64_95_CLRPEND           (ARMV8M_NVIC_BASE + NVIC_IRQ64_95_CLRPEND_OFFSET)
+#define NVIC_IRQ96_127_CLRPEND          (ARMV8M_NVIC_BASE + NVIC_IRQ96_127_CLRPEND_OFFSET)
+#define NVIC_IRQ128_159_CLRPEND         (ARMV8M_NVIC_BASE + NVIC_IRQ128_159_CLRPEND_OFFSET)
+#define NVIC_IRQ160_191_CLRPEND         (ARMV8M_NVIC_BASE + NVIC_IRQ160_191_CLRPEND_OFFSET)
+#define NVIC_IRQ192_223_CLRPEND         (ARMV8M_NVIC_BASE + NVIC_IRQ192_223_CLRPEND_OFFSET)
+#define NVIC_IRQ224_239_CLRPEND         (ARMV8M_NVIC_BASE + NVIC_IRQ224_239_CLRPEND_OFFSET)
+
+#define NVIC_IRQ_ACTIVE(n)              (ARMV8M_NVIC_BASE + NVIC_IRQ_ACTIVE_OFFSET(n))
+#define NVIC_IRQ0_31_ACTIVE             (ARMV8M_NVIC_BASE + NVIC_IRQ0_31_ACTIVE_OFFSET)
+#define NVIC_IRQ32_63_ACTIVE            (ARMV8M_NVIC_BASE + NVIC_IRQ32_63_ACTIVE_OFFSET)
+#define NVIC_IRQ64_95_ACTIVE            (ARMV8M_NVIC_BASE + NVIC_IRQ64_95_ACTIVE_OFFSET)
+#define NVIC_IRQ96_127_ACTIVE           (ARMV8M_NVIC_BASE + NVIC_IRQ96_127_ACTIVE_OFFSET)
+#define NVIC_IRQ128_159_ACTIVE          (ARMV8M_NVIC_BASE + NVIC_IRQ128_159_ACTIVE_OFFSET)
+#define NVIC_IRQ160_191_ACTIVE          (ARMV8M_NVIC_BASE + NVIC_IRQ160_191_ACTIVE_OFFSET)
+#define NVIC_IRQ192_223_ACTIVE          (ARMV8M_NVIC_BASE + NVIC_IRQ192_223_ACTIVE_OFFSET)
+#define NVIC_IRQ224_239_ACTIVE          (ARMV8M_NVIC_BASE + NVIC_IRQ224_239_ACTIVE_OFFSET)
+
+#define NVIC_IRQ_PRIORITY(n)            (ARMV8M_NVIC_BASE + NVIC_IRQ_PRIORITY_OFFSET(n))
+#define NVIC_IRQ0_3_PRIORITY            (ARMV8M_NVIC_BASE + NVIC_IRQ0_3_PRIORITY_OFFSET)
+#define NVIC_IRQ4_7_PRIORITY            (ARMV8M_NVIC_BASE + NVIC_IRQ4_7_PRIORITY_OFFSET)
+#define NVIC_IRQ8_11_PRIORITY           (ARMV8M_NVIC_BASE + NVIC_IRQ8_11_PRIORITY_OFFSET)
+#define NVIC_IRQ12_15_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ12_15_PRIORITY_OFFSET)
+#define NVIC_IRQ16_19_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ16_19_PRIORITY_OFFSET)
+#define NVIC_IRQ20_23_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ20_23_PRIORITY_OFFSET)
+#define NVIC_IRQ24_27_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ24_27_PRIORITY_OFFSET)
+#define NVIC_IRQ28_31_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ28_31_PRIORITY_OFFSET)
+#define NVIC_IRQ32_35_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ32_35_PRIORITY_OFFSET)
+#define NVIC_IRQ36_39_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ36_39_PRIORITY_OFFSET)
+#define NVIC_IRQ40_43_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ40_43_PRIORITY_OFFSET)
+#define NVIC_IRQ44_47_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ44_47_PRIORITY_OFFSET)
+#define NVIC_IRQ48_51_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ48_51_PRIORITY_OFFSET)
+#define NVIC_IRQ52_55_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ52_55_PRIORITY_OFFSET)
+#define NVIC_IRQ56_59_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ56_59_PRIORITY_OFFSET)
+#define NVIC_IRQ60_63_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ60_63_PRIORITY_OFFSET)
+#define NVIC_IRQ64_67_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ64_67_PRIORITY_OFFSET)
+#define NVIC_IRQ68_71_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ68_71_PRIORITY_OFFSET)
+#define NVIC_IRQ72_75_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ72_75_PRIORITY_OFFSET)
+#define NVIC_IRQ76_79_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ76_79_PRIORITY_OFFSET)
+#define NVIC_IRQ80_83_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ80_83_PRIORITY_OFFSET)
+#define NVIC_IRQ84_87_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ84_87_PRIORITY_OFFSET)
+#define NVIC_IRQ88_91_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ88_91_PRIORITY_OFFSET)
+#define NVIC_IRQ92_95_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ92_95_PRIORITY_OFFSET)
+#define NVIC_IRQ96_99_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_IRQ96_99_PRIORITY_OFFSET)
+#define NVIC_IRQ100_103_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ100_103_PRIORITY_OFFSET)
+#define NVIC_IRQ104_107_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ104_107_PRIORITY_OFFSET)
+#define NVIC_IRQ108_111_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ108_111_PRIORITY_OFFSET)
+#define NVIC_IRQ112_115_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ112_115_PRIORITY_OFFSET)
+#define NVIC_IRQ116_119_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ116_119_PRIORITY_OFFSET)
+#define NVIC_IRQ120_123_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ120_123_PRIORITY_OFFSET)
+#define NVIC_IRQ124_127_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ124_127_PRIORITY_OFFSET)
+#define NVIC_IRQ128_131_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ128_131_PRIORITY_OFFSET)
+#define NVIC_IRQ132_135_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ132_135_PRIORITY_OFFSET)
+#define NVIC_IRQ136_139_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ136_139_PRIORITY_OFFSET)
+#define NVIC_IRQ140_143_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ140_143_PRIORITY_OFFSET)
+#define NVIC_IRQ144_147_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ144_147_PRIORITY_OFFSET)
+#define NVIC_IRQ148_151_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ148_151_PRIORITY_OFFSET)
+#define NVIC_IRQ152_155_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ152_155_PRIORITY_OFFSET)
+#define NVIC_IRQ156_159_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ156_159_PRIORITY_OFFSET)
+#define NVIC_IRQ160_163_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ160_163_PRIORITY_OFFSET)
+#define NVIC_IRQ164_167_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ164_167_PRIORITY_OFFSET)
+#define NVIC_IRQ168_171_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ168_171_PRIORITY_OFFSET)
+#define NVIC_IRQ172_175_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ172_175_PRIORITY_OFFSET)
+#define NVIC_IRQ176_179_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ176_179_PRIORITY_OFFSET)
+#define NVIC_IRQ180_183_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ180_183_PRIORITY_OFFSET)
+#define NVIC_IRQ184_187_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ184_187_PRIORITY_OFFSET)
+#define NVIC_IRQ188_191_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ188_191_PRIORITY_OFFSET)
+#define NVIC_IRQ192_195_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ192_195_PRIORITY_OFFSET)
+#define NVIC_IRQ196_199_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ196_199_PRIORITY_OFFSET)
+#define NVIC_IRQ200_203_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ200_203_PRIORITY_OFFSET)
+#define NVIC_IRQ204_207_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ204_207_PRIORITY_OFFSET)
+#define NVIC_IRQ208_211_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ208_211_PRIORITY_OFFSET)
+#define NVIC_IRQ212_215_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ212_215_PRIORITY_OFFSET)
+#define NVIC_IRQ216_219_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ216_219_PRIORITY_OFFSET)
+#define NVIC_IRQ220_223_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ220_223_PRIORITY_OFFSET)
+#define NVIC_IRQ224_227_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ224_227_PRIORITY_OFFSET)
+#define NVIC_IRQ228_231_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ228_231_PRIORITY_OFFSET)
+#define NVIC_IRQ232_235_PRIORITY        (ARMV8M_NVIC_BASE + NVIC_IRQ232_235_PRIORITY_OFFSET)
+
+#define NVIC_CPUID_BASE                 (ARMV8M_NVIC_BASE + NVIC_CPUID_BASE_OFFSET)
+#define NVIC_INTCTRL                    (ARMV8M_NVIC_BASE + NVIC_INTCTRL_OFFSET)
+#define NVIC_VECTAB                     (ARMV8M_NVIC_BASE + NVIC_VECTAB_OFFSET)
+#define NVIC_AIRCR                      (ARMV8M_NVIC_BASE + NVIC_AIRCR_OFFSET)
+#define NVIC_SYSCON                     (ARMV8M_NVIC_BASE + NVIC_SYSCON_OFFSET)
+#define NVIC_CFGCON                     (ARMV8M_NVIC_BASE + NVIC_CFGCON_OFFSET)
+#define NVIC_SYSH_PRIORITY(n)           (ARMV8M_NVIC_BASE + NVIC_SYSH_PRIORITY_OFFSET(n))
+#define NVIC_SYSH4_7_PRIORITY           (ARMV8M_NVIC_BASE + NVIC_SYSH4_7_PRIORITY_OFFSET)
+#define NVIC_SYSH8_11_PRIORITY          (ARMV8M_NVIC_BASE + NVIC_SYSH8_11_PRIORITY_OFFSET)
+#define NVIC_SYSH12_15_PRIORITY         (ARMV8M_NVIC_BASE + NVIC_SYSH12_15_PRIORITY_OFFSET)
+#define NVIC_SYSHCON                    (ARMV8M_NVIC_BASE + NVIC_SYSHCON_OFFSET)
+#define NVIC_CFAULTS                    (ARMV8M_NVIC_BASE + NVIC_CFAULTS_OFFSET)
+#define NVIC_HFAULTS                    (ARMV8M_NVIC_BASE + NVIC_HFAULTS_OFFSET)
+#define NVIC_DFAULTS                    (ARMV8M_NVIC_BASE + NVIC_DFAULTS_OFFSET)
+#define NVIC_MEMMANAGE_ADDR             (ARMV8M_NVIC_BASE + NVIC_MEMMANAGE_ADDR_OFFSET)
+#define NVIC_BFAULT_ADDR                (ARMV8M_NVIC_BASE + NVIC_BFAULT_ADDR_OFFSET)
+#define NVIC_AFAULTS                    (ARMV8M_NVIC_BASE + NVIC_AFAULTS_OFFSET)
+#define NVIC_PFR0                       (ARMV8M_NVIC_BASE + NVIC_PFR0_OFFSET)
+#define NVIC_PFR1                       (ARMV8M_NVIC_BASE + NVIC_PFR1_OFFSET)
+#define NVIC_DFR0                       (ARMV8M_NVIC_BASE + NVIC_DFR0_OFFSET)
+#define NVIC_AFR0                       (ARMV8M_NVIC_BASE + NVIC_AFR0_OFFSET)
+#define NVIC_MMFR0                      (ARMV8M_NVIC_BASE + NVIC_MMFR0_OFFSET)
+#define NVIC_MMFR1                      (ARMV8M_NVIC_BASE + NVIC_MMFR1_OFFSET)
+#define NVIC_MMFR2                      (ARMV8M_NVIC_BASE + NVIC_MMFR2_OFFSET)
+#define NVIC_MMFR3                      (ARMV8M_NVIC_BASE + NVIC_MMFR3_OFFSET)
+#define NVIC_ISAR0                      (ARMV8M_NVIC_BASE + NVIC_ISAR0_OFFSET)
+#define NVIC_ISAR1                      (ARMV8M_NVIC_BASE + NVIC_ISAR1_OFFSET)
+#define NVIC_ISAR2                      (ARMV8M_NVIC_BASE + NVIC_ISAR2_OFFSET)
+#define NVIC_ISAR3                      (ARMV8M_NVIC_BASE + NVIC_ISAR3_OFFSET)
+#define NVIC_ISAR4                      (ARMV8M_NVIC_BASE + NVIC_ISAR4_OFFSET)
+#define NVIC_CLIDR                      (ARMV8M_NVIC_BASE + NVIC_CLIDR_OFFSET)
+#define NVIC_CTR                        (ARMV8M_NVIC_BASE + NVIC_CTR_OFFSET)
+#define NVIC_CCSIDR                     (ARMV8M_NVIC_BASE + NVIC_CCSIDR_OFFSET)
+#define NVIC_CSSELR                     (ARMV8M_NVIC_BASE + NVIC_CSSELR_OFFSET)
+#define NVIC_CPACR                      (ARMV8M_NVIC_BASE + NVIC_CPACR_OFFSET)
+#define NVIC_DHCSR                      (ARMV8M_NVIC_BASE + NVIC_DHCSR_OFFSET)
+#define NVIC_DCRSR                      (ARMV8M_NVIC_BASE + NVIC_DCRSR_OFFSET)
+#define NVIC_DCRDR                      (ARMV8M_NVIC_BASE + NVIC_DCRDR_OFFSET)
+#define NVIC_DEMCR                      (ARMV8M_NVIC_BASE + NVIC_DEMCR_OFFSET)
+#define NVIC_STIR                       (ARMV8M_NVIC_BASE + NVIC_STIR_OFFSET)
+#define NVIC_FPCCR                      (ARMV8M_NVIC_BASE + NVIC_FPCCR_OFFSET)
+#define NVIC_ICIALLU                    (ARMV8M_NVIC_BASE + NVIC_ICIALLU_OFFSET)
+#define NVIC_ICIMVAU                    (ARMV8M_NVIC_BASE + NVIC_ICIMVAU_OFFSET)
+#define NVIC_DCIMVAC                    (ARMV8M_NVIC_BASE + NVIC_DCIMVAC_OFFSET)
+#define NVIC_DCISW                      (ARMV8M_NVIC_BASE + NVIC_DCISW_OFFSET)
+#define NVIC_DCCMVAU                    (ARMV8M_NVIC_BASE + NVIC_DCCMVAU_OFFSET)
+#define NVIC_DCCMVAC                    (ARMV8M_NVIC_BASE + NVIC_DCCMVAC_OFFSET)
+#define NVIC_DCCSW                      (ARMV8M_NVIC_BASE + NVIC_DCCSW_OFFSET)
+#define NVIC_DCCIMVAC                   (ARMV8M_NVIC_BASE + NVIC_DCCIMVAC_OFFSET)
+#define NVIC_DCCISW                     (ARMV8M_NVIC_BASE + NVIC_DCCISW_OFFSET)
+#define NVIC_BPIALL                     (ARMV8M_NVIC_BASE + NVIC_BPIALL_OFFSET)
+#define NVIC_ITCMCR                     (ARMV8M_NVIC_BASE + NVIC_ITCMCR_OFFSET)
+#define NVIC_DTCMCR                     (ARMV8M_NVIC_BASE + NVIC_DTCMCR_OFFSET)
+#define NVIC_AHBPCR                     (ARMV8M_NVIC_BASE + NVIC_AHBPCR_OFFSET)
+#define NVIC_CACR                       (ARMV8M_NVIC_BASE + NVIC_CACR_OFFSET)
+#define NVIC_AHBSCR                     (ARMV8M_NVIC_BASE + NVIC_AHBSCR_OFFSET)
+#define NVIC_ABFSR                      (ARMV8M_NVIC_BASE + NVIC_ABFSR_OFFSET)
+#define NVIC_PID4                       (ARMV8M_NVIC_BASE + NVIC_PID4_OFFSET)
+#define NVIC_PID5                       (ARMV8M_NVIC_BASE + NVIC_PID5_OFFSET)
+#define NVIC_PID6                       (ARMV8M_NVIC_BASE + NVIC_PID6_OFFSET)
+#define NVIC_PID7                       (ARMV8M_NVIC_BASE + NVIC_PID7_OFFSET)
+#define NVIC_PID0                       (ARMV8M_NVIC_BASE + NVIC_PID0_OFFSET)
+#define NVIC_PID1                       (ARMV8M_NVIC_BASE + NVIC_PID1_OFFSET)
+#define NVIC_PID2                       (ARMV8M_NVIC_BASE + NVIC_PID2_OFFSET)
+#define NVIC_PID3                       (ARMV8M_NVIC_BASE + NVIC_PID3_OFFSET)
+#define NVIC_CID0                       (ARMV8M_NVIC_BASE + NVIC_CID0_OFFSET)
+#define NVIC_CID1                       (ARMV8M_NVIC_BASE + NVIC_CID1_OFFSET)
+#define NVIC_CID2                       (ARMV8M_NVIC_BASE + NVIC_CID2_OFFSET)
+#define NVIC_CID3                       (ARMV8M_NVIC_BASE + NVIC_CID3_OFFSET)
+
+/* NVIC register bit definitions ************************************************************/
+
+/* Interrupt controller type (INCTCTL_TYPE) */
+
+#define NVIC_ICTR_INTLINESNUM_SHIFT     0    /* Bits 0-3: Number of interrupt inputs / 32 - 1 */
+#define NVIC_ICTR_INTLINESNUM_MASK      (15 << NVIC_ICTR_INTLINESNUM_SHIFT)
+
+/* SysTick control and status register (SYSTICK_CTRL) */
+
+#define NVIC_SYSTICK_CTRL_ENABLE        (1 << 0)  /* Bit 0:  Enable */
+#define NVIC_SYSTICK_CTRL_TICKINT       (1 << 1)  /* Bit 1:  Tick interrupt */
+#define NVIC_SYSTICK_CTRL_CLKSOURCE     (1 << 2)  /* Bit 2:  Clock source */
+#define NVIC_SYSTICK_CTRL_COUNTFLAG     (1 << 16) /* Bit 16: Count Flag */
+
+/* SysTick reload value register (SYSTICK_RELOAD) */
+
+#define NVIC_SYSTICK_RELOAD_SHIFT       0         /* Bits 23-0: Timer reload value */
+#define NVIC_SYSTICK_RELOAD_MASK        (0x00ffffff << NVIC_SYSTICK_RELOAD_SHIFT)
+
+/* SysTick current value register (SYSTICK_CURRENT) */
+
+#define NVIC_SYSTICK_CURRENT_SHIFT      0         /* Bits 23-0: Timer current value */
+#define NVIC_SYSTICK_CURRENT_MASK       (0x00ffffff << NVIC_SYSTICK_CURRENT_SHIFT)
+
+/* SysTick calibration value register (SYSTICK_CALIB) */
+
+#define NVIC_SYSTICK_CALIB_TENMS_SHIFT  0         /* Bits 23-0: Calibration value */
+#define NVIC_SYSTICK_CALIB_TENMS_MASK   (0x00ffffff << NVIC_SYSTICK_CALIB_TENMS_SHIFT)
+#define NVIC_SYSTICK_CALIB_SKEW         (1 << 30) /* Bit 30: Calibration value inexact */
+#define NVIC_SYSTICK_CALIB_NOREF        (1 << 31) /* Bit 31: No external reference clock */
+
+/* Interrupt control state register (INTCTRL) */
+
+#define NVIC_INTCTRL_NMIPENDSET         (1 << 31) /* Bit 31: Set pending NMI bit */
+#define NVIC_INTCTRL_PENDSVSET          (1 << 28) /* Bit 28: Set pending PendSV bit */
+#define NVIC_INTCTRL_PENDSVCLR          (1 << 27) /* Bit 27: Clear pending PendSV bit */
+#define NVIC_INTCTRL_PENDSTSET          (1 << 26) /* Bit 26: Set pending SysTick bit */
+#define NVIC_INTCTRL_PENDSTCLR          (1 << 25) /* Bit 25: Clear pending SysTick bit */
+#define NVIC_INTCTRL_ISPREEMPOT         (1 << 23) /* Bit 23: Pending active next cycle (sic: ARM name is ISRPREEMPT) */
+#define NVIC_INTCTRL_ISRPENDING         (1 << 22) /* Bit 22: Interrupt pending flag */
+#define NVIC_INTCTRL_VECTPENDING_SHIFT  12        /* Bits 21-12: Pending ISR number field */
+#define NVIC_INTCTRL_VECTPENDING_MASK   (0x3ff << NVIC_INTCTRL_VECTPENDING_SHIFT)
+#define NVIC_INTCTRL_RETTOBASE          (1 << 11) /* Bit 11: no other exceptions pending */
+#define NVIC_INTCTRL_VECTACTIVE_SHIFT   0         /* Bits 8-0: Active ISR number */
+#define NVIC_INTCTRL_VECTACTIVE_MASK    (0x1ff << NVIC_INTCTRL_VECTACTIVE_SHIFT)
+
+/* System control register (SYSCON) */
+
+                                                  /* Bit 0:  Reserved */
+#define NVIC_SYSCON_SLEEPONEXIT         (1 << 1)  /* Bit 1:  Sleep-on-exit (returning from Handler to Thread mode) */
+#define NVIC_SYSCON_SLEEPDEEP           (1 << 2)  /* Bit 2: Use deep sleep in low power mode */
+                                                  /* Bit 3:  Reserved */
+#define NVIC_SYSCON_SEVONPEND           (1 << 4)  /* Bit 4: Send Event on Pending bit */
+                                                  /* Bits 5-31: Reserved */
+
+/* Configuration control register (CFGCON) */
+
+#define NVIC_CFGCON_NONBASETHRDENA      (1 << 0)  /* Bit 0: How processor enters thread mode */
+#define NVIC_CFGCON_USERSETMPEND        (1 << 1)  /* Bit 1: Enables unprivileged access to STIR */
+#define NVIC_CFGCON_UNALIGNTRP          (1 << 3)  /* Bit 3: Enables unaligned access traps */
+#define NVIC_CFGCON_DIV0TRP             (1 << 4)  /* Bit 4: Enables fault on divide-by-zero */
+#define NVIC_CFGCON_BFHFNMIGN           (1 << 8)  /* Bit 8: Disables data bus faults */
+#define NVIC_CFGCON_STKALIGN            (1 << 9)  /* Bit 9: Indicates stack alignment on exception */
+                                                  /* Cortex-M7: */
+#define NVIC_CFGCON_DC                  (1 << 16) /* Bit 16: Data cache enable */
+#define NVIC_CFGCON_IC                  (1 << 17) /* Bit 17: Instruction cache enable */
+#define NVIC_CFGCON_BP                  (1 << 18) /* Bit 18: Branch prediction enable */
+
+/* System handler 4-7 priority register */
+
+#define NVIC_SYSH_PRIORITY_PR4_SHIFT    0
+#define NVIC_SYSH_PRIORITY_PR4_MASK     (0xff << NVIC_SYSH_PRIORITY_PR4_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR5_SHIFT    8
+#define NVIC_SYSH_PRIORITY_PR5_MASK     (0xff << NVIC_SYSH_PRIORITY_PR5_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR6_SHIFT    16
+#define NVIC_SYSH_PRIORITY_PR6_MASK     (0xff << NVIC_SYSH_PRIORITY_PR6_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR7_SHIFT    24
+#define NVIC_SYSH_PRIORITY_PR7_MASK     (0xff << NVIC_SYSH_PRIORITY_PR7_SHIFT)
+
+/* System handler 8-11 priority register */
+
+#define NVIC_SYSH_PRIORITY_PR8_SHIFT    0
+#define NVIC_SYSH_PRIORITY_PR8_MASK     (0xff << NVIC_SYSH_PRIORITY_PR8_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR9_SHIFT    8
+#define NVIC_SYSH_PRIORITY_PR9_MASK     (0xff << NVIC_SYSH_PRIORITY_PR9_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR10_SHIFT   16
+#define NVIC_SYSH_PRIORITY_PR10_MASK    (0xff << NVIC_SYSH_PRIORITY_PR10_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR11_SHIFT   24
+#define NVIC_SYSH_PRIORITY_PR11_MASK    (0xff << NVIC_SYSH_PRIORITY_PR11_SHIFT)
+
+/* System handler 12-15 priority register */
+
+#define NVIC_SYSH_PRIORITY_PR12_SHIFT   0
+#define NVIC_SYSH_PRIORITY_PR12_MASK    (0xff << NVIC_SYSH_PRIORITY_PR12_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR13_SHIFT   8
+#define NVIC_SYSH_PRIORITY_PR13_MASK    (0xff << NVIC_SYSH_PRIORITY_PR13_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR14_SHIFT   16
+#define NVIC_SYSH_PRIORITY_PR14_MASK    (0xff << NVIC_SYSH_PRIORITY_PR14_SHIFT)
+#define NVIC_SYSH_PRIORITY_PR15_SHIFT   24
+#define NVIC_SYSH_PRIORITY_PR15_MASK    (0xff << NVIC_SYSH_PRIORITY_PR15_SHIFT)
+
+/* Application Interrupt and Reset Control Register (AIRCR) */
+
+#define NVIC_AIRCR_VECTRESET            (1 << 0)  /* Bit 0:  VECTRESET */
+#define NVIC_AIRCR_VECTCLRACTIVE        (1 << 1)  /* Bit 1:  Reserved for debug use */
+#define NVIC_AIRCR_SYSRESETREQ          (1 << 2)  /* Bit 2:  System reset */
+                                                  /* Bits 3-7:  Reserved */
+#define NVIC_AIRCR_PRIGROUP_SHIFT       (8)       /* Bits 8-10: PRIGROUP */
+#define NVIC_AIRCR_PRIGROUP_MASK        (7 << NVIC_AIRCR_PRIGROUP_SHIFT)
+#define NVIC_AIRCR_ENDIANNESS           (1 << 15) /* Bit 15: 1=Big endian */
+#define NVIC_AIRCR_VECTKEY_SHIFT        (16)      /* Bits 16-31: VECTKEY */
+#define NVIC_AIRCR_VECTKEY_MASK         (0xffff << NVIC_AIRCR_VECTKEY_SHIFT)
+#define NVIC_AIRCR_VECTKEYSTAT_SHIFT    (16)      /* Bits 16-31: VECTKEYSTAT */
+#define NVIC_AIRCR_VECTKEYSTAT_MASK     (0xffff << NVIC_AIRCR_VECTKEYSTAT_SHIFT)
+
+/* System handler control and state register (SYSHCON) */
+
+#define NVIC_SYSHCON_MEMFAULTACT        (1 << 0)  /* Bit 0:  MemManage is active */
+#define NVIC_SYSHCON_BUSFAULTACT        (1 << 1)  /* Bit 1:  BusFault is active */
+#define NVIC_SYSHCON_USGFAULTACT        (1 << 3)  /* Bit 3:  UsageFault is active */
+#define NVIC_SYSHCON_SVCALLACT          (1 << 7)  /* Bit 7:  SVCall is active */
+#define NVIC_SYSHCON_MONITORACT         (1 << 8)  /* Bit 8:  Monitor is active */
+#define NVIC_SYSHCON_PENDSVACT          (1 << 10) /* Bit 10: PendSV is active */
+#define NVIC_SYSHCON_SYSTICKACT         (1 << 11) /* Bit 11: SysTick is active */
+#define NVIC_SYSHCON_USGFAULTPENDED     (1 << 12) /* Bit 12: Usage fault is pended */
+#define NVIC_SYSHCON_MEMFAULTPENDED     (1 << 13) /* Bit 13: MemManage is pended */
+#define NVIC_SYSHCON_BUSFAULTPENDED     (1 << 14) /* Bit 14: BusFault is pended */
+#define NVIC_SYSHCON_SVCALLPENDED       (1 << 15) /* Bit 15: SVCall is pended */
+#define NVIC_SYSHCON_MEMFAULTENA        (1 << 16) /* Bit 16: MemFault enabled */
+#define NVIC_SYSHCON_BUSFAULTENA        (1 << 17) /* Bit 17: BusFault enabled */
+#define NVIC_SYSHCON_USGFAULTENA        (1 << 18) /* Bit 18: UsageFault enabled */
+
+/* Cache Level ID register (Cortex-M7) */
+
+#define NVIC_CLIDR_L1CT_SHIFT           (0)      /* Bits 0-2: Level 1 cache type */
+#define NVIC_CLIDR_L1CT_MASK            (7 << NVIC_CLIDR_L1CT_SHIFT)
+#  define NVIC_CLIDR_L1CT_ICACHE        (1 << NVIC_CLIDR_L1CT_SHIFT)
+#  define NVIC_CLIDR_L1CT_DCACHE        (2 << NVIC_CLIDR_L1CT_SHIFT)
+#define NVIC_CLIDR_LOC_SHIFT            (24)      /* Bits 24-26: Level of Coherency */
+#define NVIC_CLIDR_LOC_MASK             (7 << NVIC_CLIDR_LOC_SHIFT)
+#  define NVIC_CLIDR_LOC_IMPLEMENTED    (1 << NVIC_CLIDR_LOC_SHIFT)
+#  define NVIC_CLIDR_LOC_UNIMPLEMENTED  (0 << NVIC_CLIDR_LOC_SHIFT)
+#define NVIC_CLIDR_LOUU_SHIFT           (27)      /* Bits 27-29: Level of Unification Uniprocessor */
+#define NVIC_CLIDR_LOUU_MASK            (7 << NVIC_CLIDR_LOUU_SHIFT)
+#  define NVIC_CLIDR_LOUU_IMPLEMENTED   (1 << NVIC_CLIDR_LOUU_SHIFT)
+#  define NVIC_CLIDR_LOUU_UNIMPLEMENTED (0 << NVIC_CLIDR_LOUU_SHIFT)
+
+/* Cache Type register (Cortex-M7) */
+
+#define NVIC_CTR_IMINLINE_SHIFT         (0)       /* Bits 0-3: ImInLine */
+#define NVIC_CTR_IMINLINE_MASK          (15 << NVIC_CTR_IMINLINE_SHIFT)
+#define NVIC_CTR_DMINLINE_SHIFT         (16)      /* Bits 16-19: DmInLine */
+#define NVIC_CTR_DMINLINE_MASK          (15 << NVIC_CTR_DMINLINE_SHIFT)
+#define NVIC_CTR_ERG_SHIFT              (20)      /* Bits 20-23: ERG */
+#define NVIC_CTR_ERG_MASK               (15 << NVIC_CTR_ERG_SHIFT)
+#define NVIC_CTR_CWG_SHIFT              (24)      /* Bits 24-27: CWG */
+#define NVIC_CTR_CWG_MASK               (15 << NVIC_CTR_CWG_SHIFT)
+#define NVIC_CTR_FORMAT_SHIFT           (29)      /* Bits 29-31: Format */
+#define NVIC_CTR_FORMAT_MASK            (7 << NVIC_CTR_FORMAT_SHIFT)
+
+/* Cache Size ID Register (Cortex-M7) */
+
+#define NVIC_CCSIDR_LINESIZE_SHIFT      (0)       /* Bits 0-2: Number of words in each cache line */
+#define NVIC_CCSIDR_LINESIZE_MASK       (7 << NVIC_CCSIDR_LINESIZE_SHIFT)
+#define NVIC_CCSIDR_ASSOCIATIVITY_SHIFT (3)       /* Bits 3-12: Number of ways - 1 */
+#define NVIC_CCSIDR_ASSOCIATIVITY_MASK  (0x3ff << NVIC_CCSIDR_ASSOCIATIVITY_SHIFT)
+#define NVIC_CCSIDR_NUMSETS_SHIFT       (13)      /* Bits 13-27: Number of sets - 1 */
+#define NVIC_CCSIDR_NUMSETS_MASK        (0x7fff << NVIC_CCSIDR_NUMSETS_SHIFT)
+#define NVIC_CCSIDR_WA_SHIFT            (1 << 28) /* Bit 28: Write Allocation support (NOTE: bit mask despite _SHIFT name) */
+#define NVIC_CCSIDR_RA_SHIFT            (1 << 29) /* Bit 29: Read Allocation support (bit mask despite _SHIFT name) */
+#define NVIC_CCSIDR_WB_SHIFT            (1 << 30) /* Bit 30: Write-Back support (bit mask despite _SHIFT name) */
+#define NVIC_CCSIDR_WT_SHIFT            (1 << 31) /* Bit 31: Write-Through support (bit mask despite _SHIFT name) */
+
+/* Cache Size Selection Register (Cortex-M7) */
+
+#define NVIC_CSSELR_IND                 (1 << 0)  /* Bit 0: Selects either instruction or data cache */
+#  define NVIC_CSSELR_IND_ICACHE        (0 << 0)  /*   0=Instruction Cache */
+#  define NVIC_CSSELR_IND_DCACHE        (1 << 0)  /*   1=Data Cache */
+
+#define NVIC_CSSELR_LEVEL_SHIFT         (1)       /* Bit 1-3: Selects cache level */
+#define NVIC_CSSELR_LEVEL_MASK          (7 << NVIC_CSSELR_LEVEL_SHIFT)
+#  define NVIC_CSSELR_LEVEL_1           (0 << NVIC_CSSELR_LEVEL_SHIFT)
+
+/* Debug Exception and Monitor Control Register (DEMCR) */
+
+#define NVIC_DEMCR_VCCORERESET          (1 << 0)  /* Bit 0:  Reset Vector Catch */
+#define NVIC_DEMCR_VCMMERR              (1 << 4)  /* Bit 4:  Debug trap on Memory Management faults */
+#define NVIC_DEMCR_VCNOCPERR            (1 << 5)  /* Bit 5:  Debug trap on Usage Fault access to non-present coprocessor */
+#define NVIC_DEMCR_VCCHKERR             (1 << 6)  /* Bit 6:  Debug trap on Usage Fault enabled checking errors */
+#define NVIC_DEMCR_VCSTATERR            (1 << 7)  /* Bit 7:  Debug trap on Usage Fault state error */
+#define NVIC_DEMCR_VCBUSERR             (1 << 8)  /* Bit 8:  Debug Trap on normal Bus error */
+#define NVIC_DEMCR_VCINTERR             (1 << 9)  /* Bit 9:  Debug Trap on interrupt/exception service errors */
+#define NVIC_DEMCR_VCHARDERR            (1 << 10) /* Bit 10: Debug trap on Hard Fault */
+#define NVIC_DEMCR_MONEN                (1 << 16) /* Bit 16: Enable the debug monitor */
+#define NVIC_DEMCR_MONPEND              (1 << 17) /* Bit 17: Pend the monitor to activate when priority permits */
+#define NVIC_DEMCR_MONSTEP              (1 << 18) /* Bit 18: Steps the core */
+#define NVIC_DEMCR_MONREQ               (1 << 19) /* Bit 19: Monitor wake-up mode */
+#define NVIC_DEMCR_TRCENA               (1 << 24) /* Bit 24: Enable trace and debug blocks */
+
+/* Instruction Tightly-Coupled Memory Control Register (ITCMCR) */
+
+/* Data Tightly-Coupled Memory Control Registers (DTCMCR */
+
+#define NVIC_TCMCR_EN                   (1 << 0)  /* Bit 0:  TCM enable */
+#define NVIC_TCMCR_RMW                  (1 << 1)  /* Bit 1:  Read-Modify-Write (RMW) enable */
+#define NVIC_TCMCR_RETEN                (1 << 2)  /* Bit 2:  Retry phase enable */
+#define NVIC_TCMCR_SZ_SHIFT             (3)       /* Bits 3-6: Size of the TCM */
+#define NVIC_TCMCR_SZ_MASK              (15 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_NONE            (0 << NVIC_TCMCR_SZ_SHIFT) /* No TCM implemented */
+#  define NVIC_TCMCR_SZ_4KB             (3 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_8KB             (4 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_16KB            (5 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_32KB            (6 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_64KB            (7 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_128KB           (8 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_256KB           (9 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_512KB           (10 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_1MB             (11 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_2MB             (12 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_4MB             (13 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_8MB             (14 << NVIC_TCMCR_SZ_SHIFT)
+#  define NVIC_TCMCR_SZ_16MB            (15 << NVIC_TCMCR_SZ_SHIFT)
+
+/* AHBP Control Register (AHBPCR, Cortex-M7) */
+
+#define NVIC_AHBPCR_EN                  (1 << 0)  /* Bit 0: AHBP enable */
+#define NVIC_AHBPCR_SZ_SHIFT            (1)       /* Bits 1-3: AHBP size */
+#define NVIC_AHBPCR_SZ_MASK             (7 << NVIC_AHBPCR_SZ_SHIFT)
+#  define NVIC_AHBPCR_SZ_DISABLED       (0 << NVIC_AHBPCR_SZ_SHIFT)
+#  define NVIC_AHBPCR_SZ_64MB           (1 << NVIC_AHBPCR_SZ_SHIFT)
+#  define NVIC_AHBPCR_SZ_128MB          (2 << NVIC_AHBPCR_SZ_SHIFT)
+#  define NVIC_AHBPCR_SZ_256MB          (3 << NVIC_AHBPCR_SZ_SHIFT)
+#  define NVIC_AHBPCR_SZ_512MB          (4 << NVIC_AHBPCR_SZ_SHIFT)
+
+/* L1 Cache Control Register (CACR, Cortex-M7) */
+
+#define NVIC_CACR_SIWT                  (1 << 0)  /* Bit 0:  Shared cacheable-is-WT for data cache */
+#define NVIC_CACR_ECCDIS                (1 << 1)  /* Bit 1:  Enables ECC in the instruction and data cache */
+#define NVIC_CACR_FORCEWT               (1 << 2)  /* Bit 2:  Enables Force Write-Through in the data cache */
+
+/********************************************************************************************
+ * Public Types
+ ********************************************************************************************/
+
+/********************************************************************************************
+ * Public Data
+ ********************************************************************************************/
+
+/********************************************************************************************
+ * Public Function Prototypes
+ ********************************************************************************************/
+
+#endif /* __ARCH_ARM_SRC_COMMON_ARMV8_M_NVIC_H */
diff --git a/arch/arm/src/armv8-m/psr.h b/arch/arm/src/armv8-m/psr.h
new file mode 100755
index 0000000..bdefe9f
--- /dev/null
+++ b/arch/arm/src/armv8-m/psr.h
@@ -0,0 +1,72 @@
+/************************************************************************************
+ * arch/arm/src/armv8-m/psr.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_COMMON_ARMV8_M_PSR_H
+#define __ARCH_ARM_SRC_COMMON_ARMV8_M_PSR_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/* Application Program Status Register (APSR) */
+
+#define ARMV8M_APSR_Q            (1 << 27) /* Bit 27: Sticky saturation flag */
+#define ARMV8M_APSR_V            (1 << 28) /* Bit 28: Overflow flag */
+#define ARMV8M_APSR_C            (1 << 29) /* Bit 29: Carry/borrow flag */
+#define ARMV8M_APSR_Z            (1 << 30) /* Bit 30: Zero flag */
+#define ARMV8M_APSR_N            (1 << 31) /* Bit 31: Negative, less than flag */
+
+/* Interrupt Program Status Register (IPSR) */
+
+#define ARMV8M_IPSR_ISR_SHIFT    0         /* Bits 8-0: ISR number */
+#define ARMV8M_IPSR_ISR_MASK     (0x1ff << ARMV8M_IPSR_ISR_SHIFT)
+
+/* Execution PSR Register (EPSR) */
+
+#define ARMV8M_EPSR_ICIIT1_SHIFT 10        /* Bits 15-10: Interrupt-Continuable-Instruction/If-Then bits */
+#define ARMV8M_EPSR_ICIIT1_MASK  (3 << ARMV8M_EPSR_ICIIT1_SHIFT)
+#define ARMV8M_EPSR_T            (1 << 24) /* Bit 24: T-bit */
+#define ARMV8M_EPSR_ICIIT2_SHIFT 25        /* Bits 26-25: Interrupt-Continuable-Instruction/If-Then bits */
+#define ARMV8M_EPSR_ICIIT2_MASK  (3 << ARMV8M_EPSR_ICIIT2_SHIFT)
+
+/* Save xPSR bits */
+
+#define ARMV8M_XPSR_ISR_SHIFT    ARMV8M_IPSR_ISR_SHIFT
+#define ARMV8M_XPSR_ISR_MASK     ARMV8M_IPSR_ISR_MASK
+#define ARMV8M_XPSR_ICIIT1_SHIFT ARMV8M_EPSR_ICIIT1_SHIFT
+#define ARMV8M_XPSR_ICIIT1_MASK  ARMV8M_EPSR_ICIIT1_MASK
+#define ARMV8M_XPSR_T            ARMV8M_EPSR_T
+#define ARMV8M_XPSR_ICIIT2_SHIFT ARMV8M_EPSR_ICIIT2_SHIFT
+#define ARMV8M_XPSR_ICIIT2_MASK  ARMV8M_EPSR_ICIIT2_MASK
+#define ARMV8M_XPSR_Q            ARMV8M_APSR_Q
+#define ARMV8M_XPSR_V            ARMV8M_APSR_V
+#define ARMV8M_XPSR_C            ARMV8M_APSR_C
+#define ARMV8M_XPSR_Z            ARMV8M_APSR_Z
+#define ARMV8M_XPSR_N            ARMV8M_APSR_N
+
+/************************************************************************************
+ * Inline Functions
+ ************************************************************************************/
+
+#endif /* __ARCH_ARM_SRC_COMMON_ARMV8_M_PSR_H */
diff --git a/arch/arm/src/armv8-m/ram_vectors.h b/arch/arm/src/armv8-m/ram_vectors.h
new file mode 100755
index 0000000..17168b0
--- /dev/null
+++ b/arch/arm/src/armv8-m/ram_vectors.h
@@ -0,0 +1,103 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/ram_vectors.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_COMMON_ARMV8_M_RAM_VECTORS_H
+#define __ARCH_ARM_SRC_COMMON_ARMV8_M_RAM_VECTORS_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <arch/irq.h>
+
+#include "up_internal.h"
+#include "chip.h"
+
+#ifdef CONFIG_ARCH_RAMVECTORS
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* This is the size of the vector table (in 4-byte entries).  This size
+ * includes the (1) the peripheral interrupts, (2) space for 15 Cortex-M
+ * exceptions, and (3) IDLE stack pointer which lies at the beginning of the
+ * table.
+ */
+
+#define ARMV8M_VECTAB_SIZE (ARMV8M_PERIPHERAL_INTERRUPTS + 16)
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+/* If CONFIG_ARCH_RAMVECTORS is defined, then the ARM logic must provide
+ * ARM-specific implementations of irq_initialize(), irq_attach(), and
+ * irq_dispatch.  In this case, it is also assumed that the ARM vector
+ * table resides in RAM, has the name up_ram_vectors, and has been
+ * properly positioned and aligned in memory by the linker script.
+ *
+ * REVISIT: Can this alignment requirement vary from core-to-core?  Yes, it
+ * depends on the number of vectors supported by the MCU. The safest thing
+ * to do is to put the vector table at the beginning of RAM in order to force
+ * the highest alignment possible.
+ */
+
+extern up_vector_t g_ram_vectors[ARMV8M_VECTAB_SIZE]
+  __attribute__ ((section (".ram_vectors"), aligned (128)));
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_ramvec_initialize
+ *
+ * Description:
+ *   Copy vectors to RAM and configure the NVIC to use the RAM vectors.
+ *
+ ****************************************************************************/
+
+void up_ramvec_initialize(void);
+
+/****************************************************************************
+ * Name: exception_common
+ *
+ * Description:
+ *   This is the default, common vector handling entrypoint.
+ *
+ ****************************************************************************/
+
+void exception_common(void);
+
+/****************************************************************************
+ * Name: up_ramvec_attach
+ *
+ * Description:
+ *   Configure the ram vector table so that IRQ number 'irq' will be
+ *   dispatched by hardware to 'vector'
+ *
+ ****************************************************************************/
+
+int up_ramvec_attach(int irq, up_vector_t vector);
+
+#endif /* CONFIG_ARCH_RAMVECTORS */
+#endif /* __ARCH_ARM_SRC_COMMON_ARMV8_M_RAM_VECTORS_H */
diff --git a/arch/arm/src/armv8-m/svcall.h b/arch/arm/src/armv8-m/svcall.h
new file mode 100755
index 0000000..cd1a9fb
--- /dev/null
+++ b/arch/arm/src/armv8-m/svcall.h
@@ -0,0 +1,131 @@
+/************************************************************************************
+ * arch/arm/src/armv8-m/svcall.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8_M_SVCALL_H
+#define __ARCH_ARM_SRC_ARMV8_M_SVCALL_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifdef CONFIG_LIB_SYSCALL
+#  include <syscall.h>
+#endif
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/* Configuration ********************************************************************/
+
+/* This logic uses three system calls {0,1,2} for context switching and one for the
+ * syscall return.  So a minimum of four syscall values must be reserved.  If
+ * CONFIG_BUILD_PROTECTED is defined, then four more syscall values must be reserved.
+ */
+
+#ifdef CONFIG_LIB_SYSCALL
+#  ifdef CONFIG_BUILD_PROTECTED
+#    ifndef CONFIG_SYS_RESERVED
+#      error "CONFIG_SYS_RESERVED must be defined to have the value 8"
+#    elif CONFIG_SYS_RESERVED != 8
+#      error "CONFIG_SYS_RESERVED must have the value 8"
+#    endif
+#  else
+#    ifndef CONFIG_SYS_RESERVED
+#      error "CONFIG_SYS_RESERVED must be defined to have the value 4"
+#    elif CONFIG_SYS_RESERVED != 4
+#      error "CONFIG_SYS_RESERVED must have the value 4"
+#    endif
+#  endif
+#endif
+
+/* Cortex-M system calls ************************************************************/
+
+/* SYS call 0:
+ *
+ * int up_saveusercontext(uint32_t *saveregs);
+ */
+
+#define SYS_save_context          (0)
+
+/* SYS call 1:
+ *
+ * void up_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
+ */
+
+#define SYS_restore_context       (1)
+
+/* SYS call 2:
+ *
+ * void up_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
+ */
+
+#define SYS_switch_context        (2)
+
+#ifdef CONFIG_LIB_SYSCALL
+/* SYS call 3:
+ *
+ * void up_syscall_return(void);
+ */
+
+#define SYS_syscall_return        (3)
+
+#ifdef CONFIG_BUILD_PROTECTED
+/* SYS call 4:
+ *
+ * void up_task_start(main_t taskentry, int argc, FAR char *argv[])
+ *        noreturn_function;
+ */
+
+#define SYS_task_start            (4)
+
+/* SYS call 5:
+ *
+ * void up_pthread_start(pthread_startroutine_t entrypt, pthread_addr_t arg)
+ *        noreturn_function
+ */
+
+#define SYS_pthread_start         (5)
+
+/* SYS call 6:
+ *
+ * void signal_handler(_sa_sigaction_t sighand, int signo, FAR siginfo_t *info,
+ *                     FAR void *ucontext);
+ */
+
+#define SYS_signal_handler        (6)
+
+/* SYS call 7:
+ *
+ * void signal_handler_return(void);
+ */
+
+#define SYS_signal_handler_return (7)
+
+#endif /* CONFIG_BUILD_PROTECTED */
+#endif /* CONFIG_LIB_SYSCALL */
+
+/************************************************************************************
+ * Inline Functions
+ ************************************************************************************/
+
+#endif /* __ARCH_ARM_SRC_ARMV8_M_SVCALL_H */
diff --git a/arch/arm/src/armv8-m/systick.h b/arch/arm/src/armv8-m/systick.h
new file mode 100755
index 0000000..7f3a55c
--- /dev/null
+++ b/arch/arm/src/armv8-m/systick.h
@@ -0,0 +1,76 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/systick.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8_M_SYSTICK_H
+#define __ARCH_ARM_SRC_ARMV8_M_SYSTICK_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <nuttx/timers/timer.h>
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C"
+{
+#else
+#define EXTERN extern
+#endif
+
+/****************************************************************************
+ * Name: systick_initialize
+ *
+ * Description:
+ *   This function initialize SYSTICK hardware module which is part of NVIC
+ *   and return an instance of a "lower half" timer interface.
+ *
+ * Input parameters:
+ *   coreclk - false if SYSTICK working clock come from the external
+ *     reference clock, otherwise true.
+ *   freq - The clock frequency in Hz. If freq is zero, calculate the value
+ *     from NVIC_SYSTICK_CALIB register.
+ *   minor - The timer driver minor number, skip the registration if minor
+ *     < 0.
+ *
+ * Returned Value:
+ *   On success, a non-NULL timer_lowerhalf_s is returned to the caller.
+ *   In the event of any failure, a NULL value is returned.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_SYSTICK
+struct timer_lowerhalf_s *systick_initialize(bool coreclk, unsigned int freq,
+                                             int minor);
+#else
+#  define systick_initialize(coreclk, freq, minor) NULL
+#endif /* CONFIG_ARMV8M_SYSTICK */
+
+#undef EXTERN
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __ARCH_ARM_SRC_ARMV8_M_SYSTICK_H */
diff --git a/arch/arm/src/armv8-m/tpi.h b/arch/arm/src/armv8-m/tpi.h
new file mode 100755
index 0000000..816c583
--- /dev/null
+++ b/arch/arm/src/armv8-m/tpi.h
@@ -0,0 +1,200 @@
+/***********************************************************************************************
+ * arch/arm/src/armv8-m/tpi.h
+ *
+ *   Copyright (c) 2009 - 2013 ARM LIMITED
+ *
+ *  All rights reserved.
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions are met:
+ *
+ *  - Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  - Neither the name of ARM nor the names of its contributors may be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *  ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *  POSSIBILITY OF SUCH DAMAGE.
+ *
+ *   Copyright (C) 2014 Pierre-noel Bouteville . All rights reserved.
+ *   Author: Pierre-noel Bouteville <pnb990@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***********************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV8_M_TPI_H
+#define __ARCH_ARM_SRC_ARMV8_M_TPI_H
+
+/***********************************************************************************************
+ * Pre-processor Definitions
+ ***********************************************************************************************/
+
+/* Trace Port Interface Register (TPI) Definitions *********************************************/
+
+/* TPI Register Base Address *******************************************************************/
+
+#define TPI_BASE                      (0xe0040000ul)
+
+/* TPI Register Addresses **********************************************************************/
+
+#define TPI_SSPSR                     (TPI_BASE + 0x0000) /* Supported Parallel Port Size Register */
+#define TPI_CSPSR                     (TPI_BASE + 0x0004) /* Current Parallel Port Size Register */
+#define TPI_ACPR                      (TPI_BASE + 0x0010) /* Asynchronous Clock Prescaler Register */
+#define TPI_SPPR                      (TPI_BASE + 0x00f0) /* Selected Pin Protocol Register */
+#define TPI_FFSR                      (TPI_BASE + 0x0300) /* Formatter and Flush Status Register */
+#define TPI_FFCR                      (TPI_BASE + 0x0304) /* Formatter and Flush Control Register */
+#define TPI_FSCR                      (TPI_BASE + 0x0308) /* Formatter Synchronization Counter Register */
+#define TPI_TRIGGER                   (TPI_BASE + 0x0ee8) /* TRIGGER */
+#define TPI_FIFO0                     (TPI_BASE + 0x0eec) /* Integration ETM Data */
+#define TPI_ITATBCTR2                 (TPI_BASE + 0x0ef0) /* ITATBCTR2 */
+#define TPI_ITATBCTR0                 (TPI_BASE + 0x0ef8) /* ITATBCTR0 */
+#define TPI_FIFO1                     (TPI_BASE + 0x0efc) /* Integration ITM Data */
+#define TPI_ITCTRL                    (TPI_BASE + 0x0f00) /* Integration Mode Control */
+#define TPI_CLAIMSET                  (TPI_BASE + 0x0fa0) /* Claim tag set */
+#define TPI_CLAIMCLR                  (TPI_BASE + 0x0fa4) /* Claim tag clear */
+#define TPI_DEVID                     (TPI_BASE + 0x0fc8) /* TPIU_DEVID */
+#define TPI_DEVTYPE                   (TPI_BASE + 0x0fcc) /* TPIU_DEVTYPE */
+
+/* TPI Register Bit Field Definitions **********************************************************/
+
+/* TPI ACPR */
+
+#define TPI_ACPR_PRESCALER_SHIFT      0
+#define TPI_ACPR_PRESCALER_MASK       (0x1ffful << TPI_ACPR_PRESCALER_SHIFT)
+
+/* TPI SPPR */
+
+#define TPI_SPPR_TXMODE_SHIFT         0
+#define TPI_SPPR_TXMODE_MASK          (0x3ul << TPI_SPPR_TXMODE_SHIFT)
+
+/* TPI FFSR */
+
+#define TPI_FFSR_FtNonStop_SHIFT      3
+#define TPI_FFSR_FtNonStop_MASK       (0x1ul << TPI_FFSR_FtNonStop_SHIFT)
+#define TPI_FFSR_TCPresent_SHIFT      2
+#define TPI_FFSR_TCPresent_MASK       (0x1ul << TPI_FFSR_TCPresent_SHIFT)
+#define TPI_FFSR_FtStopped_SHIFT      1
+#define TPI_FFSR_FtStopped_MASK       (0x1ul << TPI_FFSR_FtStopped_SHIFT)
+#define TPI_FFSR_FlInProg_SHIFT       0
+#define TPI_FFSR_FlInProg_MASK        (0x1ul << TPI_FFSR_FlInProg_SHIFT)
+
+/* TPI FFCR */
+
+#define TPI_FFCR_TrigIn_SHIFT         8
+#define TPI_FFCR_TrigIn_MASK          (0x1ul << TPI_FFCR_TrigIn_SHIFT)
+#define TPI_FFCR_EnFCont_SHIFT        1
+#define TPI_FFCR_EnFCont_MASK         (0x1ul << TPI_FFCR_EnFCont_SHIFT)
+
+#define TPI_TRIGGER_TRIGGER_SHIFT     0
+#define TPI_TRIGGER_TRIGGER_MASK      (0x1ul << TPI_TRIGGER_TRIGGER_SHIFT)
+
+/* TPI FIFO0 */
+
+#define TPI_FIFO0_ITM_ATVALID_SHIFT   29
+#define TPI_FIFO0_ITM_ATVALID_MASK    (0x3ul << TPI_FIFO0_ITM_ATVALID_SHIFT)
+#define TPI_FIFO0_ITM_bytecount_SHIFT 27
+#define TPI_FIFO0_ITM_bytecount_MASK  (0x3ul << TPI_FIFO0_ITM_bytecount_SHIFT)
+#define TPI_FIFO0_ETM_ATVALID_SHIFT   26
+#define TPI_FIFO0_ETM_ATVALID_MASK    (0x3ul << TPI_FIFO0_ETM_ATVALID_SHIFT)
+#define TPI_FIFO0_ETM_bytecount_SHIFT 24
+#define TPI_FIFO0_ETM_bytecount_MASK  (0x3ul << TPI_FIFO0_ETM_bytecount_SHIFT)
+#define TPI_FIFO0_ETM2_SHIFT          16
+#define TPI_FIFO0_ETM2_MASK           (0xfful << TPI_FIFO0_ETM2_SHIFT)
+#define TPI_FIFO0_ETM1_SHIFT          8
+#define TPI_FIFO0_ETM1_MASK           (0xfful << TPI_FIFO0_ETM1_SHIFT)
+#define TPI_FIFO0_ETM0_SHIFT          0
+#define TPI_FIFO0_ETM0_MASK           (0xfful << TPI_FIFO0_ETM0_SHIFT)
+
+/* TPI ITATBCTR2 */
+
+#define TPI_ITATBCTR2_ATREADY_SHIFT   0
+#define TPI_ITATBCTR2_ATREADY_MASK    (0x1ul << TPI_ITATBCTR2_ATREADY_SHIFT)
+
+/* TPI FIFO1 */
+
+#define TPI_FIFO1_ITM_ATVALID_SHIFT   29
+#define TPI_FIFO1_ITM_ATVALID_MASK    (0x3ul << TPI_FIFO1_ITM_ATVALID_SHIFT)
+#define TPI_FIFO1_ITM_bytecount_SHIFT 27
+#define TPI_FIFO1_ITM_bytecount_MASK  (0x3ul << TPI_FIFO1_ITM_bytecount_SHIFT)
+#define TPI_FIFO1_ETM_ATVALID_SHIFT   26
+#define TPI_FIFO1_ETM_ATVALID_MASK    (0x3ul << TPI_FIFO1_ETM_ATVALID_SHIFT)
+#define TPI_FIFO1_ETM_bytecount_SHIFT 24
+#define TPI_FIFO1_ETM_bytecount_MASK  (0x3ul << TPI_FIFO1_ETM_bytecount_SHIFT)
+#define TPI_FIFO1_ITM2_SHIFT          16
+#define TPI_FIFO1_ITM2_MASK           (0xfful << TPI_FIFO1_ITM2_SHIFT)
+#define TPI_FIFO1_ITM1_SHIFT          8
+#define TPI_FIFO1_ITM1_MASK           (0xfful << TPI_FIFO1_ITM1_SHIFT)
+#define TPI_FIFO1_ITM0_SHIFT          0
+#define TPI_FIFO1_ITM0_MASK           (0xfful << TPI_FIFO1_ITM0_SHIFT)
+
+/* TPI ITATBCTR0 */
+
+#define TPI_ITATBCTR0_ATREADY_SHIFT   0
+#define TPI_ITATBCTR0_ATREADY_MASK    (0x1ul << TPI_ITATBCTR0_ATREADY_SHIFT)
+
+/* TPI ITCTRL */
+
+#define TPI_ITCTRL_Mode_SHIFT         0
+#define TPI_ITCTRL_Mode_MASK          (0x1ul << TPI_ITCTRL_Mode_SHIFT)
+
+/* TPI DEVID */
+
+#define TPI_DEVID_NRZVALID_SHIFT      11
+#define TPI_DEVID_NRZVALID_MASK       (0x1ul << TPI_DEVID_NRZVALID_SHIFT)
+#define TPI_DEVID_MANCVALID_SHIFT     10
+#define TPI_DEVID_MANCVALID_MASK      (0x1ul << TPI_DEVID_MANCVALID_SHIFT)
+#define TPI_DEVID_PTINVALID_SHIFT     9
+#define TPI_DEVID_PTINVALID_MASK      (0x1ul << TPI_DEVID_PTINVALID_SHIFT)
+#define TPI_DEVID_MinBufSz_SHIFT      6
+#define TPI_DEVID_MinBufSz_MASK       (0x7ul << TPI_DEVID_MinBufSz_SHIFT)
+#define TPI_DEVID_AsynClkIn_SHIFT     5
+#define TPI_DEVID_AsynClkIn_MASK      (0x1ul << TPI_DEVID_AsynClkIn_SHIFT)
+#define TPI_DEVID_NrTraceInput_SHIFT  0
+#define TPI_DEVID_NrTraceInput_MASK   (0x1ful << TPI_DEVID_NrTraceInput_SHIFT)
+
+/* TPI DEVTYPE */
+
+#define TPI_DEVTYPE_SubType_SHIFT     0
+#define TPI_DEVTYPE_SubType_MASK      (0xful << TPI_DEVTYPE_SubType_SHIFT)
+#define TPI_DEVTYPE_MajorType_SHIFT   4
+#define TPI_DEVTYPE_MajorType_MASK    (0xful << TPI_DEVTYPE_MajorType_SHIFT)
+
+#endif /* __ARCH_ARM_SRC_ARMV8_M_TPI_H */
diff --git a/arch/arm/src/armv8-m/up_assert.c b/arch/arm/src/armv8-m/up_assert.c
new file mode 100755
index 0000000..0b5f736
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_assert.c
@@ -0,0 +1,457 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_assert.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <debug.h>
+
+#include <nuttx/irq.h>
+#include <nuttx/arch.h>
+#include <nuttx/board.h>
+#include <nuttx/syslog/syslog.h>
+#include <nuttx/usb/usbdev_trace.h>
+
+#include <arch/board/board.h>
+
+#include "sched/sched.h"
+#include "irq/irq.h"
+
+#include "up_arch.h"
+#include "up_internal.h"
+#include "chip.h"
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* USB trace dumping */
+
+#ifndef CONFIG_USBDEV_TRACE
+#  undef CONFIG_ARCH_USBDUMP
+#endif
+
+#ifndef CONFIG_BOARD_RESET_ON_ASSERT
+#  define CONFIG_BOARD_RESET_ON_ASSERT 0
+#endif
+
+/****************************************************************************
+ * Private Data
+ ****************************************************************************/
+
+#ifdef CONFIG_ARCH_STACKDUMP
+static uint32_t s_last_regs[XCPTCONTEXT_REGS];
+#endif
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_getsp
+ ****************************************************************************/
+
+/* I don't know if the builtin to get SP is enabled */
+
+static inline uint32_t up_getsp(void)
+{
+  uint32_t sp;
+
+  /* Read the current stack pointer from the SP register.  The asm is
+   * marked volatile so that the compiler cannot hoist or common the
+   * read: the stack pointer differs between call sites, so every call
+   * must issue a fresh "mov" instruction.
+   */
+
+  __asm__ __volatile__
+  (
+    "\tmov %0, sp\n\t"
+    : "=r"(sp)
+  );
+  return sp;
+}
+
+/****************************************************************************
+ * Name: up_stackdump
+ ****************************************************************************/
+
+#ifdef CONFIG_ARCH_STACKDUMP
+static void up_stackdump(uint32_t sp, uint32_t stack_base)
+{
+  uint32_t addr;
+
+  /* Dump eight 32-bit words per line, starting from the 32-byte
+   * aligned address at or below sp and stopping at the stack base.
+   */
+
+  for (addr = sp & ~0x1f; addr < stack_base; addr += 32)
+    {
+      uint32_t *line = (uint32_t *)addr;
+      _alert("%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+             addr, line[0], line[1], line[2], line[3],
+             line[4], line[5], line[6], line[7]);
+    }
+}
+#else
+#  define up_stackdump(sp,stack_base)
+#endif
+
+/****************************************************************************
+ * Name: up_taskdump
+ ****************************************************************************/
+
+#ifdef CONFIG_STACK_COLORATION
+/* sched_foreach() callback: report one task's PID, name (when task
+ * names are configured), and measured stack usage vs. allocated size.
+ * 'arg' is the unused opaque pointer passed through sched_foreach().
+ */
+
+static void up_taskdump(FAR struct tcb_s *tcb, FAR void *arg)
+{
+  /* Dump interesting properties of this task */
+
+#if CONFIG_TASK_NAME_SIZE > 0
+  _alert("%s: PID=%d Stack Used=%lu of %lu\n",
+        tcb->name, tcb->pid, (unsigned long)up_check_tcbstack(tcb),
+        (unsigned long)tcb->adj_stack_size);
+#else
+  _alert("PID: %d Stack Used=%lu of %lu\n",
+        tcb->pid, (unsigned long)up_check_tcbstack(tcb),
+        (unsigned long)tcb->adj_stack_size);
+#endif
+}
+#endif
+
+/****************************************************************************
+ * Name: up_showtasks
+ ****************************************************************************/
+
+#ifdef CONFIG_STACK_COLORATION
+static inline void up_showtasks(void)
+{
+  /* Visit every task in the system and report its stack usage */
+
+  sched_foreach(up_taskdump, NULL);
+}
+#else
+#  define up_showtasks()
+#endif
+
+/****************************************************************************
+ * Name: up_registerdump
+ ****************************************************************************/
+
+#ifdef CONFIG_ARCH_STACKDUMP
+/* Dump the machine register context of the failing task.  If the
+ * assertion occurred in an interrupt handler, CURRENT_REGS holds the
+ * interrupted context; otherwise the current context is captured into
+ * the static s_last_regs buffer.
+ */
+
+static inline void up_registerdump(void)
+{
+  volatile uint32_t *regs = CURRENT_REGS;
+
+  /* Are user registers available from interrupt processing? */
+
+  if (regs == NULL)
+    {
+      /* No.. capture user registers by hand */
+
+      up_saveusercontext(s_last_regs);
+      regs = s_last_regs;
+    }
+
+  /* Dump the interrupt registers */
+
+  _alert("R0: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+        regs[REG_R0], regs[REG_R1], regs[REG_R2], regs[REG_R3],
+        regs[REG_R4], regs[REG_R5], regs[REG_R6], regs[REG_R7]);
+  _alert("R8: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+        regs[REG_R8],  regs[REG_R9],  regs[REG_R10], regs[REG_R11],
+        regs[REG_R12], regs[REG_R13], regs[REG_R14], regs[REG_R15]);
+
+  /* BASEPRI vs. PRIMASK: only one is saved in the context, depending
+   * on how interrupt masking was configured.
+   */
+
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+  _alert("xPSR: %08x BASEPRI: %08x CONTROL: %08x\n",
+        regs[REG_XPSR], regs[REG_BASEPRI], getcontrol());
+#else
+  _alert("xPSR: %08x PRIMASK: %08x CONTROL: %08x\n",
+        regs[REG_XPSR], regs[REG_PRIMASK], getcontrol());
+#endif
+
+#ifdef REG_EXC_RETURN
+  _alert("EXC_RETURN: %08x\n", regs[REG_EXC_RETURN]);
+#endif
+}
+#else
+# define up_registerdump()
+#endif
+
+/****************************************************************************
+ * Name: assert_tracecallback
+ ****************************************************************************/
+
+#ifdef CONFIG_ARCH_USBDUMP
+/* printf-style output callback used when dumping USB trace records:
+ * forwards the formatted output to the syslog at LOG_EMERG priority.
+ *
+ * NOTE(review): depends on va_list/va_start being made visible by one
+ * of the headers included above (there is no direct
+ * #include <stdarg.h> in this file) -- confirm.
+ */
+
+static int usbtrace_syslog(FAR const char *fmt, ...)
+{
+  va_list ap;
+  int ret;
+
+  /* Let nx_vsyslog do the real work */
+
+  va_start(ap, fmt);
+  ret = nx_vsyslog(LOG_EMERG, fmt, &ap);
+  va_end(ap);
+  return ret;
+}
+
+static int assert_tracecallback(FAR struct usbtrace_s *trace, FAR void *arg)
+{
+  /* Render this one trace record through the syslog-backed printer;
+   * returning zero continues the enumeration.
+   */
+
+  usbtrace_trprintf(usbtrace_syslog, trace->event, trace->value);
+  return 0;
+}
+#endif
+
+/****************************************************************************
+ * Name: up_dumpstate
+ ****************************************************************************/
+
+#ifdef CONFIG_ARCH_STACKDUMP
+/* Dump the register context plus the contents of the interrupt stack
+ * (when one is configured) and the failing task's user stack.
+ */
+
+static void up_dumpstate(void)
+{
+  struct tcb_s *rtcb = running_task();
+  uint32_t sp = up_getsp();
+  uint32_t ustackbase;
+  uint32_t ustacksize;
+#if CONFIG_ARCH_INTERRUPTSTACK > 7
+  uint32_t istackbase;
+  uint32_t istacksize;
+#endif
+
+  /* Dump the registers (if available) */
+
+  up_registerdump();
+
+  /* Get the limits on the user stack memory */
+
+  if (rtcb->pid == 0) /* Check for CPU0 IDLE thread */
+    {
+      /* The IDLE thread stack was not allocated via the TCB; use the
+       * linker/startup-provided top-of-stack instead.
+       */
+
+      ustackbase = g_idle_topstack - 4;
+      ustacksize = CONFIG_IDLETHREAD_STACKSIZE;
+    }
+  else
+    {
+      ustackbase = (uint32_t)rtcb->adj_stack_ptr;
+      ustacksize = (uint32_t)rtcb->adj_stack_size;
+    }
+
+#if CONFIG_ARCH_INTERRUPTSTACK > 7
+  /* Get the limits on the interrupt stack memory */
+
+#ifdef CONFIG_SMP
+  istackbase = (uint32_t)up_intstack_base();
+#else
+  istackbase = (uint32_t)&g_intstackbase;
+#endif
+  istacksize = (CONFIG_ARCH_INTERRUPTSTACK & ~7);
+
+  /* Show interrupt stack info */
+
+  _alert("sp:     %08x\n", sp);
+  _alert("IRQ stack:\n");
+  _alert("  base: %08x\n", istackbase);
+  _alert("  size: %08x\n", istacksize);
+#ifdef CONFIG_STACK_COLORATION
+  _alert("  used: %08x\n", up_check_intstack());
+#endif
+
+  /* Does the current stack pointer lie within the interrupt
+   * stack?  (The stack grows downward from istackbase.)
+   */
+
+  if (sp <= istackbase && sp > istackbase - istacksize)
+    {
+      /* Yes.. dump the interrupt stack */
+
+      up_stackdump(sp, istackbase);
+    }
+  else if (CURRENT_REGS)
+    {
+      /* We are in an interrupt handler but sp is out of range; dump
+       * the whole interrupt stack region for diagnosis.
+       */
+
+      _alert("ERROR: Stack pointer is not within the interrupt stack\n");
+      up_stackdump(istackbase - istacksize, istackbase);
+    }
+
+  /* Extract the user stack pointer if we are in an interrupt handler.
+   * If we are not in an interrupt handler.  Then sp is the user stack
+   * pointer (and the above range check should have failed).
+   */
+
+  if (CURRENT_REGS)
+    {
+      sp = CURRENT_REGS[REG_R13];
+      _alert("sp:     %08x\n", sp);
+    }
+
+  _alert("User stack:\n");
+  _alert("  base: %08x\n", ustackbase);
+  _alert("  size: %08x\n", ustacksize);
+#ifdef CONFIG_STACK_COLORATION
+  _alert("  used: %08x\n", up_check_tcbstack(rtcb));
+#endif
+
+  /* Dump the user stack if the stack pointer lies within the allocated user
+   * stack memory.
+   */
+
+  if (sp <= ustackbase && sp > ustackbase - ustacksize)
+    {
+      up_stackdump(sp, ustackbase);
+    }
+  else
+    {
+      _alert("ERROR: Stack pointer is not within the allocated stack\n");
+      up_stackdump(ustackbase - ustacksize, ustackbase);
+    }
+
+#else
+
+  /* No interrupt stack is configured: show user stack info only */
+
+  _alert("sp:         %08x\n", sp);
+  _alert("stack base: %08x\n", ustackbase);
+  _alert("stack size: %08x\n", ustacksize);
+#ifdef CONFIG_STACK_COLORATION
+  _alert("stack used: %08x\n", up_check_tcbstack(rtcb));
+#endif
+
+  /* Dump the user stack if the stack pointer lies within the allocated user
+   * stack memory.
+   */
+
+  if (sp > ustackbase || sp <= ustackbase - ustacksize)
+    {
+      _alert("ERROR: Stack pointer is not within the allocated stack\n");
+      up_stackdump(ustackbase - ustacksize, ustackbase);
+    }
+  else
+    {
+      up_stackdump(sp, ustackbase);
+    }
+
+#endif
+
+#ifdef CONFIG_SMP
+  /* Show the CPU number */
+
+  _alert("CPU%d:\n", up_cpu_index());
+#endif
+
+  /* Dump the state of all tasks (if available) */
+
+  up_showtasks();
+
+#ifdef CONFIG_ARCH_USBDUMP
+  /* Dump USB trace data */
+
+  usbtrace_enumerate(assert_tracecallback, NULL);
+#endif
+}
+#else
+# define up_dumpstate()
+#endif
+
+/****************************************************************************
+ * Name: _up_assert
+ ****************************************************************************/
+
+/* Terminal behavior after an assertion: either hang forever (blinking
+ * the panic LED, optionally resetting the board) when the assertion
+ * occurred in an interrupt handler or in the IDLE task, or exit the
+ * failing task with 'errorcode' otherwise.  This function never
+ * returns.
+ */
+
+static void _up_assert(int errorcode) noreturn_function;
+static void _up_assert(int errorcode)
+{
+  /* Flush any buffered SYSLOG data */
+
+  syslog_flush();
+
+  /* Are we in an interrupt handler or the idle task? */
+
+  if (CURRENT_REGS || (running_task())->flink == NULL)
+    {
+      /* Cannot recover by exiting a task: disable interrupts and spin
+       * here forever.
+       */
+
+      up_irq_save();
+      for (; ; )
+        {
+#ifdef CONFIG_SMP
+          /* Try (again) to stop activity on other CPUs */
+
+          spin_trylock(&g_cpu_irqlock);
+#endif
+
+#if CONFIG_BOARD_RESET_ON_ASSERT >= 1
+          board_reset(CONFIG_BOARD_ASSERT_RESET_VALUE);
+#endif
+#ifdef CONFIG_ARCH_LEDS
+          /* Blink the panic LED at roughly 2Hz while hanging */
+
+          board_autoled_on(LED_PANIC);
+          up_mdelay(250);
+          board_autoled_off(LED_PANIC);
+          up_mdelay(250);
+#endif
+        }
+    }
+  else
+    {
+#if CONFIG_BOARD_RESET_ON_ASSERT >= 2
+      board_reset(CONFIG_BOARD_ASSERT_RESET_VALUE);
+#endif
+      exit(errorcode);
+    }
+}
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_assert
+ ****************************************************************************/
+
+/* Architecture entry point for a failed assertion: report the failure
+ * location (and CPU/task when configured), dump machine state, invoke
+ * the optional board crash-dump hook, and then terminate via
+ * _up_assert() (which does not return).
+ */
+
+void up_assert(const uint8_t *filename, int lineno)
+{
+  /* rtcb is only needed to print the task name below */
+
+#if CONFIG_TASK_NAME_SIZE > 0 && defined(CONFIG_DEBUG_ALERT)
+  struct tcb_s *rtcb = running_task();
+#endif
+
+  board_autoled_on(LED_ASSERTION);
+
+  /* Flush any buffered SYSLOG data (prior to the assertion) */
+
+  syslog_flush();
+
+#ifdef CONFIG_SMP
+#if CONFIG_TASK_NAME_SIZE > 0
+  _alert("Assertion failed CPU%d at file:%s line: %d task: %s\n",
+        up_cpu_index(), filename, lineno, rtcb->name);
+#else
+  _alert("Assertion failed CPU%d at file:%s line: %d\n",
+        up_cpu_index(), filename, lineno);
+#endif
+#else
+#if CONFIG_TASK_NAME_SIZE > 0
+  _alert("Assertion failed at file:%s line: %d task: %s\n",
+        filename, lineno, rtcb->name);
+#else
+  _alert("Assertion failed at file:%s line: %d\n",
+        filename, lineno);
+#endif
+#endif
+
+  up_dumpstate();
+
+  /* Flush any buffered SYSLOG data (from the above) */
+
+  syslog_flush();
+
+#ifdef CONFIG_BOARD_CRASHDUMP
+  /* Give the board-level logic a chance to save the crash context */
+
+  board_crashdump(up_getsp(), running_task(), filename, lineno);
+#endif
+
+  _up_assert(EXIT_FAILURE);
+}
diff --git a/arch/arm/src/armv8-m/up_blocktask.c b/arch/arm/src/armv8-m/up_blocktask.c
new file mode 100755
index 0000000..e0bd82f
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_blocktask.c
@@ -0,0 +1,147 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_blocktask.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdbool.h>
+#include <sched.h>
+#include <debug.h>
+
+#include <nuttx/arch.h>
+#include <nuttx/sched.h>
+
+#include "sched/sched.h"
+#include "up_internal.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_block_task
+ *
+ * Description:
+ *   The currently executing task at the head of the ready to run list must
+ *   be stopped.  Save its context and move it to the inactive list
+ *   specified by task_state.
+ *
+ * Input Parameters:
+ *   tcb: Refers to a task in the ready-to-run list (normally the task at
+ *     the head of the list).  It must be stopped, its context saved and
+ *     moved into one of the waiting task lists.  If it was the task at the
+ *     head of the ready-to-run list, then a context switch to the new
+ *     ready to run task must be performed.
+ *   task_state: Specifies which waiting task list should hold the blocked
+ *     task TCB.
+ *
+ ****************************************************************************/
+
+void up_block_task(struct tcb_s *tcb, tstate_t task_state)
+{
+  struct tcb_s *rtcb = this_task();
+  bool switch_needed;
+
+  /* Verify that the context switch can be performed: the task must
+   * currently be in a ready-to-run state.
+   */
+
+  DEBUGASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
+              (tcb->task_state <= LAST_READY_TO_RUN_STATE));
+
+  /* Remove the tcb task from the ready-to-run list.  If we are blocking the
+   * task at the head of the task list (the most likely case), then a
+   * context switch to the next ready-to-run task is needed. In this case,
+   * it should also be true that rtcb == tcb.
+   */
+
+  switch_needed = sched_removereadytorun(tcb);
+
+  /* Add the task to the specified blocked task list */
+
+  sched_addblocked(tcb, (tstate_t)task_state);
+
+  /* If there are any pending tasks, then add them to the ready-to-run
+   * task list now
+   */
+
+  if (g_pendingtasks.head)
+    {
+      switch_needed |= sched_mergepending();
+    }
+
+  /* Now, perform the context switch if one is needed */
+
+  if (switch_needed)
+    {
+      /* Update scheduler parameters */
+
+      sched_suspend_scheduler(rtcb);
+
+      /* Are we in an interrupt handler? */
+
+      if (CURRENT_REGS)
+        {
+          /* Yes, then we have to do things differently.
+           * Just copy the CURRENT_REGS into the OLD rtcb.
+           */
+
+          up_savestate(rtcb->xcp.regs);
+
+          /* Restore the exception context of the rtcb at the (new) head
+           * of the ready-to-run task list.
+           */
+
+          rtcb = this_task();
+
+          /* Reset scheduler parameters */
+
+          sched_resume_scheduler(rtcb);
+
+          /* Then switch contexts.  The actual transfer of control
+           * happens when the interrupt handler returns, restoring
+           * the registers installed into CURRENT_REGS here.
+           */
+
+          up_restorestate(rtcb->xcp.regs);
+        }
+
+      /* No, then we will need to perform the user context switch */
+
+      else
+        {
+          struct tcb_s *nexttcb = this_task();
+
+          /* Reset scheduler parameters */
+
+          sched_resume_scheduler(nexttcb);
+
+          /* Switch context to the context of the task at the head of the
+           * ready to run list.
+           */
+
+          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+
+          /* up_switchcontext forces a context switch to the task at the
+           * head of the ready-to-run list.  It does not 'return' in the
+           * normal sense.  When it does return, it is because the blocked
+           * task is again ready to run and has execution priority.
+           */
+        }
+    }
+}
diff --git a/arch/arm/src/armv8-m/up_cache.c b/arch/arm/src/armv8-m/up_cache.c
new file mode 100755
index 0000000..863f185
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_cache.c
@@ -0,0 +1,823 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_cache.c
+ *
+ *   Copyright (C) 2015, 2018-2019 Gregory Nutt. All rights reserved.
+ *   Author: Gregory Nutt <gnutt@nuttx.org>
+ *           Bob Feretich <bob.feretich@rafresearch.com>
+ *
+ * Some logic in this header file derives from the ARM CMSIS core_cm7.h
+ * header file which has a compatible 3-clause BSD license:
+ *
+ *   Copyright (c) 2009 - 2014 ARM LIMITED.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name ARM, NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <nuttx/cache.h>
+
+#include "up_arch.h"
+#include "barriers.h"
+#include "nvic.h"
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* Cache Size ID (CCSIDR) register macros used by inline functions
+ * Given the value of the CCSIDR reginer (n):
+ *
+ *   CCSIDR_WAYS    - Returns the (number of ways) - 1
+ *   CCSIDR_SETS    - Returns the (number of sets) - 1
+ *   CCSIDR_LSSHIFT - Returns log2(cache line size in words) - 2
+ *                    Eg. 0 -> 4 words
+ *                        1 -> 8 words
+ *                        ...
+ */
+
+#define CCSIDR_WAYS(n) \
+  (((n) & NVIC_CCSIDR_ASSOCIATIVITY_MASK) >> NVIC_CCSIDR_ASSOCIATIVITY_SHIFT)
+#define CCSIDR_SETS(n) \
+  (((n) & NVIC_CCSIDR_NUMSETS_MASK) >> NVIC_CCSIDR_NUMSETS_SHIFT)
+#define CCSIDR_LSSHIFT(n) \
+  (((n) & NVIC_CCSIDR_LINESIZE_MASK) >> NVIC_CCSIDR_LINESIZE_SHIFT)
+
+/****************************************************************************
+ * Inline Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: arm_clz
+ *
+ * Description:
+ *   Access to CLZ instructions
+ *
+ * Input Parameters:
+ *   value - The value to perform the CLZ operation on
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+static inline uint32_t arm_clz(unsigned int value)
+{
+  uint32_t nlz;
+
+  /* Count leading zeroes with the CLZ instruction (pure computation,
+   * so no volatile qualifier is required).
+   */
+
+  __asm__ __volatile__ ("clz %0, %1" : "=r"(nlz) : "r"(value));
+  return nlz;
+}
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_enable_icache
+ *
+ * Description:
+ *   Enable the I-Cache
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_ICACHE
+void up_enable_icache(void)
+{
+  uint32_t ccr;
+
+  /* Ensure all outstanding memory accesses and fetches complete
+   * before touching the cache.
+   */
+
+  ARM_DSB();
+  ARM_ISB();
+
+  /* Invalidate the entire I-Cache so that no stale lines remain when
+   * caching is turned on.
+   */
+
+  putreg32(0, NVIC_ICIALLU);
+
+  /* Set the IC bit in CFGCON to enable the I-Cache */
+
+  ccr = getreg32(NVIC_CFGCON);
+  putreg32(ccr | NVIC_CFGCON_IC, NVIC_CFGCON);
+
+  /* Synchronize again so the enable takes effect immediately */
+
+  ARM_DSB();
+  ARM_ISB();
+}
+#endif
+
+/****************************************************************************
+ * Name: up_disable_icache
+ *
+ * Description:
+ *   Disable the I-Cache
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_ICACHE
+void up_disable_icache(void)
+{
+  uint32_t ccr;
+
+  /* Drain outstanding accesses before changing the cache state */
+
+  ARM_DSB();
+  ARM_ISB();
+
+  /* Clear the IC bit in CFGCON to disable the I-Cache */
+
+  ccr = getreg32(NVIC_CFGCON);
+  putreg32(ccr & ~NVIC_CFGCON_IC, NVIC_CFGCON);
+
+  /* Then discard the (now disabled) I-Cache contents entirely */
+
+  putreg32(0, NVIC_ICIALLU);
+
+  /* Synchronize so the disable is visible before returning */
+
+  ARM_DSB();
+  ARM_ISB();
+}
+#endif
+
+/****************************************************************************
+ * Name: up_invalidate_icache_all
+ *
+ * Description:
+ *   Invalidate the entire contents of I cache.
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_ICACHE
+void up_invalidate_icache_all(void)
+{
+  /* Synchronize, write ICIALLU to invalidate the whole I-Cache, then
+   * synchronize again so the invalidation completes before return.
+   */
+
+  ARM_DSB();
+  ARM_ISB();
+
+  putreg32(0, NVIC_ICIALLU);
+
+  ARM_DSB();
+  ARM_ISB();
+}
+#endif
+
+/****************************************************************************
+ * Name: up_enable_dcache
+ *
+ * Description:
+ *   Enable the D-Cache
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_DCACHE
+void up_enable_dcache(void)
+{
+  uint32_t ccsidr;
+  uint32_t ccr;
+  uint32_t sshift;
+  uint32_t wshift;
+  uint32_t sw;
+  uint32_t sets;
+  uint32_t ways;
+
+  /* Get the characteristics of the D-Cache */
+
+  ccsidr = getreg32(NVIC_CCSIDR);
+  sets   = CCSIDR_SETS(ccsidr);          /* (Number of sets) - 1 */
+  sshift = CCSIDR_LSSHIFT(ccsidr) + 4;   /* log2(cache-line-size-in-bytes) */
+  ways   = CCSIDR_WAYS(ccsidr);          /* (Number of ways) - 1 */
+
+  /* Calculate the bit offset for the way field in the DCISW register by
+   * counting the number of leading zeroes.  For example:
+   *
+   *   Number of  Value of ways  Field
+   *   Ways       'ways'         Offset
+   *     2         1             31
+   *     4         3             30
+   *     8         7             29
+   *   ...
+   */
+
+  wshift = arm_clz(ways) & 0x1f;
+
+  /* Invalidate the entire D-Cache by writing every set/way pair to
+   * DCISW.  Both count-down loops run to and including zero; 'sets'
+   * is consumed by the outer loop.
+   */
+
+  ARM_DSB();
+  do
+    {
+      int32_t tmpways = ways;
+
+      do
+        {
+          sw = ((tmpways << wshift) | (sets << sshift));
+          putreg32(sw, NVIC_DCISW);
+        }
+      while (tmpways--);
+    }
+  while (sets--);
+
+  ARM_DSB();
+
+  /* Optionally force write-through behavior before enabling */
+
+#ifdef CONFIG_ARMV8M_DCACHE_WRITETHROUGH
+  ccr = getreg32(NVIC_CACR);
+  ccr |= NVIC_CACR_FORCEWT;
+  putreg32(ccr, NVIC_CACR);
+#endif
+
+  /* Enable the D-Cache */
+
+  ccr  = getreg32(NVIC_CFGCON);
+  ccr |= NVIC_CFGCON_DC;
+  putreg32(ccr, NVIC_CFGCON);
+
+  ARM_DSB();
+  ARM_ISB();
+}
+#endif /* CONFIG_ARMV8M_DCACHE */
+
+/****************************************************************************
+ * Name: up_disable_dcache
+ *
+ * Description:
+ *   Disable the D-Cache
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_DCACHE
+void up_disable_dcache(void)
+{
+  uint32_t ccsidr;
+  uint32_t ccr;
+  uint32_t sshift;
+  uint32_t wshift;
+  uint32_t sw;
+  uint32_t sets;
+  uint32_t ways;
+
+  /* Get the characteristics of the D-Cache */
+
+  ccsidr = getreg32(NVIC_CCSIDR);
+  sets   = CCSIDR_SETS(ccsidr);          /* (Number of sets) - 1 */
+  sshift = CCSIDR_LSSHIFT(ccsidr) + 4;   /* log2(cache-line-size-in-bytes) */
+  ways   = CCSIDR_WAYS(ccsidr);          /* (Number of ways) - 1 */
+
+  /* Calculate the bit offset for the way field in the DCCISW register by
+   * counting the number of leading zeroes.  For example:
+   *
+   *   Number of  Value of ways  Field
+   *   Ways       'ways'         Offset
+   *     2         1             31
+   *     4         3             30
+   *     8         7             29
+   *   ...
+   */
+
+  wshift = arm_clz(ways) & 0x1f;
+
+  ARM_DSB();
+
+  /* Disable the D-Cache */
+
+  ccr = getreg32(NVIC_CFGCON);
+  ccr &= ~NVIC_CFGCON_DC;
+  putreg32(ccr, NVIC_CFGCON);
+
+  /* Clean and invalidate the entire D-Cache by writing every set/way
+   * pair to DCCISW, so no dirty data is lost after the disable.
+   */
+
+  do
+    {
+      int32_t tmpways = ways;
+
+      do
+        {
+          sw = ((tmpways << wshift) | (sets << sshift));
+          putreg32(sw, NVIC_DCCISW);
+        }
+      while (tmpways--);
+    }
+  while (sets--);
+
+  ARM_DSB();
+  ARM_ISB();
+}
+#endif /* CONFIG_ARMV8M_DCACHE */
+
+/****************************************************************************
+ * Name: up_invalidate_dcache
+ *
+ * Description:
+ *   Invalidate the data cache within the specified region; we will be
+ *   performing a DMA operation in this region and we want to purge old data
+ *   in the cache. Note that this function invalidates all cache ways
+ *   in sets that could be associated with the address range, regardless of
+ *   whether the address range is contained in the cache or not.
+ *
+ * Input Parameters:
+ *   start - virtual start address of region
+ *   end   - virtual end address of region + 1
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   This operation is not atomic.  This function assumes that the caller
+ *   has exclusive access to the address range so that no harm is done if
+ *   the operation is pre-empted.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_DCACHE
+void up_invalidate_dcache(uintptr_t start, uintptr_t end)
+{
+  uint32_t ccsidr;
+  uint32_t sshift;
+  uint32_t ssize;
+
+  /* Get the characteristics of the D-Cache */
+
+  ccsidr = getreg32(NVIC_CCSIDR);
+  sshift = CCSIDR_LSSHIFT(ccsidr) + 4;   /* log2(cache-line-size-in-bytes) */
+
+  /* Invalidate the D-Cache containing this range of addresses */
+
+  ssize  = (1 << sshift);
+
+  /* Round down the start address to the nearest cache line boundary.
+   * For example, with a 32-byte cache line:
+   *
+   *   sshift       = 5      : log2 of the cache line size
+   *   (ssize - 1)  = 0x001f : Mask of the byte-offset-within-line bits
+   */
+
+  start &= ~(ssize - 1);
+  ARM_DSB();
+
+  do
+    {
+      /* The below store causes the cache to check its directory and
+       * determine if this address is contained in the cache. If so, it
+       * invalidates that cache line. Only the cache way containing the
+       * address is invalidated. If the address is not in the cache, then
+       * nothing is invalidated.
+       */
+
+      putreg32(start, NVIC_DCIMVAC);
+
+      /* Increment the address by the size of one cache line. */
+
+      start += ssize;
+    }
+  while (start < end);
+
+  ARM_DSB();
+  ARM_ISB();
+}
+#endif /* CONFIG_ARMV8M_DCACHE */
+
+/****************************************************************************
+ * Name: up_invalidate_dcache_all
+ *
+ * Description:
+ *   Invalidate the entire contents of D cache.
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_DCACHE
+void up_invalidate_dcache_all(void)
+{
+  uint32_t ccsidr;
+  uint32_t sshift;
+  uint32_t wshift;
+  uint32_t sw;
+  uint32_t sets;
+  uint32_t ways;
+
+  /* Get the characteristics of the D-Cache */
+
+  ccsidr = getreg32(NVIC_CCSIDR);
+  sets   = CCSIDR_SETS(ccsidr);          /* (Number of sets) - 1 */
+  sshift = CCSIDR_LSSHIFT(ccsidr) + 4;   /* log2(cache-line-size-in-bytes) */
+  ways   = CCSIDR_WAYS(ccsidr);          /* (Number of ways) - 1 */
+
+  /* Calculate the bit offset for the way field in the DCISW register by
+   * counting the number of leading zeroes.  For example:
+   *
+   *   Number of  Value of ways  Field
+   *   Ways       'ways'         Offset
+   *     2         1             31
+   *     4         3             30
+   *     8         7             29
+   *   ...
+   */
+
+  wshift = arm_clz(ways) & 0x1f;
+
+  ARM_DSB();
+
+  /* Invalidate the entire D-Cache by writing every set/way pair to
+   * DCISW.  Both count-down loops run to and including zero.
+   */
+
+  do
+    {
+      int32_t tmpways = ways;
+
+      do
+        {
+          sw = ((tmpways << wshift) | (sets << sshift));
+          putreg32(sw, NVIC_DCISW);
+        }
+      while (tmpways--);
+    }
+  while (sets--);
+
+  ARM_DSB();
+  ARM_ISB();
+}
+#endif /* CONFIG_ARMV8M_DCACHE */
+
+/****************************************************************************
+ * Name: up_clean_dcache
+ *
+ * Description:
+ *   Clean the data cache within the specified region by flushing the
+ *   contents of the data cache to memory.
+ *
+ *   NOTE: This operation is un-necessary if the DCACHE is configured in
+ *   write-through mode.
+ *
+ * Input Parameters:
+ *   start - virtual start address of region
+ *   end   - virtual end address of region + 1
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   This operation is not atomic.  This function assumes that the caller
+ *   has exclusive access to the address range so that no harm is done if
+ *   the operation is pre-empted.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_DCACHE
+void up_clean_dcache(uintptr_t start, uintptr_t end)
+{
+#ifndef CONFIG_ARMV8M_DCACHE_WRITETHROUGH
+  uint32_t ccsidr;
+  uint32_t sshift;
+  uint32_t ssize;
+
+  /* Get the characteristics of the D-Cache */
+
+  ccsidr = getreg32(NVIC_CCSIDR);
+  sshift = CCSIDR_LSSHIFT(ccsidr) + 4;   /* log2(cache-line-size-in-bytes) */
+
+  /* Clean the D-Cache over the range of addresses */
+
+  ssize  = (1 << sshift);
+  start &= ~(ssize - 1);
+  ARM_DSB();
+
+  do
+    {
+      /* The below store causes the cache to check its directory and
+       * determine if this address is contained in the cache. If so, it
+       * cleans that cache line. Only the cache way containing the
+       * address is cleaned. If the address is not in the cache, then
+       * nothing is cleaned.
+       */
+
+      putreg32(start, NVIC_DCCMVAC);
+
+      /* Increment the address by the size of one cache line. */
+
+      start += ssize;
+    }
+  while (start < end);
+
+  ARM_DSB();
+  ARM_ISB();
+#endif /* !CONFIG_ARMV8M_DCACHE_WRITETHROUGH */
+}
+#endif /* CONFIG_ARMV8M_DCACHE */
+
+/****************************************************************************
+ * Name: up_clean_dcache_all
+ *
+ * Description:
+ *   Clean the entire data cache within the specified region by flushing the
+ *   contents of the data cache to memory.
+ *
+ *   NOTE: This operation is un-necessary if the DCACHE is configured in
+ *   write-through mode.
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   This operation is not atomic.  This function assumes that the caller
+ *   has exclusive access to the address range so that no harm is done if
+ *   the operation is pre-empted.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_DCACHE
+void up_clean_dcache_all(void)
+{
+#ifndef CONFIG_ARMV8M_DCACHE_WRITETHROUGH
+  uint32_t ccsidr;
+  uint32_t sshift;
+  uint32_t wshift;
+  uint32_t sw;
+  uint32_t sets;
+  uint32_t ways;
+
+  /* Get the characteristics of the D-Cache */
+
+  ccsidr = getreg32(NVIC_CCSIDR);
+  sets   = CCSIDR_SETS(ccsidr);          /* (Number of sets) - 1 */
+  sshift = CCSIDR_LSSHIFT(ccsidr) + 4;   /* log2(cache-line-size-in-bytes) */
+  ways   = CCSIDR_WAYS(ccsidr);          /* (Number of ways) - 1 */
+
+  /* Calculate the bit offset for the way field in the DCCSW register by
+   * counting the number of leading zeroes.  For example:
+   *
+   *   Number of  Value of ways  Field
+   *   Ways       'ways'         Offset
+   *     2         1             31
+   *     4         3             30
+   *     8         7             29
+   *   ...
+   */
+
+  wshift = arm_clz(ways) & 0x1f;
+
+  ARM_DSB();
+
+  /* Clean the entire D-Cache */
+
+  do
+    {
+      int32_t tmpways = ways;
+
+      do
+        {
+          sw = ((tmpways << wshift) | (sets << sshift));
+          putreg32(sw, NVIC_DCCSW);
+        }
+      while (tmpways--);
+    }
+  while (sets--);
+
+  ARM_DSB();
+  ARM_ISB();
+#endif /* !CONFIG_ARMV8M_DCACHE_WRITETHROUGH */
+}
+#endif /* CONFIG_ARMV8M_DCACHE */
+
+/****************************************************************************
+ * Name: up_flush_dcache
+ *
+ * Description:
+ *   Flush the data cache within the specified region by cleaning and
+ *   invalidating the D cache.
+ *
+ *   NOTE: If DCACHE write-through is configured, then this operation is the
+ *   same as up_invalidate_dcache().
+ *
+ * Input Parameters:
+ *   start - virtual start address of region
+ *   end   - virtual end address of region + 1
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   This operation is not atomic.  This function assumes that the caller
+ *   has exclusive access to the address range so that no harm is done if
+ *   the operation is pre-empted.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_DCACHE
+void up_flush_dcache(uintptr_t start, uintptr_t end)
+{
+#ifndef CONFIG_ARMV8M_DCACHE_WRITETHROUGH
+  uint32_t ccsidr;
+  uint32_t sshift;
+  uint32_t ssize;
+
+  /* Get the characteristics of the D-Cache */
+
+  ccsidr = getreg32(NVIC_CCSIDR);
+  sshift = CCSIDR_LSSHIFT(ccsidr) + 4;   /* log2(cache-line-size-in-bytes) */
+
+  /* Clean and invalidate the D-Cache over the range of addresses */
+
+  ssize  = (1 << sshift);
+  start &= ~(ssize - 1);
+  ARM_DSB();
+
+  do
+    {
+      /* The below store causes the cache to check its directory and
+       * determine if this address is contained in the cache. If so, it cleans
+       * and invalidates that cache line. Only the cache way containing the
+       * address is invalidated. If the address is not in the cache, then
+       * nothing is invalidated.
+       */
+
+      putreg32(start, NVIC_DCCIMVAC);
+
+      /* Increment the address by the size of one cache line. */
+
+      start += ssize;
+    }
+  while (start < end);
+
+  ARM_DSB();
+  ARM_ISB();
+#else
+  up_invalidate_dcache(start, end);
+#endif /* !CONFIG_ARMV8M_DCACHE_WRITETHROUGH */
+}
+#endif /* CONFIG_ARMV8M_DCACHE */
+
+/****************************************************************************
+ * Name: up_flush_dcache_all
+ *
+ * Description:
+ *   Flush the entire data cache by cleaning and invalidating the D cache.
+ *
+ *   NOTE: If DCACHE write-through is configured, then this operation is the
+ *   same as up_invalidate_dcache_all().
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   This operation is not atomic.  This function assumes that the caller
+ *   has exclusive access to the address range so that no harm is done if
+ *   the operation is pre-empted.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_DCACHE
+void up_flush_dcache_all(void)
+{
+#ifndef CONFIG_ARMV8M_DCACHE_WRITETHROUGH
+  uint32_t ccsidr;
+  uint32_t sshift;
+  uint32_t wshift;
+  uint32_t sw;
+  uint32_t sets;
+  uint32_t ways;
+
+  /* Get the characteristics of the D-Cache */
+
+  ccsidr = getreg32(NVIC_CCSIDR);
+  sets   = CCSIDR_SETS(ccsidr);          /* (Number of sets) - 1 */
+  sshift = CCSIDR_LSSHIFT(ccsidr) + 4;   /* log2(cache-line-size-in-bytes) */
+  ways   = CCSIDR_WAYS(ccsidr);          /* (Number of ways) - 1 */
+
+  /* Calculate the bit offset for the way field in the DCCISW register by
+   * counting the number of leading zeroes.  For example:
+   *
+   *   Number of  Value of ways  Field
+   *   Ways       'ways'         Offset
+   *     2         1             31
+   *     4         3             30
+   *     8         7             29
+   *   ...
+   */
+
+  wshift = arm_clz(ways) & 0x1f;
+
+  ARM_DSB();
+
+  /* Clean and invalidate the entire D-Cache */
+
+  do
+    {
+      int32_t tmpways = ways;
+
+      do
+        {
+          sw = ((tmpways << wshift) | (sets << sshift));
+          putreg32(sw, NVIC_DCCISW);
+        }
+      while (tmpways--);
+    }
+  while (sets--);
+
+  ARM_DSB();
+  ARM_ISB();
+#else
+  up_invalidate_dcache_all();
+#endif /* !CONFIG_ARMV8M_DCACHE_WRITETHROUGH */
+}
+#endif /* CONFIG_ARMV8M_DCACHE */
+
+/****************************************************************************
+ * Name: up_coherent_dcache
+ *
+ * Description:
+ *   Ensure that the I and D caches are coherent within specified region
+ *   by cleaning the D cache (i.e., flushing the D cache contents to memory)
+ *   and invalidating the I cache. This is typically used when code has been
+ *   written to a memory region, and will be executed.
+ *
+ * Input Parameters:
+ *   addr - virtual start address of region
+ *   len  - Size of the address region in bytes
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARMV8M_ICACHE
+void up_coherent_dcache(uintptr_t addr, size_t len)
+{
+  uintptr_t end;
+
+  if (len > 0)
+    {
+      /* Flush any dirty D-Cache lines to memory */
+
+      end = addr + len;
+      up_clean_dcache(addr, end);
+      UNUSED(end);
+
+      /* Invalidate the entire I-Cache */
+
+      up_invalidate_icache_all();
+    }
+}
+#endif
diff --git a/arch/arm/src/armv8-m/up_copyarmstate.c b/arch/arm/src/armv8-m/up_copyarmstate.c
new file mode 100755
index 0000000..87ab5b3
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_copyarmstate.c
@@ -0,0 +1,90 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_copyarmstate.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+
+#include <arch/irq.h>
+
+#include "up_internal.h"
+
+#if defined(CONFIG_ARCH_FPU) && defined(CONFIG_ARMV8M_LAZYFPU)
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_copyarmstate
+ *
+ * Description:
+ *    Copy the ARM portion of the register save area (omitting the floating
+ *    point registers) and save the floating point registers directly.
+ *
+ ****************************************************************************/
+
+void up_copyarmstate(uint32_t *dest, uint32_t *src)
+{
+  int i;
+
+  /* In the Cortex-M3 model, the state is copied from the stack to the TCB,
+   * but only a reference is passed to get the state from the TCB.  So the
+   * following check avoids copying the TCB save area onto itself:
+   */
+
+  if (src != dest)
+    {
+      /* Save the floating point registers: This will initialize the floating
+       * registers at indices SW_INT_REGS through (SW_INT_REGS+SW_FPU_REGS-1)
+       */
+
+      up_savefpu(dest);
+
+      /* Save the block of ARM registers that were saved by the interrupt
+       * handling logic.  Indices: 0 through (SW_INT_REGS-1).
+       */
+
+      for (i = 0; i < SW_INT_REGS; i++)
+        {
+          *dest++ = *src++;
+        }
+
+      /* Skip over the floating point registers and save the block of ARM
+       * registers that were saved by the hardware when the interrupt was
+       * taken.  Indices: (SW_INT_REGS+SW_FPU_REGS) through
+       * (XCPTCONTEXT_REGS-1)
+       */
+
+      src  += SW_FPU_REGS;
+      dest += SW_FPU_REGS;
+
+      for (i = 0; i < HW_XCPT_REGS; i++)
+        {
+          *dest++ = *src++;
+        }
+    }
+}
+
+#endif /* CONFIG_ARCH_FPU && CONFIG_ARMV8M_LAZYFPU */
diff --git a/arch/arm/src/armv8-m/up_copyfullstate.c b/arch/arm/src/armv8-m/up_copyfullstate.c
new file mode 100755
index 0000000..cfd6fe8
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_copyfullstate.c
@@ -0,0 +1,62 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_copyfullstate.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+#include <arch/irq.h>
+
+#include "up_internal.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_copyfullstate
+ *
+ * Description:
+ *    Copy the entire register save area (including the floating point
+ *    registers if applicable).  This is a little faster than most memcpy's
+ *    since it does 32-bit transfers.
+ *
+ ****************************************************************************/
+
+void up_copyfullstate(uint32_t *dest, uint32_t *src)
+{
+  int i;
+
+  /* In the Cortex-M3 model, the state is copied from the stack to the TCB,
+   * but only a reference is passed to get the state from the TCB.  So the
+   * following check avoids copying the TCB save area onto itself:
+   */
+
+  if (src != dest)
+    {
+      for (i = 0; i < XCPTCONTEXT_REGS; i++)
+        {
+          *dest++ = *src++;
+        }
+    }
+}
diff --git a/arch/arm/src/armv8-m/up_doirq.c b/arch/arm/src/armv8-m/up_doirq.c
new file mode 100755
index 0000000..7349da9
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_doirq.c
@@ -0,0 +1,90 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_doirq.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+#include <assert.h>
+
+#include <nuttx/irq.h>
+#include <nuttx/arch.h>
+#include <nuttx/board.h>
+#include <arch/board/board.h>
+
+#include "up_arch.h"
+#include "up_internal.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+uint32_t *up_doirq(int irq, uint32_t *regs)
+{
+  board_autoled_on(LED_INIRQ);
+#ifdef CONFIG_SUPPRESS_INTERRUPTS
+  PANIC();
+#else
+  uint32_t *savestate;
+
+  /* Nested interrupts are not supported in this implementation.  If you
+   * want to implement nested interrupts, you would have to (1) change the
+   * way that CURRENT_REGS is handled and (2) the design associated with
+   * CONFIG_ARCH_INTERRUPTSTACK.  The savestate variable will not work for
+   * that purpose as implemented here because only the outermost nested
+   * interrupt can result in a context switch.
+   */
+
+  /* Current regs non-zero indicates that we are processing an interrupt;
+   * CURRENT_REGS is also used to manage interrupt level context switches.
+   */
+
+  savestate    = (uint32_t *)CURRENT_REGS;
+  CURRENT_REGS = regs;
+
+  /* Acknowledge the interrupt */
+
+  up_ack_irq(irq);
+
+  /* Deliver the IRQ */
+
+  irq_dispatch(irq, regs);
+
+  /* If a context switch occurred while processing the interrupt then
+   * CURRENT_REGS may have changed value.  If we return any value different
+   * from the input regs, then the lower level will know that a context
+   * switch occurred during interrupt processing.
+   */
+
+  regs = (uint32_t *)CURRENT_REGS;
+
+  /* Restore the previous value of CURRENT_REGS.  NULL would indicate that
+   * we are no longer in an interrupt handler.  It will be non-NULL if we
+   * are returning from a nested interrupt.
+   */
+
+  CURRENT_REGS = savestate;
+#endif
+  board_autoled_off(LED_INIRQ);
+  return regs;
+}
diff --git a/arch/arm/src/armv8-m/up_exception.S b/arch/arm/src/armv8-m/up_exception.S
new file mode 100755
index 0000000..048a6c3
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_exception.S
@@ -0,0 +1,331 @@
+/************************************************************************************
+ * arch/arm/src/armv8-m/gnu/up_exception.S
+ *
+ *   Copyright (C) 2009-2013, 2015-2016, 2018 Gregory Nutt. All rights reserved.
+ *   Copyright (C) 2012 Michael Smith. All rights reserved.
+ *   Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************************************/
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <arch/irq.h>
+#include <arch/armv8-m/nvicpri.h>
+
+#include "chip.h"
+#include "exc_return.h"
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+/* Configuration ********************************************************************/
+
+#ifdef CONFIG_ARCH_HIPRI_INTERRUPT
+  /* In kernel mode without an interrupt stack, this interrupt handler will set the
+   * MSP to the stack pointer of the interrupted thread.  If the interrupted thread
+   * was a privileged thread, that will be the MSP otherwise it will be the PSP.  If
+   * the PSP is used, then the value of the MSP will be invalid when the interrupt
+   * handler returns because it will be a pointer to an old position in the
+   * unprivileged stack.  Then when the high priority interrupt occurs and uses this
+   * stale MSP, there will most likely be a system failure.
+   *
+   * If the interrupt stack is selected, on the other hand, then the interrupt
+   * handler will always set the MSP to the interrupt stack.  So when the high
+   * priority interrupt occurs, it will either use the MSP of the last privileged
+   * thread to run or, in the case of the nested interrupt, the interrupt stack if
+   * no privileged task has run.
+   */
+
+#  if defined(CONFIG_BUILD_PROTECTED) && CONFIG_ARCH_INTERRUPTSTACK < 4
+#    error Interrupt stack must be used with high priority interrupts in kernel mode
+#  endif
+
+  /* Use the BASEPRI to control interrupts is required if nested, high
+   * priority interrupts are supported.
+   */
+
+#  ifndef CONFIG_ARMV8M_USEBASEPRI
+#    error CONFIG_ARMV8M_USEBASEPRI must be used with CONFIG_ARCH_HIPRI_INTERRUPT
+#  endif
+#endif
+
+/************************************************************************************
+ * Public Symbols
+ ************************************************************************************/
+
+	.globl		exception_common
+
+	.syntax		unified
+	.thumb
+	.file		"up_exception.S"
+
+/************************************************************************************
+ * Macro Definitions
+ ************************************************************************************/
+
+/************************************************************************************
+ * Name: setintstack
+ *
+ * Description:
+ *   Set the current stack pointer to the "top" of the interrupt stack.  Single CPU
+ *   case.  Must be provided by MCU-specific logic in the SMP case.
+ *
+ ************************************************************************************/
+
+#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
+	.macro	setintstack, tmp1, tmp2
+	ldr		sp, =g_intstackbase
+	.endm
+#endif
+
+/************************************************************************************
+ * .text
+ ************************************************************************************/
+
+/* Common exception handling logic.  On entry here, the return stack is on either
+ * the PSP or the MSP and looks like the following:
+ *
+ *      REG_XPSR
+ *      REG_R15
+ *      REG_R14
+ *      REG_R12
+ *      REG_R3
+ *      REG_R2
+ *      REG_R1
+ * MSP->REG_R0
+ *
+ * And
+ *      IPSR contains the IRQ number
+ *      R14 Contains the EXC_RETURN value
+ *      We are in handler mode and the current SP is the MSP
+ *
+ * If CONFIG_ARCH_FPU is defined, the volatile FP registers and FPSCR are on the
+ * return stack immediately above REG_XPSR.
+ */
+
+	.text
+	.type	exception_common, function
+	.thumb_func
+exception_common:
+
+	mrs		r0, ipsr				/* R0=exception number */
+
+	/* Complete the context save */
+
+	/* The EXC_RETURN value tells us whether the context is on the MSP or PSP */
+
+	tst		r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
+	beq		1f						/* Branch if context already on the MSP */
+	mrs		r1, psp					/* R1=The process stack pointer (PSP) */
+	mov     sp, r1					/* Set the MSP to the PSP */
+
+1:
+	mov		r2, sp					/* R2=Copy of the main/process stack pointer */
+	add		r2, #HW_XCPT_SIZE		/* R2=MSP/PSP before the interrupt was taken */
+									/* (ignoring the xPSR[9] alignment bit) */
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+	mrs		r3, basepri				/* R3=Current BASEPRI setting */
+#else
+	mrs		r3, primask				/* R3=Current PRIMASK setting */
+#endif
+
+#ifdef CONFIG_ARCH_FPU
+
+	/* Save the non-volatile FP registers here.
+	 *
+	 * This routine is the only point where we can save these registers; either before
+	 * or after calling up_doirq.  The compiler is free to use them at any time as long
+	 * as they are restored before returning, so we can't assume that we can get at the
+	 * true values of these registers in any routine called from here.
+	 *
+	 * REVISIT: we could do all this saving lazily on the context switch side if we knew
+	 * where to put the registers.
+	 */
+
+	vstmdb	sp!, {s16-s31}			/* Save the non-volatile FP context */
+
+#endif
+
+	stmdb	sp!, {r2-r11,r14}		/* Save the remaining registers plus the SP/PRIMASK values */
+
+	/* There are two arguments to up_doirq:
+	 *
+	 *   R0 = The IRQ number
+	 *   R1 = The top of the stack points to the saved state
+	 */
+
+	mov		r1, sp
+
+	/* Also save the top of the stack in a preserved register */
+
+	mov		r4, sp
+
+#if CONFIG_ARCH_INTERRUPTSTACK > 7
+	/* If CONFIG_ARCH_INTERRUPTSTACK is defined, we will set the MSP to use
+	 * a special interrupt stack pointer.  The way that this is done
+	 * here prohibits nested interrupts without some additional logic!
+	 */
+
+	setintstack	r2, r3
+
+#else
+	/* Otherwise, we will re-use the interrupted thread's stack.  That may
+	 * mean using either MSP or PSP stack for interrupt level processing (in
+	 * kernel mode).
+	 */
+
+	bic		r2, r4, #7				/* Get the stack pointer with 8-byte alignment */
+	mov		sp, r2					/* Instantiate the aligned stack */
+
+#endif
+
+	bl		up_doirq				/* R0=IRQ, R1=register save (msp) */
+	mov		r1, r4					/* Recover R1=main stack pointer */
+
+	/* On return from up_doirq, R0 will hold a pointer to register context
+	 * array to use for the interrupt return.  If that return value is the same
+	 * as current stack pointer, then things are relatively easy.
+	 */
+
+	cmp		r0, r1					/* Context switch? */
+	beq		2f						/* Branch if no context switch */
+
+	/* We are returning with a pending context switch.  This case is different
+	 * because in this case, the register save structure does not lie on the
+	 * stack but, rather within a TCB structure.  We'll have to copy some
+	 * values to the stack.
+	 */
+
+	/* Copy the hardware-saved context to the stack, and restore the software
+	 * saved context directly.
+	 *
+	 * XXX In the normal case, it appears that this entire operation is unnecessary;
+	 *     context switch time would be improved if we could work out when the stack
+	 *     is dirty and avoid the work...
+	 */
+
+	add		r1, r0, #SW_XCPT_SIZE 	/* R1=Address of HW save area in reg array */
+	ldmia	r1!, {r4-r11}			/* Fetch eight registers in HW save area */
+#ifdef CONFIG_ARCH_FPU
+	vldmia	r1!, {s0-s15}			/* Fetch sixteen FP registers in HW save area */
+	ldmia	r1, {r2-r3}				/* Fetch FPSCR and Reserved in HW save area */
+#endif
+	ldr		r1, [r0, #(4*REG_SP)]	/* R1=Value of SP before interrupt */
+#ifdef CONFIG_ARCH_FPU
+	stmdb	r1!, {r2-r3}			/* Store FPSCR and Reserved on the return stack */
+	vstmdb	r1!, {s0-s15}			/* Store sixteen FP registers on the return stack */
+#endif
+	stmdb	r1!, {r4-r11}			/* Store eight registers on the return stack */
+	ldmia	r0!, {r2-r11,r14}		/* Recover R4-R11, r14 + 2 temp values */
+#ifdef CONFIG_ARCH_FPU
+	vldmia	r0, {s16-s31}			/* Recover S16-S31 */
+#endif
+
+	b		3f						/* Re-join common logic */
+
+2:
+	/* We are returning with no context switch.  We simply need to "unwind"
+	 * the same stack frame that we created at entry.
+	 */
+
+	ldmia	r1!, {r2-r11,r14}		/* Recover R4-R11, r14 + 2 temp values */
+#ifdef CONFIG_ARCH_FPU
+	vldmia  r1!, {s16-s31}			/* Recover S16-S31 */
+#endif
+
+3:
+	/* The EXC_RETURN value tells us whether we are returning on the MSP or PSP
+	 */
+
+#ifdef CONFIG_BUILD_PROTECTED
+	/* The EXC_RETURN value will be 0xfffffff9 (privileged thread) or 0xfffffff1
+	 * (handler mode) if the stack is on the MSP.  It can only be on the PSP if
+	 * EXC_RETURN is 0xfffffffd (unprivileged thread)
+	 */
+
+	mrs		r2, control				/* R2=Contents of the control register */
+	tst		r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
+	beq		4f						/* Branch if privileged */
+
+	orr		r2, r2, #1				/* Unprivileged mode */
+	msr		psp, r1					/* R1=The process stack pointer */
+	b		5f
+4:
+	bic		r2, r2, #1				/* Privileged mode */
+	msr		msp, r1					/* R1=The main stack pointer */
+5:
+	msr		control, r2				/* Save the updated control register */
+#else
+	tst		r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
+	ite		eq						/* next two instructions conditional */
+	msreq	msp, r1					/* R1=The main stack pointer */
+	msrne	psp, r1					/* R1=The process stack pointer */
+#endif
+
+	/* Restore the interrupt state */
+
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+	msr		basepri, r3				/* Restore interrupts priority masking */
+#else
+	msr		primask, r3				/* Restore interrupts */
+#endif
+
+	/* Always return with R14 containing the special value that will: (1)
+	 * return to thread mode, and (2) select the correct stack.
+	 */
+
+	bx		r14						/* And return */
+
+	.size	exception_common, .-exception_common
+
+/************************************************************************************
+ *  Name: g_intstackalloc/g_intstackbase
+ *
+ * Description:
+ *   Shouldn't happen
+ *
+ ************************************************************************************/
+
+#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
+	.bss
+	.global	g_intstackalloc
+	.global	g_intstackbase
+	.align	8
+g_intstackalloc:
+	.skip	((CONFIG_ARCH_INTERRUPTSTACK + 4) & ~7)
+g_intstackbase:
+	.size	g_intstackalloc, .-g_intstackalloc
+#endif
+
+	.end
diff --git a/arch/arm/src/armv8-m/up_fetchadd.S b/arch/arm/src/armv8-m/up_fetchadd.S
new file mode 100755
index 0000000..ad31749
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_fetchadd.S
@@ -0,0 +1,242 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/gnu/up_fetchadd.S
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+	.syntax		unified
+	.thumb
+	.file	"up_fetchadd.S"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+	.text
+
+/****************************************************************************
+ * Name: up_fetchadd32
+ *
+ * Description:
+ *   Perform an atomic fetch add operation on the provided 32-bit value.
+ *
+ *   This function must be provided via the architecture-specific logic.
+ *
+ * Input Parameters:
+ *   addr  - The address of 32-bit value to be incremented.
+ *   value - The 32-bit addend
+ *
+ * Returned Value:
+ *   The incremented value (volatile!)
+ *
+ ****************************************************************************/
+
+	.globl	up_fetchadd32
+	.type	up_fetchadd32, %function
+
+up_fetchadd32:
+
+1:
+	ldrex	r2, [r0]			/* Fetch the value to be incremented */
+	add		r2, r2, r1			/* Add the addend */
+
+	strex	r3, r2, [r0]		/* Attempt to save the result */
+	teq		r3, #0				/* r3 will be 1 if strex failed */
+	bne		1b					/* Failed to lock... try again */
+
+	mov		r0, r2				/* Return the incremented value */
+	bx		lr					/* Successful! */
+	.size	up_fetchadd32, . - up_fetchadd32
+
+/****************************************************************************
+ * Name: up_fetchsub32
+ *
+ * Description:
+ *   Perform an atomic fetch subtract operation on the provided 32-bit value.
+ *
+ *   This function must be provided via the architecture-specific logic.
+ *
+ * Input Parameters:
+ *   addr  - The address of 32-bit value to be decremented.
+ *   value - The 32-bit subtrahend
+ *
+ * Returned Value:
+ *   The decremented value (volatile!)
+ *
+ ****************************************************************************/
+
+	.globl	up_fetchsub32
+	.type	up_fetchsub32, %function
+
+up_fetchsub32:
+
+1:
+	ldrex	r2, [r0]			/* Fetch the value to be decremented */
+	sub		r2, r2, r1			/* Subtract the subtrahend */
+
+	strex	r3, r2, [r0]		/* Attempt to save the result */
+	teq		r3, #0				/* r3 will be 1 if strex failed */
+	bne		1b					/* Failed to lock... try again */
+
+	mov		r0, r2				/* Return the decremented value */
+	bx		lr					/* Successful! */
+	.size	up_fetchsub32, . - up_fetchsub32
+
+/****************************************************************************
+ * Name: up_fetchadd16
+ *
+ * Description:
+ *   Perform an atomic fetch add operation on the provided 16-bit value.
+ *
+ *   This function must be provided via the architecture-specific logic.
+ *
+ * Input Parameters:
+ *   addr  - The address of 16-bit value to be incremented.
+ *   value - The 16-bit addend
+ *
+ * Returned Value:
+ *   The incremented value (volatile!)
+ *
+ ****************************************************************************/
+
+	.globl	up_fetchadd16
+	.type	up_fetchadd16, %function
+
+up_fetchadd16:
+
+1:
+	ldrexh	r2, [r0]			/* Fetch the value to be incremented */
+	add		r2, r2, r1			/* Add the addend */
+
+	strexh	r3, r2, [r0]		/* Attempt to save the result */
+	teq		r3, #0				/* r3 will be 1 if strexh failed */
+	bne		1b					/* Failed to lock... try again */
+
+	mov		r0, r2				/* Return the incremented value */
+	bx		lr					/* Successful! */
+	.size	up_fetchadd16, . - up_fetchadd16
+
+/****************************************************************************
+ * Name: up_fetchsub16
+ *
+ * Description:
+ *   Perform an atomic fetch subtract operation on the provided 16-bit value.
+ *
+ *   This function must be provided via the architecture-specific logic.
+ *
+ * Input Parameters:
+ *   addr  - The address of 16-bit value to be decremented.
+ *   value - The 16-bit subtrahend
+ *
+ * Returned Value:
+ *   The decremented value (volatile!)
+ *
+ ****************************************************************************/
+
+	.globl	up_fetchsub16
+	.type	up_fetchsub16, %function
+
+up_fetchsub16:
+
+1:
+	ldrexh	r2, [r0]			/* Fetch the value to be decremented */
+	sub		r2, r2, r1			/* Subtract the subtrahend */
+
+	/* Attempt to save the decremented value */
+
+	strexh	r3, r2, [r0]		/* Attempt to save the result */
+	teq		r3, #0				/* r3 will be 1 if strexh failed */
+	bne		1b					/* Failed to lock... try again */
+
+	mov		r0, r2				/* Return the decremented value */
+	bx		lr					/* Successful! */
+	.size	up_fetchsub16, . - up_fetchsub16
+
+/****************************************************************************
+ * Name: up_fetchadd8
+ *
+ * Description:
+ *   Perform an atomic fetch add operation on the provided 8-bit value.
+ *
+ *   This function must be provided via the architecture-specific logic.
+ *
+ * Input Parameters:
+ *   addr  - The address of 8-bit value to be incremented.
+ *   value - The 8-bit addend
+ *
+ * Returned Value:
+ *   The incremented value (volatile!)
+ *
+ ****************************************************************************/
+
+	.globl	up_fetchadd8
+	.type	up_fetchadd8, %function
+
+up_fetchadd8:
+
+1:
+	ldrexb	r2, [r0]			/* Fetch the value to be incremented */
+	add		r2, r2, r1			/* Add the addend */
+
+	strexb	r3, r2, [r0]		/* Attempt to save the result */
+	teq		r3, #0				/* r3 will be 1 if strexb failed */
+	bne		1b					/* Failed to lock... try again */
+
+	mov		r0, r2				/* Return the incremented value */
+	bx		lr					/* Successful! */
+	.size	up_fetchadd8, . - up_fetchadd8
+
+/****************************************************************************
+ * Name: up_fetchsub8
+ *
+ * Description:
+ *   Perform an atomic fetch subtract operation on the provided 8-bit value.
+ *
+ *   This function must be provided via the architecture-specific logic.
+ *
+ * Input Parameters:
+ *   addr  - The address of 8-bit value to be decremented.
+ *   value - The 8-bit subtrahend
+ *
+ * Returned Value:
+ *   The decremented value (volatile!)
+ *
+ ****************************************************************************/
+
+	.globl	up_fetchsub8
+	.type	up_fetchsub8, %function
+
+up_fetchsub8:
+
+1:
+	ldrexb	r2, [r0]			/* Fetch the value to be decremented */
+	sub		r2, r2, r1			/* Subtract the subtrahend */
+
+	strexb	r3, r2, [r0]		/* Attempt to save the result */
+	teq		r3, #0				/* r3 will be 1 if strexb failed */
+	bne		1b					/* Failed to lock... try again */
+
+	mov		r0, r2				/* Return the decremented value */
+	bx		lr					/* Successful! */
+	.size	up_fetchsub8, . - up_fetchsub8
+	.end
diff --git a/arch/arm/src/armv8-m/up_fpu.S b/arch/arm/src/armv8-m/up_fpu.S
new file mode 100755
index 0000000..0afd0d3
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_fpu.S
@@ -0,0 +1,270 @@
+/************************************************************************************
+ * arch/arm/src/armv8-m/gnu/up_fpu.S
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ************************************************************************************/
+/*
+ * When this file is assembled, it will require the following GCC options:
+ *
+ * -mcpu=cortex-m33 -mfloat-abi=hard -mfpu=vfp -meabi=5 -mthumb
+ */
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <arch/irq.h>
+
+#ifdef CONFIG_ARCH_FPU
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/************************************************************************************
+ * Public Symbols
+ ************************************************************************************/
+
+	.globl		up_savefpu
+	.globl		up_restorefpu
+
+	.syntax		unified
+	.thumb
+	.file		"up_fpu.S"
+
+/************************************************************************************
+ * Public Functions
+ ************************************************************************************/
+
+/************************************************************************************
+ * Name: up_savefpu
+ *
+ * Description:
+ *   Given the pointer to a register save area (in R0), save the state of the
+ *   floating point registers.
+ *
+ * C Function Prototype:
+ *   void up_savefpu(uint32_t *regs);
+ *
+ * Input Parameters:
+ *   regs - A pointer to the register save area in which to save the floating point
+ *     registers
+ *
+ * Returned Value:
+ *   None
+ *
+ ************************************************************************************/
+
+	.thumb_func
+	.type	up_savefpu, function
+up_savefpu:
+
+	add		r1, r0, #(4*REG_S0)		/* R1=Address of FP register storage */
+
+	/* Some older GNU assemblers don't support all the newer UAL mnemonics. */
+
+#if 1 /* Use UAL mnemonics */
+	/* Store all floating point registers.  Registers are stored in numeric order,
+	 * s0, s1, ... in increasing address order.
+	 */
+
+	vstmia	r1!, {s0-s31}			/* Save the full FP context */
+
+	/* Store the floating point control and status register.  At the end of the
+	 * vstmia, r1 will point to the FPCSR storage location.
+	 */
+
+	vmrs	r2, fpscr				/* Fetch the FPCSR */
+	str		r2, [r1], #4			/* Save the floating point control and status register */
+#else
+	/* Store all floating point registers */
+
+#if 1 /* Use store multiple */
+	fstmias	r1!, {s0-s31}			/* Save the full FP context */
+#else
+	vmov	r2, r3, d0				/* r2, r3 = d0 */
+	str		r2, [r1], #4			/* Save S0 and S1 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d1				/* r2, r3 = d1 */
+	str		r2, [r1], #4			/* Save S2 and S3 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d2				/* r2, r3 = d2 */
+	str		r2, [r1], #4			/* Save S4 and S5 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d3				/* r2, r3 = d3 */
+	str		r2, [r1], #4			/* Save S6 and S7 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d4				/* r2, r3 = d4 */
+	str		r2, [r1], #4			/* Save S8 and S9 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d5				/* r2, r3 = d5 */
+	str		r2, [r1], #4			/* Save S10 and S11 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d6				/* r2, r3 = d6 */
+	str		r2, [r1], #4			/* Save S12 and S13 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d7				/* r2, r3 = d7 */
+	str		r2, [r1], #4			/* Save S14 and S15 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d8				/* r2, r3 = d8 */
+	str		r2, [r1], #4			/* Save S16 and S17 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d9				/* r2, r3 = d9 */
+	str		r2, [r1], #4			/* Save S18 and S19 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d10				/* r2, r3 = d10 */
+	str		r2, [r1], #4			/* Save S20 and S21 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d11				/* r2, r3 = d11 */
+	str		r2, [r1], #4			/* Save S22 and S23 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d12				/* r2, r3 = d12 */
+	str		r2, [r1], #4			/* Save S24 and S25 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d13				/* r2, r3 = d13 */
+	str		r2, [r1], #4			/* Save S26 and S27 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d14				/* r2, r3 = d14 */
+	str		r2, [r1], #4			/* Save S28 and S29 values */
+	str		r3, [r1], #4
+	vmov	r2, r3, d15				/* r2, r3 = d15 */
+	str		r2, [r1], #4			/* Save S30 and S31 values */
+	str		r3, [r1], #4
+#endif
+
+	/* Store the floating point control and status register */
+
+	fmrx	r2, fpscr				/* Fetch the FPCSR */
+	str		r2, [r1], #4			/* Save the floating point control and status register */
+#endif
+	bx		lr
+
+	.size	up_savefpu, .-up_savefpu
+
+/************************************************************************************
+ * Name: up_restorefpu
+ *
+ * Description:
+ *   Given the pointer to a register save area (in R0), restore the state of the
+ *   floating point registers.
+ *
+ * C Function Prototype:
+ *   void up_restorefpu(const uint32_t *regs);
+ *
+ * Input Parameters:
+ *   regs - A pointer to the register save area containing the floating point
+ *     registers.
+ *
+ * Returned Value:
+ *   This function does not return anything explicitly.  However, it is called from
+ *   interrupt level assembly logic that assumes that r0 is preserved.
+ *
+ ************************************************************************************/
+
+	.thumb_func
+	.type	up_restorefpu, function
+up_restorefpu:
+
+	add		r1, r0, #(4*REG_S0)		/* R1=Address of FP register storage */
+
+	/* Some older GNU assemblers don't support all the newer UAL mnemonics. */
+
+#if 1 /* Use UAL mnemonics */
+	/* Load all floating point registers.  Registers are loaded in numeric order,
+	 * s0, s1, ... in increasing address order.
+	 */
+
+	vldmia	r1!, {s0-s31}			/* Restore the full FP context */
+
+	/* Load the floating point control and status register.  At the end of the
+	 * vldmia, r1 will point to the FPCSR storage location.
+	 */
+
+	ldr		r2, [r1], #4			/* Fetch the floating point control and status register */
+	vmsr	fpscr, r2				/* Restore the FPCSR */
+#else
+	/* Load all floating point registers.  Registers are loaded in numeric order,
+	 * s0, s1, ... in increasing address order.
+	 */
+
+#if 1 /* Use load multiple */
+	fldmias	r1!, {s0-s31}			/* Restore the full FP context */
+#else
+	ldr		r2, [r1], #4			/* Fetch S0 and S1 values */
+	ldr		r3, [r1], #4
+	vmov	d0, r2, r3				/* Save as d0 */
+	ldr		r2, [r1], #4			/* Fetch S2 and S3 values */
+	ldr		r3, [r1], #4
+	vmov	d1, r2, r3				/* Save as d1 */
+	ldr		r2, [r1], #4			/* Fetch S4 and S5 values */
+	ldr		r3, [r1], #4
+	vmov	d2, r2, r3				/* Save as d2 */
+	ldr		r2, [r1], #4			/* Fetch S6 and S7 values */
+	ldr		r3, [r1], #4
+	vmov	d3, r2, r3				/* Save as d3 */
+	ldr		r2, [r1], #4			/* Fetch S8 and S9 values */
+	ldr		r3, [r1], #4
+	vmov	d4, r2, r3				/* Save as d4 */
+	ldr		r2, [r1], #4			/* Fetch S10 and S11 values */
+	ldr		r3, [r1], #4
+	vmov	d5, r2, r3				/* Save as d5 */
+	ldr		r2, [r1], #4			/* Fetch S12 and S13 values */
+	ldr		r3, [r1], #4
+	vmov	d6, r2, r3				/* Save as d6 */
+	ldr		r2, [r1], #4			/* Fetch S14 and S15 values */
+	ldr		r3, [r1], #4
+	vmov	d7, r2, r3				/* Save as d7 */
+	ldr		r2, [r1], #4			/* Fetch S16 and S17 values */
+	ldr		r3, [r1], #4
+	vmov	d8, r2, r3				/* Save as d8 */
+	ldr		r2, [r1], #4			/* Fetch S18 and S19 values */
+	ldr		r3, [r1], #4
+	vmov	d9, r2, r3				/* Save as d9 */
+	ldr		r2, [r1], #4			/* Fetch S20 and S21 values */
+	ldr		r3, [r1], #4
+	vmov	d10, r2, r3				/* Save as d10 */
+	ldr		r2, [r1], #4			/* Fetch S22 and S23 values */
+	ldr		r3, [r1], #4
+	vmov	d11, r2, r3				/* Save as d11 */
+	ldr		r2, [r1], #4			/* Fetch S24 and S25 values */
+	ldr		r3, [r1], #4
+	vmov	d12, r2, r3				/* Save as d12 */
+	ldr		r2, [r1], #4			/* Fetch S26 and S27 values */
+	ldr		r3, [r1], #4
+	vmov	d13, r2, r3				/* Save as d13 */
+	ldr		r2, [r1], #4			/* Fetch S28 and S29 values */
+	ldr		r3, [r1], #4
+	vmov	d14, r2, r3				/* Save as d14 */
+	ldr		r2, [r1], #4			/* Fetch S30 and S31 values */
+	ldr		r3, [r1], #4
+	vmov	d15, r2, r3				/* Save as d15 */
+#endif
+
+	/* Load the floating point control and status register.  r1 points to
+	 * the address of the FPCSR register.
+	 */
+
+	ldr		r2, [r1], #4			/* Fetch the floating point control and status register */
+	fmxr	fpscr, r2				/* Restore the FPCSR */
+#endif
+	bx		lr
+
+	.size	up_restorefpu, .-up_restorefpu
+#endif /* CONFIG_ARCH_FPU */
+	.end
diff --git a/arch/arm/src/armv8-m/up_fullcontextrestore.S b/arch/arm/src/armv8-m/up_fullcontextrestore.S
new file mode 100755
index 0000000..eb01e09
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_fullcontextrestore.S
@@ -0,0 +1,79 @@
+/************************************************************************************
+ * arch/arm/src/armv8-m/gnu/up_fullcontextrestore.S
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ************************************************************************************/
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+#include <nuttx/config.h>
+#include <arch/irq.h>
+
+#include "nvic.h"
+#include "svcall.h"
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/************************************************************************************
+ * Public Symbols
+ ************************************************************************************/
+
+	.syntax	unified
+	.thumb
+	.file	"up_fullcontextrestore.S"
+
+/************************************************************************************
+ * Macros
+ ************************************************************************************/
+
+/************************************************************************************
+ * Public Functions
+ ************************************************************************************/
+
+/************************************************************************************
+ * Name: up_fullcontextrestore
+ *
+ * Description:
+ *   Restore the current thread context.  Full prototype is:
+ *
+ *   void up_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
+ *
+ * Returned Value:
+ *   None
+ *
+ ************************************************************************************/
+
+	.thumb_func
+	.globl	up_fullcontextrestore
+	.type	up_fullcontextrestore, function
+up_fullcontextrestore:
+
+	/* Perform the System call with R0=1 and R1=regs */
+
+	mov		r1, r0						/* R1: regs */
+	mov		r0, #SYS_restore_context	/* R0: restore context */
+	svc		0							/* Force synchronous SVCall (or Hard Fault) */
+
+	/* This call should not return */
+
+	bx		lr							/* Unnecessary ... will not return */
+	.size	up_fullcontextrestore, .-up_fullcontextrestore
+	.end
diff --git a/arch/arm/src/armv8-m/up_hardfault.c b/arch/arm/src/armv8-m/up_hardfault.c
new file mode 100755
index 0000000..94d0d73
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_hardfault.c
@@ -0,0 +1,136 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_hardfault.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <debug.h>
+
+#include <nuttx/userspace.h>
+#include <arch/irq.h>
+
+#include "up_arch.h"
+#include "nvic.h"
+#include "up_internal.h"
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* If CONFIG_ARMV8M_USEBASEPRI=n, then debug output from this file may
+ * interfere with context switching!
+ */
+
+#ifdef CONFIG_DEBUG_HARDFAULT_ALERT
+# define hfalert(format, ...)  _alert(format, ##__VA_ARGS__)
+#else
+# define hfalert(x...)
+#endif
+
+#ifdef CONFIG_DEBUG_HARDFAULT_INFO
+# define hfinfo(format, ...)   _info(format, ##__VA_ARGS__)
+#else
+# define hfinfo(x...)
+#endif
+
+#define INSN_SVC0        0xdf00 /* insn: svc 0 */
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_hardfault
+ *
+ * Description:
+ *   This is the Hard Fault exception handler.  It also catches SVC call
+ *   exceptions that are performed in bad contexts.
+ *
+ ****************************************************************************/
+
+int up_hardfault(int irq, FAR void *context, FAR void *arg)
+{
+  /* Get the value of the program counter where the fault occurred */
+
+#ifndef CONFIG_ARMV8M_USEBASEPRI
+  uint32_t *regs = (uint32_t *)context;
+  uint16_t *pc = (uint16_t *)regs[REG_PC] - 1;
+
+  /* Check if the pc lies in known FLASH memory.
+   * REVISIT:  What if the PC lies in "unknown" external memory?  Best
+   * use the BASEPRI register if you have external memory.
+   */
+
+#ifdef CONFIG_BUILD_PROTECTED
+  /* In the kernel build, SVCalls are expected in either the base, kernel
+   * FLASH region or in the user FLASH region.
+   */
+
+  if (((uintptr_t)pc >= (uintptr_t)_START_TEXT &&
+       (uintptr_t)pc <  (uintptr_t)_END_TEXT) ||
+      ((uintptr_t)pc >= (uintptr_t)USERSPACE->us_textstart &&
+       (uintptr_t)pc <  (uintptr_t)USERSPACE->us_textend))
+#else
+  /* SVCalls are expected only from the base, kernel FLASH region */
+
+  if ((uintptr_t)pc >= (uintptr_t)_START_TEXT &&
+      (uintptr_t)pc <  (uintptr_t)_END_TEXT)
+#endif
+    {
+      /* Fetch the instruction that caused the Hard fault */
+
+      uint16_t insn = *pc;
+      hfinfo("  PC: %p INSN: %04x\n", pc, insn);
+
+      /* If this was the instruction 'svc 0', then forward processing
+       * to the SVCall handler
+       */
+
+      if (insn == INSN_SVC0)
+        {
+          hfinfo("Forward SVCall\n");
+          return up_svcall(irq, context, arg);
+        }
+    }
+#endif
+
+  /* Dump some hard fault info */
+
+  hfalert("Hard Fault:\n");
+  hfalert("  IRQ: %d regs: %p\n", irq, context);
+  hfalert("  BASEPRI: %08x PRIMASK: %08x IPSR: %08x CONTROL: %08x\n",
+          getbasepri(), getprimask(), getipsr(), getcontrol());
+  hfalert("  CFAULTS: %08x HFAULTS: %08x DFAULTS: %08x BFAULTADDR: %08x "
+          "AFAULTS: %08x\n",
+          getreg32(NVIC_CFAULTS), getreg32(NVIC_HFAULTS),
+          getreg32(NVIC_DFAULTS), getreg32(NVIC_BFAULT_ADDR),
+          getreg32(NVIC_AFAULTS));
+
+  up_irq_save();
+  _alert("PANIC!!! Hard fault: %08x\n", getreg32(NVIC_HFAULTS));
+  PANIC();
+  return OK;
+}
diff --git a/arch/arm/src/armv8-m/up_initialstate.c b/arch/arm/src/armv8-m/up_initialstate.c
new file mode 100755
index 0000000..390eb26
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_initialstate.c
@@ -0,0 +1,144 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_initialstate.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <nuttx/arch.h>
+#include <arch/armv8-m/nvicpri.h>
+
+#include "up_internal.h"
+#include "up_arch.h"
+
+#include "psr.h"
+#include "exc_return.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_initial_state
+ *
+ * Description:
+ *   A new thread is being started and a new TCB
+ *   has been created. This function is called to initialize
+ *   the processor specific portions of the new TCB.
+ *
+ *   This function must setup the initial architecture registers
+ *   and/or stack so that execution will begin at tcb->start
+ *   on the next context switch.
+ *
+ ****************************************************************************/
+
+void up_initial_state(struct tcb_s *tcb)
+{
+  struct xcptcontext *xcp = &tcb->xcp;
+
+  /* Initialize the initial exception register context structure */
+
+  memset(xcp, 0, sizeof(struct xcptcontext));
+
+  /* Save the initial stack pointer */
+
+  xcp->regs[REG_SP]      = (uint32_t)tcb->adj_stack_ptr;
+
+#ifdef CONFIG_ARMV8M_STACKCHECK
+  /* Set the stack limit value */
+
+  xcp->regs[REG_R10]     = (uint32_t)tcb->stack_alloc_ptr + 64;
+#endif
+
+  /* Save the task entry point (stripping off the thumb bit) */
+
+  xcp->regs[REG_PC]      = (uint32_t)tcb->start & ~1;
+
+  /* Specify thumb mode */
+
+  xcp->regs[REG_XPSR]    = ARMV8M_XPSR_T;
+
+  /* If this task is running PIC, then set the PIC base register to the
+   * address of the allocated D-Space region.
+   */
+
+#ifdef CONFIG_PIC
+  if (tcb->dspace != NULL)
+    {
+      /* Set the PIC base register (probably R10) to the address of the
+       * allocated D-Space region.
+       */
+
+      xcp->regs[REG_PIC] = (uint32_t)tcb->dspace->region;
+    }
+
+#ifdef CONFIG_NXFLAT
+  /* Make certain that bit 0 is set in the main entry address.  This
+   * is only an issue when NXFLAT is enabled.  NXFLAT doesn't know
+   * anything about thumb; the addresses that NXFLAT sets are based
+   * on file header info and won't have bit 0 set.
+   */
+
+  tcb->entry.main = (main_t)((uint32_t)tcb->entry.main | 1);
+#endif
+#endif /* CONFIG_PIC */
+
+#if !defined(CONFIG_ARMV8M_LAZYFPU) || defined(CONFIG_BUILD_PROTECTED)
+  /* All tasks start via a stub function in kernel space.  So all
+   * tasks must start in privileged thread mode.  If CONFIG_BUILD_PROTECTED
+   * is defined, then that stub function will switch to unprivileged
+   * mode before transferring control to the user task.
+   */
+
+  xcp->regs[REG_EXC_RETURN] = EXC_RETURN_PRIVTHR;
+
+#endif /* !CONFIG_ARMV8M_LAZYFPU || CONFIG_BUILD_PROTECTED */
+
+#if !defined(CONFIG_ARMV8M_LAZYFPU) && defined(CONFIG_ARCH_FPU)
+
+  xcp->regs[REG_FPSCR] = 0;      /* REVISIT: Initial FPSCR should be configurable */
+  xcp->regs[REG_FP_RESERVED] = 0;
+
+#endif /* !CONFIG_ARMV8M_LAZYFPU && CONFIG_ARCH_FPU */
+
+  /* Enable or disable interrupts, based on user configuration */
+
+#ifdef CONFIG_SUPPRESS_INTERRUPTS
+
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+  xcp->regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
+#else
+  xcp->regs[REG_PRIMASK] = 1;
+#endif
+
+#else /* CONFIG_SUPPRESS_INTERRUPTS */
+
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+  xcp->regs[REG_BASEPRI] = NVIC_SYSH_PRIORITY_MIN;
+#endif
+
+#endif /* CONFIG_SUPPRESS_INTERRUPTS */
+}
diff --git a/arch/arm/src/armv8-m/up_itm.c b/arch/arm/src/armv8-m/up_itm.c
new file mode 100755
index 0000000..8e5dcbc
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_itm.c
@@ -0,0 +1,156 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_itm.c
+ *
+ *   Copyright (c) 2009 - 2013 ARM LIMITED
+ *
+ *  All rights reserved.
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions are met:
+ *  - Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  - Neither the name of ARM nor the names of its contributors may be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *  ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *  POSSIBILITY OF SUCH DAMAGE.
+ *
+ *   Copyright (C) 2014 Pierre-noel Bouteville . All rights reserved.
+ *   Copyright (C) 2014 Gregory Nutt. All rights reserved.
+ *   Authors: Pierre-noel Bouteville <pnb990@gmail.com>
+ *            Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+
+#include "up_arch.h"
+#include "itm.h"
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: itm_sendchar
+ *
+ * Description:
+ *   The function transmits a character via the ITM channel 0, and
+ *   - Just returns when no debugger is connected that has booked the output.
+ *   - Is blocking when a debugger is connected, but the previous character
+ *     sent has not been transmitted.
+ *
+ * Input Parameters:
+ *   ch - Character to transmit.
+ *
+ * Returned Value:
+ *   Character to transmit.
+ *
+ ****************************************************************************/
+
+uint32_t itm_sendchar(uint32_t ch)
+{
+  if ((getreg32(ITM_TCR) & ITM_TCR_ITMENA_MASK) && /* ITM enabled */
+      (getreg32(ITM_TER) & (1UL << 0)))            /* ITM Port #0 enabled */
+    {
+      while (getreg32(ITM_PORT(0)) == 0);
+      putreg8((uint8_t)ch, ITM_PORT(0));
+    }
+
+  return ch;
+}
+
+/****************************************************************************
+ * Name: itm_receivechar
+ *
+ * Description:
+ *   The function inputs a character via the external variable g_itm_rxbuffer.
+ *
+ * Input Parameters:  None.
+ *
+ * Returned Value:
+ *   Received character or -1 No character pending.
+ *
+ ****************************************************************************/
+
+int32_t itm_receivechar(void)
+{
+  int32_t ch = -1;  /* Assume no character available */
+
+  if (g_itm_rxbuffer != ITM_RXBUFFER_EMPTY)
+    {
+      ch = g_itm_rxbuffer;
+      g_itm_rxbuffer = ITM_RXBUFFER_EMPTY; /* Ready for next character */
+    }
+
+  return ch;
+}
+
+/****************************************************************************
+ * Name: itm_checkchar
+ *
+ * Description:
+ *   The function checks whether a character is pending for reading in the
+ *   variable g_itm_rxbuffer.
+ *
+ * Input Parameters:  None.
+ *
+ * Returned Value:
+ *   0  No character available.
+ *   1  Character available.
+ *
+ ****************************************************************************/
+
+int32_t itm_checkchar (void)
+{
+  return (g_itm_rxbuffer != ITM_RXBUFFER_EMPTY);
+}
diff --git a/arch/arm/src/armv8-m/up_itm_syslog.c b/arch/arm/src/armv8-m/up_itm_syslog.c
new file mode 100755
index 0000000..1c74df3
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_itm_syslog.c
@@ -0,0 +1,192 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_itm_syslog.c
+ *
+ *   Copyright (C) 2014 Pierre-noel Bouteville . All rights reserved.
+ *   Copyright (C) 2014, 2016 Gregory Nutt. All rights reserved.
+ *   Authors: Pierre-noel Bouteville <pnb990@gmail.com>
+ *            Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdio.h>
+
+#include <nuttx/syslog/syslog.h>
+
+#include "nvic.h"
+#include "itm.h"
+#include "tpi.h"
+#include "dwt.h"
+#include "up_arch.h"
+#include "itm_syslog.h"
+
+#ifdef CONFIG_ARMV8M_ITMSYSLOG
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#ifndef CONFIG_ARMV8M_ITMSYSLOG_SWODIV
+#  define CONFIG_ARMV8M_ITMSYSLOG_SWODIV 15
+#endif
+
+#if CONFIG_ARMV8M_ITMSYSLOG_SWODIV < 0
+#  error CONFIG_ARMV8M_ITMSYSLOG_SWODIV should be at least equal to 1
+#endif
+
+/* Use Port #0 at default */
+
+#ifndef CONFIG_ARMV8M_ITMSYSLOG_PORT
+#  define CONFIG_ARMV8M_ITMSYSLOG_PORT 0
+#endif
+
+/****************************************************************************
+ * Private Function Prototypes
+ ****************************************************************************/
+
+/* SYSLOG channel methods */
+
+static int itm_putc(int ch);
+static int itm_flush(void);
+
+/****************************************************************************
+ * Private Data
+ ****************************************************************************/
+
+/* This structure describes the ITM SYSLOG channel */
+
+static const struct syslog_channel_s g_itm_channel =
+{
+  .sc_putc  = itm_putc,
+  .sc_force = itm_putc,
+  .sc_flush = itm_flush,
+};
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: itm_putc
+ *
+ * Description:
+ *   This is the low-level system logging interface.
+ *
+ ****************************************************************************/
+
+static int itm_putc(int ch)
+{
+  /* ITM enabled */
+
+  if ((getreg32(ITM_TCR) & ITM_TCR_ITMENA_MASK) == 0)
+    {
+      return EOF;
+    }
+
+  /* ITM Port "CONFIG_ARMV8M_ITMSYSLOG_PORT" enabled */
+
+  if (getreg32(ITM_TER) & (1 << CONFIG_ARMV8M_ITMSYSLOG_PORT))
+    {
+      while (getreg32(ITM_PORT(CONFIG_ARMV8M_ITMSYSLOG_PORT)) == 0);
+      putreg8((uint8_t)ch, ITM_PORT(CONFIG_ARMV8M_ITMSYSLOG_PORT));
+    }
+
+  return ch;
+}
+
+/****************************************************************************
+ * Name: itm_flush
+ *
+ * Description:
+ *   A dummy FLUSH method
+ *
+ ****************************************************************************/
+
+static int itm_flush(void)
+{
+  return OK;
+}
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: itm_syslog_initialize
+ *
+ * Description:
+ *   Performs ARM-specific initialize for the ITM SYSLOG functions.
+ *   Additional, board specific logic may be required to:
+ *
+ *   - Enable/configured serial wire output pins
+ *   - Enable debug clocking.
+ *
+ *   Those operations must be performed by MCU-specific logic before this
+ *   function is called.
+ *
+ ****************************************************************************/
+
+void itm_syslog_initialize(void)
+{
+  uint32_t regval;
+
+  /* Enable trace in core debug */
+
+  regval  = getreg32(NVIC_DEMCR);
+  regval |= NVIC_DEMCR_TRCENA;
+  putreg32(regval, NVIC_DEMCR);
+
+  putreg32(0xc5acce55, ITM_LAR);
+  putreg32(0,          ITM_TER);
+  putreg32(0,          ITM_TCR);
+  putreg32(2,          TPI_SPPR); /* Pin protocol: 2=> Manchester (USART) */
+
+  /* Default 880kbps */
+
+  regval = CONFIG_ARMV8M_ITMSYSLOG_SWODIV - 1;
+  putreg32(regval,     TPI_ACPR); /* TRACECLKIN/(ACPR+1) SWO speed */
+
+  putreg32(0,          ITM_TPR);
+  putreg32(0x400003fe, DWT_CTRL);
+  putreg32(0x0001000d, ITM_TCR);
+  putreg32(0x00000100, TPI_FFCR);
+  putreg32(0xffffffff, ITM_TER); /* Enable 32 Ports */
+
+  /* Setup the SYSLOG channel */
+
+  syslog_channel(&g_itm_channel);
+}
+
+#endif /* CONFIG_ARMV8M_ITMSYSLOG */
diff --git a/arch/arm/src/armv8-m/up_lazyexception.S b/arch/arm/src/armv8-m/up_lazyexception.S
new file mode 100755
index 0000000..fb6a152
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_lazyexception.S
@@ -0,0 +1,350 @@
+/************************************************************************************************
+ * arch/arm/src/armv8-m/up_lazyexception.S
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ************************************************************************************************/
+
+/************************************************************************************************
+ * Included Files
+ ************************************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <arch/irq.h>
+#include <arch/armv8-m/nvicpri.h>
+
+#include "chip.h"
+#include "exc_return.h"
+
+/************************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************************/
+
+/* Configuration ********************************************************************************/
+
+#ifdef CONFIG_ARCH_HIPRI_INTERRUPT
+  /* In kernel mode without an interrupt stack, this interrupt handler will set the MSP to the
+   * stack pointer of the interrupted thread.  If the interrupted thread was a privileged
+   * thread, that will be the MSP otherwise it will be the PSP.  If the PSP is used, then the
+   * value of the MSP will be invalid when the interrupt handler returns because it will be a
+   * pointer to an old position in the unprivileged stack.  Then when the high priority
+   * interrupt occurs and uses this stale MSP, there will most likely be a system failure.
+   *
+   * If the interrupt stack is selected, on the other hand, then the interrupt handler will
+   * always set the MSP to the interrupt stack.  So when the high priority interrupt occurs,
+   * it will either use the MSP of the last privileged thread to run or, in the case of the
+   * nested interrupt, the interrupt stack if no privileged task has run.
+   */
+
+#  if defined(CONFIG_BUILD_PROTECTED) && CONFIG_ARCH_INTERRUPTSTACK < 4
+#    error Interrupt stack must be used with high priority interrupts in kernel mode
+#  endif
+
+  /* Use the BASEPRI to control interrupts is required if nested, high
+   * priority interrupts are supported.
+   */
+
+#  ifndef CONFIG_ARMV8M_USEBASEPRI
+#    error CONFIG_ARMV8M_USEBASEPRI must be used with CONFIG_ARCH_HIPRI_INTERRUPT
+#  endif
+#endif
+
+/************************************************************************************************
+ * Public Symbols
+ ************************************************************************************************/
+
+	.globl		exception_common
+
+	.syntax		unified
+	.thumb
+	.file		"up_lazyexception.S"
+
+/************************************************************************************************
+ * Macro Definitions
+ ************************************************************************************************/
+
+/************************************************************************************************
+ * Name: setintstack
+ *
+ * Description:
+ *   Set the current stack pointer to the "top" of the interrupt stack.  Single CPU case.  Must be
+ *   provided by MCU-specific logic in chip.h for the SMP case.
+ *
+ ************************************************************************************************/
+
+#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
+	.macro	setintstack, tmp1, tmp2
+	ldr		sp, =g_intstackbase
+	.endm
+#endif
+
+/************************************************************************************************
+ * .text
+ ************************************************************************************************/
+
+/* Common IRQ handling logic.  On entry here, the return stack is on either
+ * the PSP or the MSP and looks like the following:
+ *
+ *      REG_XPSR
+ *      REG_R15
+ *      REG_R14
+ *      REG_R12
+ *      REG_R3
+ *      REG_R2
+ *      REG_R1
+ * MSP->REG_R0
+ *
+ * And
+ *      IPSR contains the IRQ number
+ *      R14 Contains the EXC_RETURN value
+ *      We are in handler mode and the current SP is the MSP
+ */
+
+	.text
+	.type		exception_common, function
+
+exception_common:
+
+	/* Get the IRQ number from the IPSR */
+
+	mrs			r0, ipsr			/* R0=exception number */
+
+	/* Complete the context save */
+
+#ifdef CONFIG_BUILD_PROTECTED
+	/* The EXC_RETURN value will be 0xfffffff9 (privileged thread) or 0xfffffff1
+	 * (handler mode) if the stack is on the MSP.  It can only be on the PSP if
+	 * EXC_RETURN is 0xfffffffd (unprivileged thread)
+	 */
+
+	tst		r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
+	beq		1f						/* Branch if context already on the MSP */
+	mrs		r1, psp					/* R1=The process stack pointer (PSP) */
+	mov     sp, r1					/* Set the MSP to the PSP */
+
+1:
+#endif
+
+	/* r1 holds the value of the stack pointer AFTER the exception handling logic
+	 * pushed the various registers onto the stack.  Get r2 = the value of the
+	 * stack pointer BEFORE the interrupt modified it.
+	 */
+
+	mov		r2, sp					/* R2=Copy of the main/process stack pointer */
+	add		r2, #HW_XCPT_SIZE		/* R2=MSP/PSP before the interrupt was taken */
+
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+	mrs		r3, basepri				/* R3=Current BASEPRI setting */
+#else
+	mrs		r3, primask				/* R3=Current PRIMASK setting */
+#endif
+
+#ifdef CONFIG_ARCH_FPU
+	/* Skip over the block of memory reserved for floating pointer register save.
+	 * Lazy FPU register saving is used.  FPU registers will be saved in this
+	 * block only if a context switch occurs (this means, of course, that the FPU
+	 * cannot be used in interrupt processing).
+	 */
+
+	sub		sp, #(4*SW_FPU_REGS)
+#endif
+
+	/* Save the remaining registers on the stack after the registers pushed
+	 * by the exception handling logic. r2=SP and r3=primask or basepri, r4-r11,
+	 * r14=register values.
+	 */
+
+#ifdef CONFIG_BUILD_PROTECTED
+	stmdb	sp!, {r2-r11,r14}		/* Save the remaining registers plus the SP value */
+#else
+	stmdb	sp!, {r2-r11}			/* Save the remaining registers plus the SP value */
+#endif
+
+	/* There are two arguments to up_doirq:
+	 *
+	 *   R0 = The IRQ number
+	 *   R1 = The top of the stack points to the saved state
+	 */
+
+	mov		r1, sp
+
+	/* Also save the top of the stack in a preserved register */
+
+	mov		r4, sp
+
+#if CONFIG_ARCH_INTERRUPTSTACK > 7
+	/* If CONFIG_ARCH_INTERRUPTSTACK is defined, we will set the MSP to use
+	 * a special interrupt stack pointer.  The way that this is done
+	 * here prohibits nested interrupts without some additional logic!
+	 */
+
+	setintstack	r2, r3
+
+#else
+	/* Otherwise, we will re-use the interrupted thread's stack.  That may
+	 * mean using either MSP or PSP stack for interrupt level processing (in
+	 * kernel mode).
+	 */
+
+	bic		r2, r4, #7				/* Get the stack pointer with 8-byte alignment */
+	mov		sp, r2					/* Instantiate the aligned stack */
+
+#endif
+
+	bl		up_doirq				/* R0=IRQ, R1=register save (msp) */
+	mov		r1, r4					/* Recover R1=main stack pointer */
+
+	/* On return from up_doirq, R0 will hold a pointer to register context
+	 * array to use for the interrupt return.  If that return value is the same
+	 * as current stack pointer, then things are relatively easy.
+	 */
+
+	cmp		r0, r1					/* Context switch? */
+	beq		2f						/* Branch if no context switch */
+
+	/* We are returning with a pending context switch.
+	 *
+	 * If the FPU is enabled, then we will need to restore FPU registers.
+	 * This is not done in normal interrupt save/restore because the cost
+	 * is prohibitive.  This is only done when switching contexts.  A
+	 * consequence of this is that floating point operations may not be
+	 * performed in interrupt handling logic.
+	 *
+	 * Here:
+	 *   r0 = Address of the register save area
+	 *
+	 * NOTE: It is a requirement that up_restorefpu() preserve the value of
+	 * r0!
+	 */
+
+#ifdef CONFIG_ARCH_FPU
+	bl		up_restorefpu			/* Restore the FPU registers */
+#endif
+
+	/* We are returning with a pending context switch.  This case is different
+	 * because in this case, the register save structure does not lie in the
+	 * stack but, rather, within a TCB structure.  We'll have to copy some
+	 * values to the stack.
+	 */
+
+	add		r1, r0, #SW_XCPT_SIZE	/* R1=Address of HW save area in reg array */
+	ldmia	r1, {r4-r11}			/* Fetch eight registers in HW save area */
+	ldr		r1, [r0, #(4*REG_SP)]	/* R1=Value of SP before interrupt */
+	stmdb	r1!, {r4-r11}			/* Store eight registers in HW save area */
+#ifdef CONFIG_BUILD_PROTECTED
+	ldmia	r0, {r2-r11,r14}		/* Recover R4-R11, r14 + 2 temp values */
+#else
+	ldmia	r0, {r2-r11}			/* Recover R4-R11 + 2 temp values */
+#endif
+	b		3f						/* Re-join common logic */
+
+	/* We are returning with no context switch.  We simply need to "unwind"
+	 * the same stack frame that we created
+	 *
+	 * Here:
+	 *   r1 = Address of the return stack (same as r0)
+	 */
+
+2:
+#ifdef CONFIG_BUILD_PROTECTED
+	ldmia	r1!, {r2-r11,r14}		/* Recover R4-R11, r14 + 2 temp values */
+#else
+	ldmia	r1!, {r2-r11}			/* Recover R4-R11 + 2 temp values */
+#endif
+
+#ifdef CONFIG_ARCH_FPU
+	/* Skip over the block of memory reserved for floating pointer register
+	 * save. Then R1 is the address of the HW save area
+	 */
+
+	add		r1, #(4*SW_FPU_REGS)
+#endif
+
+	/* Set up to return from the exception
+	 *
+	 * Here:
+	 *   r1 = Address on the target thread's stack position at the start of
+	 *        the registers saved by hardware
+	 *   r3 = primask or basepri
+	 *   r4-r11 = restored register values
+	 */
+
+3:
+
+#ifdef CONFIG_BUILD_PROTECTED
+	/* The EXC_RETURN value will be 0xfffffff9 (privileged thread) or 0xfffffff1
+	 * (handler mode) if the stack is on the MSP.  It can only be on the PSP if
+	 * EXC_RETURN is 0xfffffffd (unprivileged thread)
+	 */
+
+	mrs		r2, control				/* R2=Contents of the control register */
+	tst		r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
+	beq		4f						/* Branch if privileged */
+
+	orr		r2, r2, #1				/* Unprivileged mode */
+	msr		psp, r1					/* R1=The process stack pointer */
+	b		5f
+4:
+	bic		r2, r2, #1				/* Privileged mode */
+	msr		msp, r1					/* R1=The main stack pointer */
+5:
+	msr		control, r2				/* Save the updated control register */
+#else
+	msr		msp, r1					/* Recover the return MSP value */
+
+	/* Preload r14 with the special return value first (so that the return
+	 * actually occurs with interrupts still disabled).
+	 */
+
+	ldr		r14, =EXC_RETURN_PRIVTHR	/* Load the special value */
+#endif
+
+	/* Restore the interrupt state */
+
+#ifdef CONFIG_ARMV8M_USEBASEPRI
+	msr		basepri, r3				/* Restore interrupts priority masking */
+#else
+	msr		primask, r3				/* Restore interrupts */
+#endif
+
+	/* Always return with R14 containing the special value that will: (1)
+	 * return to thread mode, and (2) continue to use the MSP
+	 */
+
+	bx		r14						/* And return */
+	.size	exception_common, .-exception_common
+
+/************************************************************************************************
+ *  Name: g_intstackalloc/g_intstackbase
+ *
+ * Description:
+ *   Shouldn't happen
+ *
+ ************************************************************************************************/
+
+#if !defined(CONFIG_SMP) && CONFIG_ARCH_INTERRUPTSTACK > 7
+	.bss
+	.global	g_intstackalloc
+	.global	g_intstackbase
+	.align	8
+g_intstackalloc:
+	.skip	((CONFIG_ARCH_INTERRUPTSTACK + 4) & ~7)
+g_intstackbase:
+	.size	g_intstackalloc, .-g_intstackalloc
+#endif
+
+	.end
diff --git a/arch/arm/src/armv8-m/up_memfault.c b/arch/arm/src/armv8-m/up_memfault.c
new file mode 100755
index 0000000..70a0e61
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_memfault.c
@@ -0,0 +1,77 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_memfault.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <assert.h>
+#include <debug.h>
+
+#include <arch/irq.h>
+
+#include "up_arch.h"
+#include "nvic.h"
+#include "up_internal.h"
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#ifdef CONFIG_DEBUG_MEMFAULT
+# define mferr(format, ...)  _alert(format, ##__VA_ARGS__)
+# define mfinfo(format, ...) _alert(format, ##__VA_ARGS__)
+#else
+# define mferr(x...)
+# define mfinfo(x...)
+#endif
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_memfault
+ *
+ * Description:
+ *   This is the Memory Management Fault exception handler.  Normally we get
+ *   here when the Cortex-M MPU is enabled and an MPU fault is detected.
+ *   However, I understand that there are other error conditions that can
+ *   also generate memory management faults.
+ *
+ ****************************************************************************/
+
+int up_memfault(int irq, FAR void *context, FAR void *arg)
+{
+  /* Dump some memory management fault info */
+
+  up_irq_save();
+  _alert("PANIC!!! Memory Management Fault:\n");
+  mfinfo("  IRQ: %d context: %p\n", irq, context);
+  _alert("  CFAULTS: %08x MMFAR: %08x\n",
+        getreg32(NVIC_CFAULTS), getreg32(NVIC_MEMMANAGE_ADDR));
+  mfinfo("  BASEPRI: %08x PRIMASK: %08x IPSR: %08x CONTROL: %08x\n",
+         getbasepri(), getprimask(), getipsr(), getcontrol());
+
+  PANIC();
+  return OK; /* Won't get here */
+}
diff --git a/arch/arm/src/armv8-m/up_mpu.c b/arch/arm/src/armv8-m/up_mpu.c
new file mode 100755
index 0000000..3bbaa98
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_mpu.c
@@ -0,0 +1,388 @@
+/*****************************************************************************
+ * arch/arm/src/armv8-m/up_mpu.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Included Files
+ *****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "mpu.h"
+#include "up_internal.h"
+
+/*****************************************************************************
+ * Pre-processor Definitions
+ *****************************************************************************/
+
+/* Configuration *************************************************************/
+
+#ifndef CONFIG_ARM_MPU_NREGIONS
+#  define CONFIG_ARM_MPU_NREGIONS 8
+#endif
+
+/*****************************************************************************
+ * Private Data
+ *****************************************************************************/
+
+/* These sets represent the set of disabled memory sub-regions.  A bit set
+ * corresponds to a disabled sub-region; the LS bit corresponds to the first
+ * region.
+ *
+ * The g_ms_regionmask array is indexed by the number of subregions at the
+ * end of the region:  0 means no sub-regions are available(0xff) and 8 means
+ * all subregions are available (0x00).
+ */
+
+static const uint8_t g_ms_regionmask[9] =
+{
+  0xff, 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00
+};
+
+/* The g_ls_regionmask array is indexed by the number of subregions at the
+ * beginning of the region:  0 means no sub-regions need be disabled (0x00)
+ * and 8 means all subregions must be disabled (0xff).
+ */
+
+static const uint8_t g_ls_regionmask[9] =
+{
+  0x00, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff
+};
+
+/* The next available region number */
+
+static uint8_t g_region;
+
+/*****************************************************************************
+ * Private Functions
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Name: mpu_subregion_ms
+ *
+ * Description:
+ *   Given (1) the size of the memory to be mapped and (2) the log2 size
+ *   of the mapping to use, determine the minimal set of sub-regions to be
+ *   disabled at the higher end of the region.
+ *
+ * Assumption:
+ *   l2size has the same properties as the return value from
+ *   mpu_log2regionceil()
+ *
+ *****************************************************************************/
+
+static inline uint32_t mpu_subregion_ms(size_t size, uint8_t l2size)
+{
+  unsigned int nsrs;
+  uint32_t     asize;
+  uint32_t     mask;
+
+  /* Examples with l2size = 12:
+   *
+   *         Shifted Adjusted        Number      Sub-Region
+   * Size    Mask    Size      Shift Sub-Regions Bitset
+   * 0x1000  0x01ff  0x1000    9     8           0x00
+   * 0x0c00  0x01ff  0x0c00    9     6           0xc0
+   * 0x0c40  0x01ff  0x0e00    9     7           0x80
+   */
+
+  if (l2size < 32)
+    {
+      mask  = ((1 << l2size) - 1) >> 3; /* Shifted mask */
+    }
+
+  /* The 4Gb region size is a special case */
+
+  else
+    {
+      /* NOTE: There is no way to represent a 4Gb region size in the 32-bit
+       * input.
+       */
+
+      mask = 0x1fffffff;          /* Shifted mask */
+    }
+
+  asize = (size + mask) & ~mask;  /* Adjusted size */
+  nsrs  = asize >> (l2size - 3);  /* Number of subregions */
+  return g_ms_regionmask[nsrs];
+}
+
+/*****************************************************************************
+ * Name: mpu_subregion_ls
+ *
+ * Description:
+ *   Given (1) the offset to the beginning of data in the region and (2) the
+ *   log2 size of the mapping to use, determine the minimal set of
+ *   sub-regions to be disabled at the lower end of the region in order to
+ *   span that memory region.
+ *
+ * Assumption:
+ *   l2size has the same properties as the return value from
+ *   mpu_log2regionceil()
+ *
+ *****************************************************************************/
+
+static inline uint32_t mpu_subregion_ls(size_t offset, uint8_t l2size)
+{
+  unsigned int nsrs;
+  uint32_t     aoffset;
+  uint32_t     mask;
+
+  /* Examples with l2size = 12:
+   *
+   *         Shifted Adjusted        Number      Sub-Region
+   * Offset  Mask    Offset    Shift Sub-Regions Bitset
+   * 0x0000  0x01ff  0x0000    9     8           0x00
+   * 0x0400  0x01ff  0x0400    9     6           0x03
+   * 0x02c0  0x01ff  0x0200    9     7           0x01
+   */
+
+  if (l2size < 32)
+    {
+      mask  = ((1 << l2size)-1) >> 3; /* Shifted mask */
+    }
+
+  /* The 4Gb region size is a special case */
+
+  else
+    {
+      /* NOTE: There is no way to represent a 4Gb region size in the 32-bit
+       * input.
+       */
+
+      mask = 0x1fffffff;              /* Shifted mask */
+    }
+
+  aoffset = offset & ~mask;           /* Adjusted offset */
+  nsrs    = aoffset >> (l2size - 3);  /* Number of subregions */
+  return g_ls_regionmask[nsrs];
+}
+
+/*****************************************************************************
+ * Public Functions
+ *****************************************************************************/
+
+/*****************************************************************************
+ * Name: mpu_allocregion
+ *
+ * Description:
+ *   Allocate the next region
+ *
+ * Assumptions:
+ *   - Regions are never deallocated
+ *   - Regions are only allocated early in initialization, so no special
+ *     protection against re-entrancy is required;
+ *
+ *****************************************************************************/
+
+unsigned int mpu_allocregion(void)
+{
+  DEBUGASSERT(g_region < CONFIG_ARM_MPU_NREGIONS);
+  return (unsigned int)g_region++;
+}
+
+/*****************************************************************************
+ * Name: mpu_log2regionceil
+ *
+ * Description:
+ *   Determine the smallest value of l2size (log base 2 size) such that the
+ *   following is true:
+ *
+ *   size <= (1 << l2size)
+ *
+ *****************************************************************************/
+
+uint8_t mpu_log2regionceil(size_t size)
+{
+  uint8_t l2size;
+
+  /* The minimum permitted region size is 32 bytes (log2(32) = 5). */
+
+  for (l2size = 5; l2size < 32 && size > (1 << l2size); l2size++);
+  return l2size;
+}
+
+/*****************************************************************************
+ * Name: mpu_log2regionfloor
+ *
+ * Description:
+ *   Determine the largest value of l2size (log base 2 size) such that the
+ *   following is true:
+ *
+ *   size >= (1 << l2size)
+ *
+ *****************************************************************************/
+
+uint8_t mpu_log2regionfloor(size_t size)
+{
+  uint8_t l2size = mpu_log2regionceil(size);
+
+  if (l2size > 4 && size < (1 << l2size))
+    {
+      l2size--;
+    }
+
+  return l2size;
+}
+
+/*****************************************************************************
+ * Name: mpu_subregion
+ *
+ * Description:
+ *   Given the size of the (1) memory to be mapped and (2) the log2 size
+ *   of the mapping to use, determine the minimal sub-region set to span
+ *   that memory region.
+ *
+ * Assumption:
+ *   l2size has the same properties as the return value from
+ *   mpu_log2regionceil()
+ *
+ *****************************************************************************/
+
+uint32_t mpu_subregion(uintptr_t base, size_t size, uint8_t l2size)
+{
+  uint32_t mask;
+  size_t offset;
+  uint32_t ret;
+
+  /* Eight subregions are supported.  The representation is as an 8-bit
+   * value with the LS bit corresponding to subregion 0.  A bit is set
+   * to disable the sub-region.
+   *
+   * l2size: Log2 of the actual region size is <= (1 << l2size);
+   */
+
+  DEBUGASSERT(l2size > 4 && size <= (1 << l2size));
+
+  /* For region sizes of 32, 64, and 128 bytes, the effect of setting
+   * one or more bits of the SRD field to 1 is UNPREDICTABLE.
+   */
+
+  if (l2size < 8)
+    {
+      return 0;
+    }
+
+  /* Calculate the offset of the base address into the aligned region. */
+
+  mask   = (1 << l2size) - 1;
+  offset = base & mask;
+
+  /* Calculate the mask need to handle disabled subregions at the end of the
+   * region
+   */
+
+  ret = mpu_subregion_ms(size + offset, l2size);
+
+  /* Then OR in the mask need to handle disabled subregions at the beginning
+   * of the region.
+   */
+
+  ret |= mpu_subregion_ls(offset, l2size);
+  return ret;
+}
+
+/*****************************************************************************
+ * Name: mpu_control
+ *
+ * Description:
+ *   Configure and enable (or disable) the MPU
+ *
+ *****************************************************************************/
+
+void mpu_control(bool enable, bool hfnmiena, bool privdefena)
+{
+  uint32_t regval = 0;
+
+  if (enable)
+    {
+      regval |= MPU_CTRL_ENABLE; /* Enable the MPU */
+
+      if (hfnmiena)
+        {
+           regval |= MPU_CTRL_HFNMIENA; /* Enable MPU during hard fault, NMI, and FAULTMASK */
+        }
+
+      if (privdefena)
+        {
+          regval |= MPU_CTRL_PRIVDEFENA; /* Enable privileged access to default memory map */
+        }
+    }
+
+  putreg32(regval, MPU_CTRL);
+}
+
+/*****************************************************************************
+ * Name: mpu_configure_region
+ *
+ * Description:
+ *   Configure a region for privileged, strongly ordered memory
+ *
+ *****************************************************************************/
+
+void mpu_configure_region(uintptr_t base, size_t size,
+                                        uint32_t flags)
+{
+  unsigned int region = mpu_allocregion();
+  uint32_t     regval;
+  uint8_t      l2size;
+  uint8_t      subregions;
+  uintptr_t    alignedbase;
+
+  /* Ensure the base address alignment
+   *
+   * ARMv8-M Architecture Reference Manual
+   * B3.5.8 MPU Region Base Address Register, MPU_RBAR
+   * "Software must ensure that the value written to the ADDR field
+   * aligns with the size of the selected region."
+   */
+
+  alignedbase  = base & MPU_RBAR_ADDR_MASK;
+  l2size       = mpu_log2regionceil(size + base - alignedbase);
+  alignedbase &= ~((1 << l2size) - 1);
+  l2size       = mpu_log2regionceil(size + base - alignedbase);
+
+  DEBUGASSERT(alignedbase + (1 << l2size) >= base + size);
+  DEBUGASSERT(l2size == 5 || alignedbase + (1 << (l2size - 1)) < base + size);
+  DEBUGASSERT((alignedbase & MPU_RBAR_ADDR_MASK) == alignedbase);
+  DEBUGASSERT((alignedbase & ((1 << l2size) - 1)) == 0);
+
+  /* Select the region */
+
+  putreg32(region, MPU_RNR);
+
+  /* Select the region base address */
+
+  putreg32(alignedbase | region | MPU_RBAR_VALID, MPU_RBAR);
+
+  /* Select the region size and the sub-region map */
+
+  subregions = mpu_subregion(base, size, l2size);
+
+  /* Then configure the region */
+
+  regval = MPU_RASR_ENABLE                              | /* Enable region  */
+           MPU_RASR_SIZE_LOG2((uint32_t)l2size)         | /* Region size    */
+           ((uint32_t)subregions << MPU_RASR_SRD_SHIFT) | /* Sub-regions    */
+           flags;
+  putreg32(regval, MPU_RASR);
+}
diff --git a/arch/arm/src/armv8-m/up_ramvec_attach.c b/arch/arm/src/armv8-m/up_ramvec_attach.c
new file mode 100755
index 0000000..fa83f4c
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_ramvec_attach.c
@@ -0,0 +1,95 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_ramvec_attach.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <errno.h>
+#include <debug.h>
+
+#include <nuttx/irq.h>
+#include <nuttx/arch.h>
+
+#include "ram_vectors.h"
+
+#ifdef CONFIG_ARCH_RAMVECTORS
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/* Common exception entrypoint */
+
+void exception_common(void);
+
+/****************************************************************************
+ * Name: up_ramvec_attach
+ *
+ * Description:
+ *   Configure the ram vector table so that IRQ number 'irq' will be
+ *   dispatched by hardware to 'vector'
+ *
+ ****************************************************************************/
+
+int up_ramvec_attach(int irq, up_vector_t vector)
+{
+  int ret = -EINVAL;
+
+  irqinfo("%s IRQ%d\n", vector ? "Attaching" : "Detaching", irq);
+
+  if ((unsigned)irq < ARMV8M_VECTAB_SIZE)
+    {
+      irqstate_t flags;
+
+      /* If the new vector is NULL, then the vector is being detached. In
+       * this case, disable the interrupt and direct any interrupts to the
+       * common exception handler.
+       */
+
+      flags = enter_critical_section();
+      if (vector == NULL)
+        {
+          /* Disable the interrupt if we can before detaching it.  We might
+           * not be able to do this for all interrupts.
+           */
+
+          up_disable_irq(irq);
+
+          /* Detaching the vector really means re-attaching it to the
+           * common exception handler.
+           */
+
+           vector = exception_common;
+        }
+
+      /* Save the new vector in the vector table */
+
+      g_ram_vectors[irq] = vector;
+      leave_critical_section(flags);
+      ret = OK;
+    }
+
+  return ret;
+}
+
+#endif /* CONFIG_ARCH_RAMVECTORS */
diff --git a/arch/arm/src/armv8-m/up_ramvec_initialize.c b/arch/arm/src/armv8-m/up_ramvec_initialize.c
new file mode 100755
index 0000000..2c16d03
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_ramvec_initialize.c
@@ -0,0 +1,154 @@
+/****************************************************************************
+ * arch/arm/src/armv8-m/up_ramvec_initialize.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <debug.h>
+
+#include <nuttx/arch.h>
+#include <nuttx/irq.h>
+
+#include "nvic.h"
+#include "ram_vectors.h"
+
+#include "chip.h"             /* May redefine VECTAB fields */
+#include "up_arch.h"
+#include "up_internal.h"
+
+#ifdef CONFIG_ARCH_RAMVECTORS
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* Vector Table Offset Register (VECTAB).  This mask seems to vary among
+ * ARMv8-M implementations.  It may need to be redefined in some
+ * architecture-specific header file. By default, the base address of the
+ * new vector table must be aligned to the size of the vector table extended
+ * to the next larger power of 2.
+ */
+
+#ifndef NVIC_VECTAB_TBLOFF_MASK
+#  if ARMV8M_VECTAB_SIZE > 512
+#    define NVIC_VECTAB_TBLOFF_MASK     (0xfffff000)
+#  elif ARMV8M_VECTAB_SIZE > 256
+#    define NVIC_VECTAB_TBLOFF_MASK     (0xfffff800)
+#  elif ARMV8M_VECTAB_SIZE > 128
+#    define NVIC_VECTAB_TBLOFF_MASK     (0xfffffc00)
+#  elif ARMV8M_VECTAB_SIZE > 64
+#    define NVIC_VECTAB_TBLOFF_MASK     (0xfffffe00)
+#  elif ARMV8M_VECTAB_SIZE > 32
+#    define NVIC_VECTAB_TBLOFF_MASK     (0xffffff00)
+#  else
+#    define NVIC_VECTAB_TBLOFF_MASK     (0xffffff80)
+#  endif
+#endif
+
+/* Alignment ****************************************************************/
+
+/* Per the ARMv8M Architecture reference manual, the NVIC vector table
+ * requires 7-bit address alignment (i.e, bits 0-6 of the address of the
+ * vector table must be zero).  In this case alignment to a 128 byte address
+ * boundary is sufficient.
+ *
+ * Some parts, such as the LPC17xx/LPC40xx family, require alignment to a 256
+ * byte address boundary.  Any other unusual alignment requirements for the
+ * vector can be specified for a given architecture be redefining
+ * NVIC_VECTAB_TBLOFF_MASK in the chip-specific chip.h header file for the
+ * appropriate mask.
+ */
+
+#define RAMVEC_ALIGN ((~NVIC_VECTAB_TBLOFF_MASK & 0xffff) + 1)
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+/* If CONFIG_ARCH_RAMVECTORS is defined, then the ARM logic must provide
+ * ARM-specific implementations of up_ramvec_initialize(), irq_attach(), and
+ * irq_dispatch.  In this case, it is also assumed that the ARM vector
+ * table resides in RAM, has the name up_ram_vectors, and has been
+ * properly positioned and aligned in memory by the linker script.
+ *
+ * REVISIT: Can this alignment requirement vary from core-to-core?  Yes, it
+ * depends on the number of vectors supported by the MCU. The safest thing
+ * to do is to put the vector table at the beginning of RAM in order to force
+ * the highest alignment possible.
+ */
+
+up_vector_t g_ram_vectors[ARMV8M_VECTAB_SIZE]
+  __attribute__ ((section (".ram_vectors"), aligned (RAMVEC_ALIGN)));
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_ramvec_initialize
+ *
+ * Description:
+ *   Copy vectors to RAM and configure the NVIC to use the RAM vectors.
+ *
+ ****************************************************************************/
+
+void up_ramvec_initialize(void)
+{
+  const up_vector_t *src;
+  up_vector_t *dest;
+  int i;
+
+  /* The vector table must be aligned */
+
+  DEBUGASSERT(((uint32_t)g_ram_vectors & ~NVIC_VECTAB_TBLOFF_MASK) == 0);
+
+  /* Copy the ROM vector table at address zero to RAM vector table.
+   *
+   * This must be done BEFORE the MPU is enabled if the MPU is being used to
+   * protect against NULL pointer references.
+   */
+
+  src  = (const CODE up_vector_t *)getreg32(NVIC_VECTAB);
+  dest = g_ram_vectors;
+
+  irqinfo("src=%p dest=%p\n", src, dest);
+
+  for (i = 0; i < ARMV8M_VECTAB_SIZE; i++)
+    {
+      *dest++ = *src++;
+    }
+
+  /* Now configure the NVIC to use the new vector table. */
+
+  putreg32((uint32_t)g_ram_vectors, NVIC_VECTAB);
+
+  /* The number of bits required to align the RAM vector table seems to vary
+   * from part-to-part.  The following assertion will catch the case where
+   * the table alignment is insufficient.
+   */
+
+  irqinfo("NVIC_VECTAB=%08x\n", getreg32(NVIC_VECTAB));
+  DEBUGASSERT(getreg32(NVIC_VECTAB) == (uint32_t)g_ram_vectors);
+}
+
+#endif /* CONFIG_ARCH_RAMVECTORS */
diff --git a/arch/arm/src/armv8-m/up_releasepending.c b/arch/arm/src/armv8-m/up_releasepending.c
new file mode 100755
index 0000000..b5576c6
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_releasepending.c
@@ -0,0 +1,120 @@
+/****************************************************************************
+ *  arch/arm/src/armv8-m/up_releasepending.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <sched.h>
+#include <debug.h>
+#include <nuttx/arch.h>
+#include <nuttx/sched.h>
+
+#include "sched/sched.h"
+#include "up_internal.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_release_pending
+ *
+ * Description:
+ *   Release any ready-to-run tasks that have
+ *   collected in the pending task list.  This can call a
+ *   context switch if a new task is placed at the head of
+ *   the ready to run list.
+ *
+ ****************************************************************************/
+
+void up_release_pending(void)
+{
+  struct tcb_s *rtcb = this_task();
+
+  sinfo("From TCB=%p\n", rtcb);
+
+  /* Merge the g_pendingtasks list into the ready-to-run task list */
+
+#if 0
+  sched_lock();
+#endif
+
+  if (sched_mergepending())
+    {
+      /* The currently active task has changed!  We will need to switch
+       * contexts.
+       */
+
+      /* Update scheduler parameters */
+
+      sched_suspend_scheduler(rtcb);
+
+      /* Are we operating in interrupt context? */
+
+      if (CURRENT_REGS)
+        {
+          /* Yes, then we have to do things differently. Just copy the
+           * CURRENT_REGS into the OLD rtcb.
+           */
+
+           up_savestate(rtcb->xcp.regs);
+
+          /* Restore the exception context of the rtcb at the (new) head
+           * of the ready-to-run task list.
+           */
+
+          rtcb = this_task();
+
+          /* Update scheduler parameters */
+
+          sched_resume_scheduler(rtcb);
+
+          /* Then switch contexts */
+
+          up_restorestate(rtcb->xcp.regs);
+        }
+
+      /* No, then we will need to perform the user context switch */
+
+      else
+        {
+          struct tcb_s *nexttcb = this_task();
+
+          /* Update scheduler parameters */
+
+          sched_resume_scheduler(nexttcb);
+
+          /* Switch context to the context of the task at the head of the
+           * ready to run list.
+           */
+
+          up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+
+          /* up_switchcontext forces a context switch to the task at the
+           * head of the ready-to-run list.  It does not 'return' in the
+           * normal sense.  When it does return, it is because the blocked
+           * task is again ready to run and has execution priority.
+           */
+        }
+    }
+}
diff --git a/arch/arm/src/armv8-m/up_reprioritizertr.c b/arch/arm/src/armv8-m/up_reprioritizertr.c
new file mode 100755
index 0000000..55d9488
--- /dev/null
+++ b/arch/arm/src/armv8-m/up_reprioritizertr.c
@@ -0,0 +1,174 @@
+/****************************************************************************
+ *  arch/arm/src/armv8-m/up_reprioritizertr.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <sched.h>
+#include <debug.h>
+#include <nuttx/arch.h>
+#include <nuttx/sched.h>
+
+#include "sched/sched.h"
+#include "up_internal.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_reprioritize_rtr
+ *
+ * Description:
+ *   Called when the priority of a running or
+ *   ready-to-run task changes and the reprioritization will
+ *   cause a context switch.  Two cases:
+ *
+ *   1) The priority of the currently running task drops and the next
+ *      task in the ready to run list has priority.
+ *   2) An idle, ready to run task's priority has been raised above the
+ *      priority of the current, running task and it now has the
+ *      priority.
+ *
+ * Input Parameters:
+ *   tcb: The TCB of the task that has been reprioritized
+ *   priority: The new task priority
+ *
+ ****************************************************************************/
+
+void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
+{
+  /* Verify that the caller is sane */
+
+  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
+      tcb->task_state > LAST_READY_TO_RUN_STATE
+#if SCHED_PRIORITY_MIN > 0
+      || priority < SCHED_PRIORITY_MIN
+#endif
+#if SCHED_PRIORITY_MAX < UINT8_MAX
+      || priority > SCHED_PRIORITY_MAX
+#endif
+    )
+    {
+       DEBUGPANIC();
+    }
+  else
+    {
+      struct tcb_s *rtcb = this_task();
+      bool switch_needed;
+
+      sinfo("TCB=%p PRI=%d\n", tcb, priority);
+
+      /* Remove the tcb task from the ready-to-run list.
+       * sched_removereadytorun will return true if we just removed the head
+       * of the ready to run list.
+       */
+
+      switch_needed = sched_removereadytorun(tcb);
+
+      /* Setup up the new task priority */
... 2910 lines suppressed ...


Mime
View raw message