CC Maintainers.

> -----Original Message-----
> From: devel@edk2.groups.io [mailto:devel@edk2.groups.io] On Behalf Of
> Abner Chang
> Sent: Monday, September 23, 2019 8:32 AM
> To: devel@edk2.groups.io
> Cc: Chang, Abner (HPS SW/FW Technologist) <abner.ch...@hpe.com>
> Subject: [edk2-devel] [edk2-staging/RISC-V-V2 PATCH v2 07/29]
> MdePkg/BaseLib: BaseLib for RISC-V RV64 Processor.
> 
> Add RISC-V RV64 BaseLib functions.
> 
> Signed-off-by: Abner Chang <abner.ch...@hpe.com>
> ---
>  MdePkg/Include/Library/BaseLib.h                   |  26 ++
>  MdePkg/Library/BaseLib/BaseLib.inf                 |  18 +-
>  MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c     |  27 +++
>  MdePkg/Library/BaseLib/RiscV64/CpuPause.c          |  29 +++
>  MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c |  24 ++
>  MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c  |  25 ++
>  MdePkg/Library/BaseLib/RiscV64/FlushCache.S        |  21 ++
>  MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c |  35 +++
>  .../Library/BaseLib/RiscV64/InternalSwitchStack.c  |  55 +++++
>  MdePkg/Library/BaseLib/RiscV64/LongJump.c          |  32 +++
>  .../Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S   |  14 ++
>  MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S     |  14 ++
>  MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S    |  32 +++
>  .../Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S |  55 +++++
>  MdePkg/Library/BaseLib/RiscV64/Unaligned.c         | 264 +++++++++++++++++++++
>  15 files changed, 670 insertions(+), 1 deletion(-)
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/CpuPause.c
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/FlushCache.S
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/InternalSwitchStack.c
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/LongJump.c
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S
>  create mode 100644 MdePkg/Library/BaseLib/RiscV64/Unaligned.c
> 
> diff --git a/MdePkg/Include/Library/BaseLib.h b/MdePkg/Include/Library/BaseLib.h
> index 2a75bc0..b8c8512 100644
> --- a/MdePkg/Include/Library/BaseLib.h
> +++ b/MdePkg/Include/Library/BaseLib.h
> @@ -4,6 +4,8 @@
> 
>  Copyright (c) 2006 - 2019, Intel Corporation. All rights reserved.<BR>
>  Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
> +Portions Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
>  SPDX-License-Identifier: BSD-2-Clause-Patent
> 
>  **/
> @@ -124,6 +126,30 @@ typedef struct {
> 
>  #endif  // defined (MDE_CPU_AARCH64)
> 
> +#if defined (MDE_CPU_RISCV64)
> +///
> +/// The RISC-V architecture context buffer used by SetJump() and LongJump().
> +///
> +typedef struct {
> +  UINT64                            RA;
> +  UINT64                            S0;
> +  UINT64                            S1;
> +  UINT64                            S2;
> +  UINT64                            S3;
> +  UINT64                            S4;
> +  UINT64                            S5;
> +  UINT64                            S6;
> +  UINT64                            S7;
> +  UINT64                            S8;
> +  UINT64                            S9;
> +  UINT64                            S10;
> +  UINT64                            S11;
> +  UINT64                            SP;
> +} BASE_LIBRARY_JUMP_BUFFER;
> +
> +#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT 8
> +
> +#endif // defined (MDE_CPU_RISCV64)
> 
>  //
>  // String Services
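
For readers following the RISC-V port: the jump buffer above is only ever
consumed through the generic SetJump()/LongJump() pair that BaseLib.h already
declares for every architecture. A minimal caller sketch (illustrative only,
not part of this patch; JumpBufferExample is a made-up name):

  #include <Library/BaseLib.h>
  #include <Library/DebugLib.h>

  STATIC BASE_LIBRARY_JUMP_BUFFER  mJumpBuffer;

  VOID
  JumpBufferExample (
    VOID
    )
  {
    UINTN  Value;

    //
    // SetJump() returns 0 on the direct call; a later LongJump() resumes
    // execution here and makes SetJump() appear to return the non-zero Value.
    //
    Value = SetJump (&mJumpBuffer);
    if (Value == 0) {
      //
      // First pass: transfer control back through the saved context.
      //
      LongJump (&mJumpBuffer, 1);
    } else {
      DEBUG ((DEBUG_INFO, "Resumed via LongJump, Value = %d\n", (UINT32)Value));
    }
  }

The 8-byte alignment requirement and the RA/S0-S11/SP layout match what the
RiscVSetJumpLongJump.S implementation later in this patch stores and reloads.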
> diff --git a/MdePkg/Library/BaseLib/BaseLib.inf b/MdePkg/Library/BaseLib/BaseLib.inf
> index 3586beb..28d5795 100644
> --- a/MdePkg/Library/BaseLib/BaseLib.inf
> +++ b/MdePkg/Library/BaseLib/BaseLib.inf
> @@ -4,6 +4,7 @@
>  #  Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
>  #  Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
>  #  Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
> +#  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
>  #
>  #  SPDX-License-Identifier: BSD-2-Clause-Patent
>  #
> @@ -20,7 +21,7 @@
>    LIBRARY_CLASS                  = BaseLib
> 
>  #
> -#  VALID_ARCHITECTURES           = IA32 X64 EBC ARM AARCH64
> +#  VALID_ARCHITECTURES           = IA32 X64 EBC ARM AARCH64 RISCV64
>  #
> 
>  [Sources]
> @@ -381,6 +382,21 @@
>    AArch64/CpuBreakpoint.asm         | MSFT
>    AArch64/SpeculationBarrier.asm    | MSFT
> 
> +[Sources.RISCV64]
> +  Math64.c
> +  RiscV64/Unaligned.c
> +  RiscV64/InternalSwitchStack.c
> +  RiscV64/CpuBreakpoint.c
> +  RiscV64/GetInterruptState.c
> +  RiscV64/DisableInterrupts.c
> +  RiscV64/EnableInterrupts.c
> +  RiscV64/CpuPause.c
> +  RiscV64/RiscVSetJumpLongJump.S    | GCC
> +  RiscV64/RiscVCpuBreakpoint.S      | GCC
> +  RiscV64/RiscVCpuPause.S           | GCC
> +  RiscV64/RiscVInterrupt.S          | GCC
> +  RiscV64/FlushCache.S              | GCC
> +
>  [Packages]
>    MdePkg/MdePkg.dec
> 
> diff --git a/MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c b/MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c
> new file mode 100644
> index 0000000..d82b1d5
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/CpuBreakpoint.c
> @@ -0,0 +1,27 @@
> +/** @file
> +  CPU breakpoint for RISC-V
> +
> +  Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
> +  SPDX-License-Identifier: BSD-2-Clause-Patent
> +**/
> +
> +#include "BaseLibInternals.h"
> +
> +extern VOID RiscVCpuBreakpoint (VOID);
> +
> +/**
> +  Generates a breakpoint on the CPU.
> +
> +  Generates a breakpoint on the CPU. The breakpoint must be implemented such
> +  that code can resume normal execution after the breakpoint.
> +
> +**/
> +VOID
> +EFIAPI
> +CpuBreakpoint (
> +  VOID
> +  )
> +{
> +  RiscVCpuBreakpoint ();
> +}
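
CpuBreakpoint() here just wraps the ebreak instruction in RiscVCpuBreakpoint.S.
A small, hypothetical usage sketch (CheckConsistency is a made-up function, not
part of BaseLib or this patch):

  #include <Library/BaseLib.h>

  VOID
  CheckConsistency (
    IN BOOLEAN  Consistent
    )
  {
    if (!Consistent) {
      //
      // Trap into an attached debugger; per the contract above, execution
      // must be able to resume after the breakpoint.
      //
      CpuBreakpoint ();
    }
  }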
> diff --git a/MdePkg/Library/BaseLib/RiscV64/CpuPause.c b/MdePkg/Library/BaseLib/RiscV64/CpuPause.c
> new file mode 100644
> index 0000000..8eb6b65
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/CpuPause.c
> @@ -0,0 +1,29 @@
> +/** @file
> +  CPU pause for RISC-V
> +
> +  Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
> +  SPDX-License-Identifier: BSD-2-Clause-Patent
> +**/
> +
> +#include "BaseLibInternals.h"
> +
> +extern VOID RiscVCpuPause (VOID);
> +
> +
> +/**
> +  Requests CPU to pause for a short period of time.
> +
> +  Requests CPU to pause for a short period of time. Typically used in MP
> +  systems to prevent memory starvation while waiting for a spin lock.
> +
> +**/
> +VOID
> +EFIAPI
> +CpuPause (
> +  VOID
> +  )
> +{
> +  RiscVCpuPause ();
> +}
> +
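
Since RiscVCpuPause currently expands to a plain nop, CpuPause() is purely a
busy-wait hint on this port. A typical spin-wait sketch (mDataReady is a
hypothetical flag written by another hart or an interrupt handler, not part of
this patch):

  #include <Library/BaseLib.h>

  volatile BOOLEAN  mDataReady = FALSE;

  VOID
  WaitForDataReady (
    VOID
    )
  {
    while (!mDataReady) {
      //
      // Yield a little pipeline bandwidth while spinning; the flag is
      // re-read on every iteration because it is declared volatile.
      //
      CpuPause ();
    }
  }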
> diff --git a/MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c b/MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c
> new file mode 100644
> index 0000000..7ee5eb1
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/DisableInterrupts.c
> @@ -0,0 +1,24 @@
> +/** @file
> +  CPU disable interrupt function for RISC-V
> +
> +  Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
> +  SPDX-License-Identifier: BSD-2-Clause-Patent
> +**/
> +
> +#include "BaseLibInternals.h"
> +
> +extern VOID RiscVDisableSupervisorModeInterrupts (VOID);
> +
> +/**
> +  Disables CPU interrupts.
> +
> +**/
> +VOID
> +EFIAPI
> +DisableInterrupts (
> +  VOID
> +  )
> +{
> +  RiscVDisableSupervisorModeInterrupts ();
> +}
> +
> diff --git a/MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c b/MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c
> new file mode 100644
> index 0000000..9aa0d9a
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/EnableInterrupts.c
> @@ -0,0 +1,25 @@
> +/** @file
> +  CPU enable interrupt function for RISC-V
> +
> +  Copyright (c) 2016-2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
> +  SPDX-License-Identifier: BSD-2-Clause-Patent
> +**/
> +
> +#include "BaseLibInternals.h"
> +
> +extern VOID RiscVEnableSupervisorModeInterrupt (VOID);
> +
> +/**
> +  Enables CPU interrupts.
> +
> +**/
> +VOID
> +EFIAPI
> +EnableInterrupts (
> +  VOID
> +  )
> +{
> +  RiscVEnableSupervisorModeInterrupt ();
> +}
> +
> diff --git a/MdePkg/Library/BaseLib/RiscV64/FlushCache.S b/MdePkg/Library/BaseLib/RiscV64/FlushCache.S
> new file mode 100644
> index 0000000..0ef0213
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/FlushCache.S
> @@ -0,0 +1,21 @@
> +//------------------------------------------------------------------------------
> +//
> +// RISC-V cache operation.
> +//
> +// Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +//
> +// SPDX-License-Identifier: BSD-2-Clause-Patent
> +//
> +//------------------------------------------------------------------------------
> +
> +.align 3
> +ASM_GLOBAL ASM_PFX(RiscVInvalidateInstCacheAsm)
> +ASM_GLOBAL ASM_PFX(RiscVInvalidateDataCacheAsm)
> +
> +ASM_PFX(RiscVInvalidateInstCacheAsm):
> +    fence.i
> +    ret
> +
> +ASM_PFX(RiscVInvalidateDataCacheAsm):
> +    fence
> +    ret
> diff --git a/MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c b/MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c
> new file mode 100644
> index 0000000..8f764fb
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/GetInterruptState.c
> @@ -0,0 +1,35 @@
> +/** @file
> +  CPU get interrupt state function for RISC-V
> +
> +  Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
> +  SPDX-License-Identifier: BSD-2-Clause-Patent
> +**/
> +
> +#include "BaseLibInternals.h"
> +
> +extern UINT32 RiscVGetSupervisorModeInterrupts (VOID);
> +
> +/**
> +  Retrieves the current CPU interrupt state.
> +
> +  Returns TRUE if interrupts are currently enabled. Otherwise
> +  returns FALSE.
> +
> +  @retval TRUE  CPU interrupts are enabled.
> +  @retval FALSE CPU interrupts are disabled.
> +
> +**/
> +BOOLEAN
> +EFIAPI
> +GetInterruptState (
> +  VOID
> +  )
> +{
> +  UINT32  RetValue;
> +
> +  RetValue = RiscVGetSupervisorModeInterrupts ();
> +  return RetValue ? TRUE : FALSE;
> +}
> +
> +
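
GetInterruptState(), DisableInterrupts() and EnableInterrupts() are usually
combined into a save/mask/restore pattern. A sketch of that pattern follows
(UpdateSharedState is a made-up caller; BaseLib's SaveAndDisableInterrupts()
and SetInterruptState() wrap the same idea):

  #include <Library/BaseLib.h>

  VOID
  UpdateSharedState (
    VOID
    )
  {
    BOOLEAN  InterruptsEnabled;

    //
    // Save the current SIE state, then mask interrupts around the update.
    //
    InterruptsEnabled = GetInterruptState ();
    DisableInterrupts ();

    //
    // ... touch data shared with an interrupt handler ...
    //

    //
    // Restore the previous state instead of unconditionally re-enabling.
    //
    if (InterruptsEnabled) {
      EnableInterrupts ();
    }
  }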
> diff --git a/MdePkg/Library/BaseLib/RiscV64/InternalSwitchStack.c b/MdePkg/Library/BaseLib/RiscV64/InternalSwitchStack.c
> new file mode 100644
> index 0000000..1082d4e
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/InternalSwitchStack.c
> @@ -0,0 +1,55 @@
> +/** @file
> +  Switch stack function for RISC-V
> +
> +  Copyright (c) 2016, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
> +  SPDX-License-Identifier: BSD-2-Clause-Patent
> +**/
> +
> +#include "BaseLibInternals.h"
> +
> +/**
> +  Transfers control to a function starting with a new stack.
> +
> +  Transfers control to the function specified by EntryPoint using the
> +  new stack specified by NewStack and passing in the parameters specified
> +  by Context1 and Context2. Context1 and Context2 are optional and may
> +  be NULL. The function EntryPoint must never return.
> +  Marker will be ignored on IA-32, x64, and EBC.
> +  IPF CPUs expect one additional parameter of type VOID * that specifies
> +  the new backing store pointer.
> +
> +  If EntryPoint is NULL, then ASSERT().
> +  If NewStack is NULL, then ASSERT().
> +
> +  @param  EntryPoint  A pointer to function to call with the new stack.
> +  @param  Context1    A pointer to the context to pass into the EntryPoint
> +                      function.
> +  @param  Context2    A pointer to the context to pass into the EntryPoint
> +                      function.
> +  @param  NewStack    A pointer to the new stack to use for the EntryPoint
> +                      function.
> +  @param  Marker      VA_LIST marker for the variable argument list.
> +
> +**/
> +VOID
> +EFIAPI
> +InternalSwitchStack (
> +  IN      SWITCH_STACK_ENTRY_POINT  EntryPoint,
> +  IN      VOID                      *Context1,   OPTIONAL
> +  IN      VOID                      *Context2,   OPTIONAL
> +  IN      VOID                      *NewStack,
> +  IN      VA_LIST                   Marker
> +  )
> +{
> +  BASE_LIBRARY_JUMP_BUFFER  JumpBuffer;
> +
> +  DEBUG ((DEBUG_INFO, "RISC-V InternalSwitchStack Entry:%x Context1:%x Context2:%x NewStack:%x\n", \
> +          EntryPoint, Context1, Context2, NewStack));
> +  JumpBuffer.RA = (UINTN)EntryPoint;
> +  JumpBuffer.SP = (UINTN)NewStack - sizeof (VOID *);
> +  JumpBuffer.S0 = (UINT64)(UINTN)Context1;
> +  JumpBuffer.S1 = (UINT64)(UINTN)Context2;
> +  LongJump (&JumpBuffer, (UINTN)-1);
> +  ASSERT(FALSE);
> +}
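
InternalSwitchStack() is reached through the public SwitchStack() API, which
validates EntryPoint/NewStack and builds the VA_LIST. A hypothetical caller
sketch for a UEFI driver environment (NewStackEntry, StartOnNewStack and
NEW_STACK_SIZE are made up for illustration, not part of this patch):

  #include <Uefi.h>
  #include <Library/BaseLib.h>
  #include <Library/DebugLib.h>
  #include <Library/MemoryAllocationLib.h>

  #define NEW_STACK_SIZE  SIZE_16KB

  VOID
  EFIAPI
  NewStackEntry (
    IN VOID  *Context1,  OPTIONAL
    IN VOID  *Context2   OPTIONAL
    )
  {
    //
    // Runs on the new stack; per the contract above it must never return.
    //
    for (;;) {
      CpuPause ();
    }
  }

  VOID
  StartOnNewStack (
    VOID
    )
  {
    VOID  *Stack;

    Stack = AllocatePages (EFI_SIZE_TO_PAGES (NEW_STACK_SIZE));
    ASSERT (Stack != NULL);

    //
    // NewStack is the initial stack pointer, i.e. the top of the region,
    // since the RISC-V stack grows downward.
    //
    SwitchStack (NewStackEntry, NULL, NULL, (UINT8 *)Stack + NEW_STACK_SIZE);
  }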
> diff --git a/MdePkg/Library/BaseLib/RiscV64/LongJump.c b/MdePkg/Library/BaseLib/RiscV64/LongJump.c
> new file mode 100644
> index 0000000..a62b882
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/LongJump.c
> @@ -0,0 +1,32 @@
> +/** @file
> +  Long jump implementation of RISC-V
> +
> +  Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
> +  SPDX-License-Identifier: BSD-2-Clause-Patent
> +**/
> +
> +#include "BaseLibInternals.h"
> +
> +
> +/**
> +  Restores the CPU context that was saved with SetJump().
> +
> +  Restores the CPU context from the buffer specified by JumpBuffer.
> +  This function never returns to the caller.
> +  Instead it resumes execution based on the state of JumpBuffer.
> +
> +  @param  JumpBuffer    A pointer to CPU context buffer.
> +  @param  Value         The value to return when the SetJump() context is
> +                        restored.
> +
> +**/
> +VOID
> +EFIAPI
> +InternalLongJump (
> +  IN      BASE_LIBRARY_JUMP_BUFFER  *JumpBuffer,
> +  IN      UINTN                     Value
> +  )
> +{
> +    ASSERT (FALSE);
> +}
> +
> diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S
> new file mode 100644
> index 0000000..1a45e2a
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuBreakpoint.S
> @@ -0,0 +1,14 @@
> +//------------------------------------------------------------------------------
> +//
> +// CpuBreakpoint for RISC-V
> +//
> +// Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +//
> +// SPDX-License-Identifier: BSD-2-Clause-Patent
> +//
> +//------------------------------------------------------------------------------
> +
> +ASM_GLOBAL ASM_PFX(RiscVCpuBreakpoint)
> +ASM_PFX(RiscVCpuBreakpoint):
> +  ebreak
> +  ret
> diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S
> new file mode 100644
> index 0000000..ceba0c0
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/RiscVCpuPause.S
> @@ -0,0 +1,14 @@
> +//------------------------------------------------------------------------------
> +//
> +// CpuPause for RISC-V
> +//
> +// Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +//
> +// SPDX-License-Identifier: BSD-2-Clause-Patent
> +//
> +//------------------------------------------------------------------------------
> +
> +ASM_GLOBAL ASM_PFX(RiscVCpuPause)
> +ASM_PFX(RiscVCpuPause):
> +  nop
> +  ret
> diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S b/MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S
> new file mode 100644
> index 0000000..8fdb544
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S
> @@ -0,0 +1,32 @@
> +//------------------------------------------------------------------------------
> +//
> +// RISC-V Supervisor Mode interrupt enable/disable
> +//
> +// Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +//
> +// SPDX-License-Identifier: BSD-2-Clause-Patent
> +//
> +//------------------------------------------------------------------------------
> +
> +ASM_GLOBAL ASM_PFX(RiscVDisableSupervisorModeInterrupts)
> +ASM_GLOBAL ASM_PFX(RiscVEnableSupervisorModeInterrupt)
> +ASM_GLOBAL ASM_PFX(RiscVGetSupervisorModeInterrupts)
> +
> +# define  MSTATUS_SIE    0x00000002
> +# define  CSR_SSTATUS    0x100
> +
> +ASM_PFX(RiscVDisableSupervisorModeInterrupts):
> +  li   a1, MSTATUS_SIE
> +  csrc CSR_SSTATUS, a1
> +  ret
> +
> +ASM_PFX(RiscVEnableSupervisorModeInterrupt):
> +  li   a1, MSTATUS_SIE
> +  csrs CSR_SSTATUS, a1
> +  ret
> +
> +ASM_PFX(RiscVGetSupervisorModeInterrupts):
> +  csrr a0, CSR_SSTATUS
> +  andi a0, a0, MSTATUS_SIE
> +  ret
> +
> diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S b/MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S
> new file mode 100644
> index 0000000..e72dd7f
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S
> @@ -0,0 +1,55 @@
> +//------------------------------------------------------------------------------
> +//
> +// Set/Long jump for RISC-V
> +//
> +// Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +//
> +// SPDX-License-Identifier: BSD-2-Clause-Patent
> +//
> +//------------------------------------------------------------------------------
> +# define REG_S  sd
> +# define REG_L  ld
> +# define SZREG  8
> +.align 3
> +    .globl  SetJump
> +
> +SetJump:
> +    REG_S ra,  0*SZREG(a0)
> +    REG_S s0,  1*SZREG(a0)
> +    REG_S s1,  2*SZREG(a0)
> +    REG_S s2,  3*SZREG(a0)
> +    REG_S s3,  4*SZREG(a0)
> +    REG_S s4,  5*SZREG(a0)
> +    REG_S s5,  6*SZREG(a0)
> +    REG_S s6,  7*SZREG(a0)
> +    REG_S s7,  8*SZREG(a0)
> +    REG_S s8,  9*SZREG(a0)
> +    REG_S s9, 10*SZREG(a0)
> +    REG_S s10,11*SZREG(a0)
> +    REG_S s11,12*SZREG(a0)
> +    REG_S sp, 13*SZREG(a0)
> +    li    a0, 0
> +    ret
> +
> +    .globl  InternalLongJump
> +InternalLongJump:
> +    REG_L ra,  0*SZREG(a0)
> +    REG_L s0,  1*SZREG(a0)
> +    REG_L s1,  2*SZREG(a0)
> +    REG_L s2,  3*SZREG(a0)
> +    REG_L s3,  4*SZREG(a0)
> +    REG_L s4,  5*SZREG(a0)
> +    REG_L s5,  6*SZREG(a0)
> +    REG_L s6,  7*SZREG(a0)
> +    REG_L s7,  8*SZREG(a0)
> +    REG_L s8,  9*SZREG(a0)
> +    REG_L s9, 10*SZREG(a0)
> +    REG_L s10,11*SZREG(a0)
> +    REG_L s11,12*SZREG(a0)
> +    REG_L sp, 13*SZREG(a0)
> +
> +    add a0, s0, 0
> +    add a1, s1, 0
> +    add a2, s2, 0
> +    add a3, s3, 0
> +    ret
> diff --git a/MdePkg/Library/BaseLib/RiscV64/Unaligned.c b/MdePkg/Library/BaseLib/RiscV64/Unaligned.c
> new file mode 100644
> index 0000000..012d913
> --- /dev/null
> +++ b/MdePkg/Library/BaseLib/RiscV64/Unaligned.c
> @@ -0,0 +1,264 @@
> +/** @file
> +  RISC-V specific functionality for (un)aligned memory read/write.
> +
> +  Copyright (c) 2016 - 2019, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
> +
> +  SPDX-License-Identifier: BSD-2-Clause-Patent
> +**/
> +
> +#include "BaseLibInternals.h"
> +
> +/**
> +  Reads a 16-bit value from memory that may be unaligned.
> +
> +  This function returns the 16-bit value pointed to by Buffer. The function
> +  guarantees that the read operation does not produce an alignment fault.
> +
> +  If the Buffer is NULL, then ASSERT().
> +
> +  @param  Buffer  A pointer to a 16-bit value that may be unaligned.
> +
> +  @return The 16-bit value read from Buffer.
> +
> +**/
> +UINT16
> +EFIAPI
> +ReadUnaligned16 (
> +  IN CONST UINT16              *Buffer
> +  )
> +{
> +  UINT16 Value;
> +  INT8 Count;
> +
> +  ASSERT (Buffer != NULL);
> +
> +  for (Count = sizeof (UINT16) - 1, Value = 0; Count >= 0 ; Count --) {
> +    Value = Value << 8;
> +    Value |= *((UINT8*)Buffer + Count);
> +  }
> +  return Value;
> +}
> +
> +/**
> +  Writes a 16-bit value to memory that may be unaligned.
> +
> +  This function writes the 16-bit value specified by Value to Buffer. Value is
> +  returned. The function guarantees that the write operation does not produce
> +  an alignment fault.
> +
> +  If the Buffer is NULL, then ASSERT().
> +
> +  @param  Buffer  A pointer to a 16-bit value that may be unaligned.
> +  @param  Value   16-bit value to write to Buffer.
> +
> +  @return The 16-bit value to write to Buffer.
> +
> +**/
> +UINT16
> +EFIAPI
> +WriteUnaligned16 (
> +  OUT UINT16                    *Buffer,
> +  IN  UINT16                    Value
> +  )
> +{
> +  INT8 Count;
> +  UINT16 ValueTemp;
> +
> +  ASSERT (Buffer != NULL);
> +
> +  for (Count = 0, ValueTemp = Value; Count < sizeof (UINT16) ; Count ++) {
> +    *((UINT8*)Buffer + Count) = (UINT8)(ValueTemp & 0xff);
> +    ValueTemp = ValueTemp >> 8;
> +  }
> +  return Value;
> +}
> +
> +/**
> +  Reads a 24-bit value from memory that may be unaligned.
> +
> +  This function returns the 24-bit value pointed to by Buffer. The function
> +  guarantees that the read operation does not produce an alignment fault.
> +
> +  If the Buffer is NULL, then ASSERT().
> +
> +  @param  Buffer  A pointer to a 24-bit value that may be unaligned.
> +
> +  @return The 24-bit value read from Buffer.
> +
> +**/
> +UINT32
> +EFIAPI
> +ReadUnaligned24 (
> +  IN CONST UINT32              *Buffer
> +  )
> +{
> +  UINT32 Value;
> +  INT8 Count;
> +
> +  ASSERT (Buffer != NULL);
> +  for (Count = 2, Value = 0; Count >= 0 ; Count --) {
> +    Value = Value << 8;
> +    Value |= *((UINT8*)Buffer + Count);
> +  }
> +  return Value;
> +}
> +
> +/**
> +  Writes a 24-bit value to memory that may be unaligned.
> +
> +  This function writes the 24-bit value specified by Value to Buffer. Value is
> +  returned. The function guarantees that the write operation does not produce
> +  an alignment fault.
> +
> +  If the Buffer is NULL, then ASSERT().
> +
> +  @param  Buffer  A pointer to a 24-bit value that may be unaligned.
> +  @param  Value   24-bit value to write to Buffer.
> +
> +  @return The 24-bit value to write to Buffer.
> +
> +**/
> +UINT32
> +EFIAPI
> +WriteUnaligned24 (
> +  OUT UINT32                    *Buffer,
> +  IN  UINT32                    Value
> +  )
> +{
> +  INT8 Count;
> +  UINT32 ValueTemp;
> +
> +  ASSERT (Buffer != NULL);
> +  for (Count = 0, ValueTemp = Value; Count < 3 ; Count ++) {
> +    *((UINT8*)Buffer + Count) = (UINT8)(ValueTemp & 0xff);
> +    ValueTemp = ValueTemp >> 8;
> +  }
> +  return Value;
> +}
> +
> +/**
> +  Reads a 32-bit value from memory that may be unaligned.
> +
> +  This function returns the 32-bit value pointed to by Buffer. The function
> +  guarantees that the read operation does not produce an alignment fault.
> +
> +  If the Buffer is NULL, then ASSERT().
> +
> +  @param  Buffer  A pointer to a 32-bit value that may be unaligned.
> +
> +  @return The 32-bit value read from Buffer.
> +
> +**/
> +UINT32
> +EFIAPI
> +ReadUnaligned32 (
> +  IN CONST UINT32              *Buffer
> +  )
> +{
> +  UINT32 Value;
> +  INT8 Count;
> +
> +  ASSERT (Buffer != NULL);
> +
> +  for (Count = sizeof (UINT32) - 1, Value = 0; Count >= 0 ; Count --) {
> +    Value = Value << 8;
> +    Value |= *((UINT8*)Buffer + Count);
> +  }
> +  return Value;
> +}
> +
> +/**
> +  Writes a 32-bit value to memory that may be unaligned.
> +
> +  This function writes the 32-bit value specified by Value to Buffer. Value is
> +  returned. The function guarantees that the write operation does not produce
> +  an alignment fault.
> +
> +  If the Buffer is NULL, then ASSERT().
> +
> +  @param  Buffer  A pointer to a 32-bit value that may be unaligned.
> +  @param  Value   The 32-bit value to write to Buffer.
> +
> +  @return The 32-bit value to write to Buffer.
> +
> +**/
> +UINT32
> +EFIAPI
> +WriteUnaligned32 (
> +  OUT UINT32                    *Buffer,
> +  IN  UINT32                    Value
> +  )
> +{
> +  INT8 Count;
> +  UINT32 ValueTemp;
> +
> +  ASSERT (Buffer != NULL);
> +  for (Count = 0, ValueTemp = Value; Count < sizeof (UINT32) ; Count ++) {
> +    *((UINT8*)Buffer + Count) = (UINT8)(ValueTemp & 0xff);
> +    ValueTemp = ValueTemp >> 8;
> +  }
> +  return Value;
> +}
> +
> +/**
> +  Reads a 64-bit value from memory that may be unaligned.
> +
> +  This function returns the 64-bit value pointed to by Buffer. The function
> +  guarantees that the read operation does not produce an alignment fault.
> +
> +  If the Buffer is NULL, then ASSERT().
> +
> +  @param  Buffer  A pointer to a 64-bit value that may be unaligned.
> +
> +  @return The 64-bit value read from Buffer.
> +
> +**/
> +UINT64
> +EFIAPI
> +ReadUnaligned64 (
> +  IN CONST UINT64              *Buffer
> +  )
> +{
> +  UINT64 Value;
> +  INT8 Count;
> +
> +  ASSERT (Buffer != NULL);
> +  for (Count = sizeof (UINT64) - 1, Value = 0; Count >= 0 ; Count --) {
> +    Value = Value << 8;
> +    Value |= *((UINT8*)Buffer + Count);
> +  }
> +  return Value;
> +}
> +
> +/**
> +  Writes a 64-bit value to memory that may be unaligned.
> +
> +  This function writes the 64-bit value specified by Value to Buffer. Value is
> +  returned. The function guarantees that the write operation does not produce
> +  an alignment fault.
> +
> +  If the Buffer is NULL, then ASSERT().
> +
> +  @param  Buffer  A pointer to a 64-bit value that may be unaligned.
> +  @param  Value   The 64-bit value to write to Buffer.
> +
> +  @return The 64-bit value to write to Buffer.
> +
> +**/
> +UINT64
> +EFIAPI
> +WriteUnaligned64 (
> +  OUT UINT64                    *Buffer,
> +  IN  UINT64                    Value
> +  )
> +{
> +  INT8 Count;
> +  UINT64 ValueTemp;
> +
> +  ASSERT (Buffer != NULL);
> +  for (Count = 0, ValueTemp = Value; Count < sizeof (UINT64) ; Count ++) {
> +    *((UINT8*)Buffer + Count) = (UINT8)(ValueTemp & 0xff);
> +    ValueTemp = ValueTemp >> 8;
> +  }
> +  return Value;
> +}
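
These byte-wise helpers matter on RISC-V because misaligned loads and stores
may trap or be emulated slowly depending on the hart. A short sketch of pulling
a packed little-endian field out of a raw byte buffer (GetMessageLength and the
byte offset are made up for illustration, not part of this patch):

  #include <Library/BaseLib.h>

  UINT32
  GetMessageLength (
    IN CONST UINT8  *Message
    )
  {
    //
    // The 32-bit length field starts at byte offset 3, so a direct
    // dereference could be an unaligned access; ReadUnaligned32() reads it
    // one byte at a time instead.
    //
    return ReadUnaligned32 ((CONST UINT32 *)(Message + 3));
  }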
> --
> 2.7.4
> 
> 
> 

