The function ArmCleanDataCacheToPoU() has no users, and its purpose
is unclear, since it uses cache maintenance by set/way to perform
the clean to PoU, which is a dubious practice to begin with. So
remove the declaration and all definitions.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheu...@linaro.org>
Acked-by: Mark Rutland <mark.rutl...@arm.com>
---
 ArmPkg/Include/Library/ArmLib.h                |  6 ---
 ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c     | 30 ------------
 ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h     |  6 ---
 ArmPkg/Library/ArmLib/AArch64/AArch64Support.S | 14 ------
 ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.c         | 30 ------------
 ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.h         |  6 ---
 ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S     | 50 --------------------
 ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm   | 50 --------------------
 8 files changed, 192 deletions(-)

diff --git a/ArmPkg/Include/Library/ArmLib.h b/ArmPkg/Include/Library/ArmLib.h
index 58116663b28d..f1de303d952d 100644
--- a/ArmPkg/Include/Library/ArmLib.h
+++ b/ArmPkg/Include/Library/ArmLib.h
@@ -243,12 +243,6 @@ ArmCleanDataCache (
 
 VOID
 EFIAPI
-ArmCleanDataCacheToPoU (
-  VOID
-  );
-
-VOID
-EFIAPI
 ArmInvalidateInstructionCache (
   VOID
   );
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c
index a4e1f20ad910..f795a2f896b2 100644
--- a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c
@@ -206,26 +206,6 @@ AArch64DataCacheOperation (
   }
 }
 
-
-VOID
-AArch64PoUDataCacheOperation (
-  IN  AARCH64_CACHE_OPERATION  DataCacheOperation
-  )
-{
-  UINTN     SavedInterruptState;
-
-  SavedInterruptState = ArmGetInterruptState ();
-  ArmDisableInterrupts ();
-
-  AArch64PerformPoUDataCacheOperation (DataCacheOperation);
-
-  ArmDrainWriteBuffer ();
-
-  if (SavedInterruptState) {
-    ArmEnableInterrupts ();
-  }
-}
-
 VOID
 EFIAPI
 ArmInvalidateDataCache (
@@ -255,13 +235,3 @@ ArmCleanDataCache (
   ArmDrainWriteBuffer ();
   AArch64DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
 }
-
-VOID
-EFIAPI
-ArmCleanDataCacheToPoU (
-  VOID
-  )
-{
-  ArmDrainWriteBuffer ();
-  AArch64PoUDataCacheOperation (ArmCleanDataCacheEntryBySetWay);
-}
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h
index c8bb84365bb6..7b9b9c371531 100644
--- a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h
@@ -18,12 +18,6 @@
 
 typedef VOID (*AARCH64_CACHE_OPERATION)(UINTN);
 
-
-VOID
-AArch64PerformPoUDataCacheOperation (
-  IN  AARCH64_CACHE_OPERATION  DataCacheOperation
-  );
-
 VOID
 AArch64AllDataCachesOperation (
   IN  AARCH64_CACHE_OPERATION  DataCacheOperation
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S b/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
index 8b5e0fb6e7fe..f973a35c21d6 100644
--- a/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
@@ -40,7 +40,6 @@ GCC_ASM_EXPORT (ArmEnableAlignmentCheck)
 GCC_ASM_EXPORT (ArmEnableBranchPrediction)
 GCC_ASM_EXPORT (ArmDisableBranchPrediction)
 GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
-GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)
 GCC_ASM_EXPORT (ArmDataMemoryBarrier)
 GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)
 GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
@@ -324,19 +323,6 @@ ASM_PFX(AArch64AllDataCachesOperation):
                                // right to ease the access to CSSELR and the Set/Way operation.
   cbz   x3, L_Finished          // No need to clean if LoC is 0
   mov   x10, #0                 // Start clean at cache level 0
-  b     Loop1
-
-ASM_PFX(AArch64PerformPoUDataCacheOperation):
-// We can use regs 0-7 and 9-15 without having to save/restore.
-// Save our link register on the stack. - The stack must always be quad-word 
aligned
-  str   x30, [sp, #-16]!
-  mov   x1, x0                  // Save Function call in x1
-  mrs   x6, clidr_el1           // Read EL1 CLIDR
-  and   x3, x6, #0x38000000     // Mask out all but Point of Unification (PoU)
-  lsr   x3, x3, #26             // Left align cache level value - the level is shifted by 1 to the
-                                // right to ease the access to CSSELR and the Set/Way operation.
-  cbz   x3, L_Finished          // No need to clean if LoC is 0
-  mov   x10, #0                 // Start clean at cache level 0
 
 Loop1:
   add   x2, x10, x10, lsr #1    // Work out 3x cachelevel for cache info
diff --git a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.c b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.c
index feb60881bdcd..6a8f0d3fdd98 100644
--- a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.c
+++ b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.c
@@ -208,26 +208,6 @@ ArmV7DataCacheOperation (
   }
 }
 
-
-VOID
-ArmV7PoUDataCacheOperation (
-  IN  ARM_V7_CACHE_OPERATION  DataCacheOperation
-  )
-{
-  UINTN     SavedInterruptState;
-
-  SavedInterruptState = ArmGetInterruptState ();
-  ArmDisableInterrupts ();
-
-  ArmV7PerformPoUDataCacheOperation (DataCacheOperation);
-
-  ArmDrainWriteBuffer ();
-
-  if (SavedInterruptState) {
-    ArmEnableInterrupts ();
-  }
-}
-
 VOID
 EFIAPI
 ArmInvalidateDataCache (
@@ -257,13 +237,3 @@ ArmCleanDataCache (
   ArmDrainWriteBuffer ();
   ArmV7DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
 }
-
-VOID
-EFIAPI
-ArmCleanDataCacheToPoU (
-  VOID
-  )
-{
-  ArmDrainWriteBuffer ();
-  ArmV7PoUDataCacheOperation (ArmCleanDataCacheEntryBySetWay);
-}
diff --git a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.h b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.h
index 1398d75071ca..50fba3824024 100644
--- a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.h
+++ b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.h
@@ -17,12 +17,6 @@
 
 typedef VOID (*ARM_V7_CACHE_OPERATION)(UINT32);
 
-
-VOID
-ArmV7PerformPoUDataCacheOperation (
-  IN  ARM_V7_CACHE_OPERATION  DataCacheOperation
-  );
-
 VOID
 ArmV7AllDataCachesOperation (
   IN  ARM_V7_CACHE_OPERATION  DataCacheOperation
diff --git a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
index f59cd5f32e6b..fdc4d03776c8 100644
--- a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
+++ b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
@@ -38,7 +38,6 @@ GCC_ASM_EXPORT (ArmDisableBranchPrediction)
 GCC_ASM_EXPORT (ArmSetLowVectors)
 GCC_ASM_EXPORT (ArmSetHighVectors)
 GCC_ASM_EXPORT (ArmV7AllDataCachesOperation)
-GCC_ASM_EXPORT (ArmV7PerformPoUDataCacheOperation)
 GCC_ASM_EXPORT (ArmDataMemoryBarrier)
 GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)
 GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
@@ -268,55 +267,6 @@ L_Finished:
   ldmfd SP!, {r4-r12, lr}
   bx    LR
 
-ASM_PFX(ArmV7PerformPoUDataCacheOperation):
-  stmfd SP!,{r4-r12, LR}
-  mov   R1, R0                @ Save Function call in R1
-  mrc   p15, 1, R6, c0, c0, 1 @ Read CLIDR
-  ands  R3, R6, #0x38000000    @ Mask out all but Level of Unification (LoU)
-  mov   R3, R3, LSR #26       @ Cache level value (naturally aligned)
-  beq   Finished2
-  mov   R10, #0
-
-Loop4:
-  add   R2, R10, R10, LSR #1    @ Work out 3xcachelevel
-  mov   R12, R6, LSR R2         @ bottom 3 bits are the Cache type for this level
-  and   R12, R12, #7            @ get those 3 bits alone
-  cmp   R12, #2
-  blt   Skip2                   @ no cache or only instruction cache at this level
-  mcr   p15, 2, R10, c0, c0, 0  @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
-  isb                           @ isb to sync the change to the CacheSizeID reg
-  mrc   p15, 1, R12, c0, c0, 0  @ reads current Cache Size ID register (CCSIDR)
-  and   R2, R12, #0x7            @ extract the line length field
-  add   R2, R2, #4              @ add 4 for the line length offset (log2 16 bytes)
-  ldr   R4, =0x3FF
-  ands  R4, R4, R12, LSR #3     @ R4 is the max number on the way size (right aligned)
-  clz   R5, R4                  @ R5 is the bit position of the way size increment
-  ldr   R7, =0x00007FFF
-  ands  R7, R7, R12, LSR #13    @ R7 is the max number of the index size (right aligned)
-
-Loop5:
-  mov   R9, R4                  @ R9 working copy of the max way size (right aligned)
-
-Loop6:
-  orr   R0, R10, R9, LSL R5     @ factor in the way number and cache number into R11
-  orr   R0, R0, R7, LSL R2      @ factor in the index number
-
-  blx   R1
-
-  subs  R9, R9, #1              @ decrement the way number
-  bge   Loop6
-  subs  R7, R7, #1              @ decrement the index
-  bge   Loop5
-Skip2:
-  add   R10, R10, #2            @ increment the cache number
-  cmp   R3, R10
-  bgt   Loop4
-
-Finished2:
-  dsb
-  ldmfd SP!, {r4-r12, lr}
-  bx    LR
-
 ASM_PFX(ArmDataMemoryBarrier):
   dmb
   bx      LR
diff --git a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm
index 07ff1ae15a6a..f16dd4a4ab01 100644
--- a/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm
+++ b/ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.asm
@@ -35,7 +35,6 @@
     EXPORT  ArmSetLowVectors
     EXPORT  ArmSetHighVectors
     EXPORT  ArmV7AllDataCachesOperation
-    EXPORT  ArmV7PerformPoUDataCacheOperation
     EXPORT  ArmDataMemoryBarrier
     EXPORT  ArmDataSynchronizationBarrier
     EXPORT  ArmInstructionSynchronizationBarrier
@@ -262,55 +261,6 @@ Finished
   ldmfd SP!, {r4-r12, lr}
   bx    LR
 
-ArmV7PerformPoUDataCacheOperation
-  stmfd SP!,{r4-r12, LR}
-  mov   R1, R0                ; Save Function call in R1
-  mrc   p15, 1, R6, c0, c0, 1 ; Read CLIDR
-  ands  R3, R6, #&38000000    ; Mask out all but Level of Unification (LoU)
-  mov   R3, R3, LSR #26       ; Cache level value (naturally aligned)
-  beq   Finished2
-  mov   R10, #0
-
-Loop4
-  add   R2, R10, R10, LSR #1    ; Work out 3xcachelevel
-  mov   R12, R6, LSR R2         ; bottom 3 bits are the Cache type for this level
-  and   R12, R12, #7            ; get those 3 bits alone
-  cmp   R12, #2
-  blt   Skip2                   ; no cache or only instruction cache at this level
-  mcr   p15, 2, R10, c0, c0, 0  ; write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
-  isb                           ; isb to sync the change to the CacheSizeID reg
-  mrc   p15, 1, R12, c0, c0, 0  ; reads current Cache Size ID register (CCSIDR)
-  and   R2, R12, #&7            ; extract the line length field
-  add   R2, R2, #4              ; add 4 for the line length offset (log2 16 bytes)
-  ldr   R4, =0x3FF
-  ands  R4, R4, R12, LSR #3     ; R4 is the max number on the way size (right aligned)
-  clz   R5, R4                  ; R5 is the bit position of the way size increment
-  ldr   R7, =0x00007FFF
-  ands  R7, R7, R12, LSR #13    ; R7 is the max number of the index size (right aligned)
-
-Loop5
-  mov   R9, R4                  ; R9 working copy of the max way size (right aligned)
-
-Loop6
-  orr   R0, R10, R9, LSL R5     ; factor in the way number and cache number into R11
-  orr   R0, R0, R7, LSL R2      ; factor in the index number
-
-  blx   R1
-
-  subs  R9, R9, #1              ; decrement the way number
-  bge   Loop6
-  subs  R7, R7, #1              ; decrement the index
-  bge   Loop5
-Skip2
-  add   R10, R10, #2            ; increment the cache number
-  cmp   R3, R10
-  bgt   Loop4
-
-Finished2
-  dsb
-  ldmfd SP!, {r4-r12, lr}
-  bx    LR
-
 ArmDataMemoryBarrier
   dmb
   bx      LR
-- 
1.9.1

_______________________________________________
edk2-devel mailing list
edk2-devel@lists.01.org
https://lists.01.org/mailman/listinfo/edk2-devel

Reply via email to