This patch adds system suspend/resume support. When the Linux kernel enters deep sleep mode, the SoC goes into the following state:
- CA7 platform goes into STOP mode;
- SoC goes into DSM mode;
- DDR goes into self-refresh mode;
- CPU0/SCU are powered down.

When a wake-up event arrives:
- SoC exits DSM mode;
- CA7 platform exits STOP mode, SCU/CPU0 power up;
- L1 cache is invalidated;
- DDR exits self-refresh mode;
- secure monitor mode related initialization is done;
- jump to the Linux kernel resume entry.

Below is the log of one iteration of system suspend/resume:

[ 338.824862] PM: suspend entry (deep)
[ 338.828853] PM: Syncing filesystems ... done.
[ 338.834433] Freezing user space processes ... (elapsed 0.001 seconds) done.
[ 338.842939] OOM killer disabled.
[ 338.846182] Freezing remaining freezable tasks ... (elapsed 0.001 seconds) done.
[ 338.869717] PM: suspend devices took 0.010 seconds
[ 338.877846] Disabling non-boot CPUs ...
[ 338.960301] Retrying again to check for CPU kill
[ 338.964953] CPU1 killed.
[ 338.968104] Enabling non-boot CPUs ...
[ 338.973598] CPU1 is up
[ 339.267155] mmc1: queuing unknown CIS tuple 0x80 (2 bytes)
[ 339.275833] mmc1: queuing unknown CIS tuple 0x80 (7 bytes)
[ 339.284158] mmc1: queuing unknown CIS tuple 0x80 (6 bytes)
[ 339.385065] PM: resume devices took 0.400 seconds
[ 339.389836] OOM killer enabled.
[ 339.392986] Restarting tasks ... done.
[ 339.398990] PM: suspend exit

Signed-off-by: Anson Huang <anson.hu...@nxp.com>
---
 arch/arm/mach-imx/mx7/psci-mx7.c | 343 ++++++++++++++++++++++++++++++++++++++-
 arch/arm/mach-imx/mx7/psci.S     | 200 +++++++++++++++++++++++
 2 files changed, 541 insertions(+), 2 deletions(-)
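Note (illustration only, not part of the diff): the new psci_system_suspend entry below is
reached when the non-secure OS makes a PSCI SYSTEM_SUSPEND call into the secure monitor,
assuming the generic U-Boot PSCI dispatcher routes that function ID to psci_system_suspend.
A minimal sketch of such a caller follows; the function and macro names here are made up
for illustration, and the function ID is the SMC32 SYSTEM_SUSPEND ID from the PSCI 1.0
specification:

        #define PSCI_FN_SYSTEM_SUSPEND  0x8400000e  /* PSCI 1.0 SYSTEM_SUSPEND, SMC32 */

        /* Hypothetical non-secure caller: trap into the secure monitor. */
        static unsigned long psci_system_suspend_call(unsigned long resume_entry)
        {
                register unsigned long r0 asm("r0") = PSCI_FN_SYSTEM_SUSPEND;
                register unsigned long r1 asm("r1") = resume_entry; /* kernel resume address */
                register unsigned long r2 asm("r2") = 0;            /* context id, unused here */

                asm volatile(".arch_extension sec\n"
                             "smc #0"
                             : "+r" (r0)
                             : "r" (r1), "r" (r2)
                             : "r3", "memory");

                /* Only reached on error; on success the kernel resumes at resume_entry. */
                return r0;
        }

On Linux this path is normally exercised with "echo mem > /sys/power/state" once the
firmware advertises SYSTEM_SUSPEND, which matches the log above.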
diff --git a/arch/arm/mach-imx/mx7/psci-mx7.c b/arch/arm/mach-imx/mx7/psci-mx7.c
index 6f69fd3..7786b4e 100644
--- a/arch/arm/mach-imx/mx7/psci-mx7.c
+++ b/arch/arm/mach-imx/mx7/psci-mx7.c
@@ -8,16 +8,68 @@
 #include <asm/psci.h>
 #include <asm/secure.h>
 #include <asm/arch/imx-regs.h>
+#include <asm/armv7.h>
+#include <asm/gic.h>
 #include <common.h>
 #include <fsl_wdog.h>
 
-#define GPC_CPU_PGC_SW_PDN_REQ 0xfc
+#define GPC_LPCR_A7_BSC 0x0
+#define GPC_LPCR_A7_AD 0x4
+#define GPC_SLPCR 0x14
+#define GPC_PGC_ACK_SEL_A7 0x24
+#define GPC_IMR1_CORE0 0x30
+#define GPC_SLOT0_CFG 0xb0
 #define GPC_CPU_PGC_SW_PUP_REQ 0xf0
+#define GPC_CPU_PGC_SW_PDN_REQ 0xfc
+#define GPC_PGC_C0 0x800
 #define GPC_PGC_C1 0x840
+#define GPC_PGC_SCU 0x880
+
+#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM 0x4000
+#define BM_LPCR_A7_BSC_LPM1 0xc
+#define BM_LPCR_A7_BSC_LPM0 0x3
+#define BP_LPCR_A7_BSC_LPM0 0
+#define BM_SLPCR_EN_DSM 0x80000000
+#define BM_SLPCR_RBC_EN 0x40000000
+#define BM_SLPCR_REG_BYPASS_COUNT 0x3f000000
+#define BM_SLPCR_VSTBY 0x4
+#define BM_SLPCR_SBYOS 0x2
+#define BM_SLPCR_BYPASS_PMIC_READY 0x1
+#define BM_LPCR_A7_AD_L2PGE 0x10000
+#define BM_LPCR_A7_AD_EN_C1_PUP 0x800
+#define BM_LPCR_A7_AD_EN_C0_PUP 0x200
+#define BM_LPCR_A7_AD_EN_PLAT_PDN 0x10
+#define BM_LPCR_A7_AD_EN_C1_PDN 0x8
+#define BM_LPCR_A7_AD_EN_C0_PDN 0x2
 
 #define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 0x2
 
-/* below is for i.MX7D */
+#define BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK 0x8000
+#define BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK 0x80000000
+
+#define MAX_SLOT_NUMBER 10
+#define A7_LPM_WAIT 0x5
+#define A7_LPM_STOP 0xa
+
+#define BM_SYS_COUNTER_CNTCR_FCR1 0x200
+#define BM_SYS_COUNTER_CNTCR_FCR0 0x100
+
+#define REG_SET 0x4
+#define REG_CLR 0x8
+
+#define ANADIG_ARM_PLL 0x60
+#define ANADIG_DDR_PLL 0x70
+#define ANADIG_SYS_PLL 0xb0
+#define ANADIG_ENET_PLL 0xe0
+#define ANADIG_AUDIO_PLL 0xf0
+#define ANADIG_VIDEO_PLL 0x130
+#define BM_ANATOP_ARM_PLL_OVERRIDE BIT(20)
+#define BM_ANATOP_DDR_PLL_OVERRIDE BIT(19)
+#define BM_ANATOP_SYS_PLL_OVERRIDE (0x1ff << 17)
+#define BM_ANATOP_ENET_PLL_OVERRIDE BIT(13)
+#define BM_ANATOP_AUDIO_PLL_OVERRIDE BIT(24)
+#define BM_ANATOP_VIDEO_PLL_OVERRIDE BIT(24)
+
 #define SRC_GPR1_MX7D 0x074
 #define SRC_A7RCR0 0x004
 #define SRC_A7RCR1 0x008
@@ -34,6 +86,172 @@
 #define CCM_ROOT_WDOG 0xbb80
 #define CCM_CCGR_WDOG1 0x49c0
 
+enum imx_gpc_slot {
+        CORE0_A7,
+        CORE1_A7,
+        SCU_A7,
+        FAST_MEGA_MIX,
+        MIPI_PHY,
+        PCIE_PHY,
+        USB_OTG1_PHY,
+        USB_OTG2_PHY,
+        USB_HSIC_PHY,
+        CORE0_M4,
+};
+
+enum mxc_cpu_pwr_mode {
+        RUN,
+        WAIT,
+        STOP,
+};
+
+inline void imx_pll_suspend(void)
+{
+        writel(BM_ANATOP_ARM_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_SET);
+        writel(BM_ANATOP_DDR_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_SET);
+        writel(BM_ANATOP_SYS_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_SET);
+        writel(BM_ANATOP_ENET_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_SET);
+        writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_SET);
+        writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_SET);
+}
+
+inline void imx_pll_resume(void)
+{
+        writel(BM_ANATOP_ARM_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_CLR);
+        writel(BM_ANATOP_DDR_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_CLR);
+        writel(BM_ANATOP_SYS_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_CLR);
+        writel(BM_ANATOP_ENET_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_CLR);
+        writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_CLR);
+        writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
+               ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_CLR);
+}
+
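+/*
+ * Select the low-power mode the A7 platform enters once its cores execute
+ * WFI: RUN disables LPM, WAIT/STOP program the LPM field of GPC_LPCR_A7_BSC
+ * (and let the CPU clock be gated), and STOP additionally enables DSM,
+ * standby and the PMIC-ready bypass in GPC_SLPCR.
+ */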
+__secure void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode)
+{
+        u32 val1, val2, val3;
+
+        val1 = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
+        val2 = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
+
+        /* all cores' LPM settings must be same */
+        val1 &= ~(BM_LPCR_A7_BSC_LPM0 | BM_LPCR_A7_BSC_LPM1);
+        val1 |= BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
+
+        val2 &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
+                  BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);
+
+        /*
+         * GPC: When improper low-power sequence is used,
+         * the SoC enters low power mode before the ARM core executes WFI.
+         *
+         * Software workaround:
+         * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
+         *    by setting IOMUX_GPR1_IRQ.
+         * 2) Software should then unmask IRQ #32 in GPC before setting GPC
+         *    Low-Power mode.
+         * 3) Software should mask IRQ #32 right after GPC Low-Power mode
+         *    is set.
+         */
+        switch (mode) {
+        case RUN:
+                val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
+                val3 &= ~0x1;
+                writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
+                break;
+        case WAIT:
+                val1 |= A7_LPM_WAIT << BP_LPCR_A7_BSC_LPM0;
+                val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
+                val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
+                val3 &= ~0x1;
+                writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
+                break;
+        case STOP:
+                val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0;
+                val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
+                val2 |= BM_SLPCR_EN_DSM;
+                val2 |= BM_SLPCR_SBYOS;
+                val2 |= BM_SLPCR_VSTBY;
+                val2 |= BM_SLPCR_BYPASS_PMIC_READY;
+                val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
+                val3 |= 0x1;
+                writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
+                break;
+        default:
+                return;
+        }
+        writel(val1, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
+        writel(val2, GPC_IPS_BASE_ADDR + GPC_SLPCR);
+}
+
+__secure void imx_gpcv2_set_plat_power_gate_by_lpm(bool pdn)
+{
+        u32 val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
+
+        val &= ~(BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE);
+        if (pdn)
+                val |= BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE;
+
+        writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
+}
+
+__secure void imx_gpcv2_set_cpu_power_gate_by_lpm(u32 cpu, bool pdn)
+{
+        u32 val;
+
+        val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
+        if (cpu == 0) {
+                if (pdn)
+                        val |= BM_LPCR_A7_AD_EN_C0_PDN |
+                               BM_LPCR_A7_AD_EN_C0_PUP;
+                else
+                        val &= ~(BM_LPCR_A7_AD_EN_C0_PDN |
+                                 BM_LPCR_A7_AD_EN_C0_PUP);
+        }
+        if (cpu == 1) {
+                if (pdn)
+                        val |= BM_LPCR_A7_AD_EN_C1_PDN |
+                               BM_LPCR_A7_AD_EN_C1_PUP;
+                else
+                        val &= ~(BM_LPCR_A7_AD_EN_C1_PDN |
+                                 BM_LPCR_A7_AD_EN_C1_PUP);
+        }
+        writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
+}
+
+__secure void imx_gpcv2_set_slot_ack(u32 index, enum imx_gpc_slot m_core,
+                                     bool mode, bool ack)
+{
+        u32 val;
+
+        if (index >= MAX_SLOT_NUMBER)
+                return;
+
+        /* set slot */
+        writel(readl(GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4) |
+               ((mode + 1) << (m_core * 2)),
+               GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4);
+
+        if (ack) {
+                /* set ack */
+                val = readl(GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
+                /* clear dummy ack */
+                val &= ~(mode ? BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK :
+                         BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK);
+                val |= 1 << (m_core + (mode ? 16 : 0));
+                writel(val, GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
+        }
+}
+
 static inline void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
 {
         writel(enable, GPC_IPS_BASE_ADDR + offset);
@@ -129,3 +347,124 @@ __secure void imx_system_off(void)
         val |= BP_SNVS_LPCR_DP_EN | BP_SNVS_LPCR_TOP;
         writel(val, SNVS_BASE_ADDR + SNVS_LPCR);
 }
+
+__secure void imx_system_counter_resume(void)
+{
+        u32 val;
+
+        val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
+        val &= ~BM_SYS_COUNTER_CNTCR_FCR1;
+        val |= BM_SYS_COUNTER_CNTCR_FCR0;
+        writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
+}
+
+__secure void imx_system_counter_suspend(void)
+{
+        u32 val;
+
+        val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
+        val &= ~BM_SYS_COUNTER_CNTCR_FCR0;
+        val |= BM_SYS_COUNTER_CNTCR_FCR1;
+        writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
+}
+
+__secure void gic_resume(void)
+{
+        u32 itlinesnr, i;
+        u32 gic_dist_addr = GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET;
+
+        /* enable the GIC distributor */
+        writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
+               gic_dist_addr + GICD_CTLR);
+
+        /* TYPER[4:0] contains an encoded number of available interrupts */
+        itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;
+
+        /* set all bits in the GIC group registers to one to allow access
+         * from non-secure state. The first 32 interrupts are private per
+         * CPU and will be set later when enabling the GIC for each core
+         */
+        for (i = 1; i <= itlinesnr; i++)
+                writel((u32)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);
+}
+
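+/*
+ * Busy-wait for roughly 'usec' microseconds by polling the ARM generic
+ * timer: CNTFRQ gives the counter frequency, CNTPCT the current count.
+ */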
+__secure void imx_udelay(u32 usec)
+{
+        u32 freq;
+        u64 start, end;
+
+        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (freq));
+        asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (start));
+        do {
+                asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (end));
+                if ((end - start) > usec * (freq / 1000000))
+                        break;
+        } while (1);
+}
+
+__secure void imx_system_resume(void)
+{
+        unsigned int i, val, imr[4];
+
+        imx_pll_resume();
+        imx_system_counter_resume();
+        imx_gpcv2_set_lpm_mode(RUN);
+        imx_gpcv2_set_cpu_power_gate_by_lpm(0, false);
+        imx_gpcv2_set_plat_power_gate_by_lpm(false);
+        imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0);
+        imx_gpcv2_set_m_core_pgc(false, GPC_PGC_SCU);
+
+        /*
+         * need to mask all interrupts in GPC before
+         * operating RBC configurations
+         */
+        for (i = 0; i < 4; i++) {
+                imr[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
+                writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
+        }
+
+        /* configure RBC enable bit */
+        val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
+        val &= ~BM_SLPCR_RBC_EN;
+        writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);
+
+        /* configure RBC count */
+        val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
+        val &= ~BM_SLPCR_REG_BYPASS_COUNT;
+        writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);
+
+        /*
+         * need to delay at least 2 cycles of CKIL(32K)
+         * due to hardware design requirement, which is
+         * ~61us, here we use 65us for safe
+         */
+        imx_udelay(65);
+
+        /* restore GPC interrupt mask settings */
+        for (i = 0; i < 4; i++)
+                writel(imr[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
+
+        /* initialize gic distributor */
+        gic_resume();
+        _nonsec_init();
+}
+
+__secure void imx_system_suspend(void)
+{
+        /* overwrite PLL to be controlled by low power mode */
+        imx_pll_suspend();
+        imx_system_counter_suspend();
+        /* set CA7 platform to enter STOP mode */
+        imx_gpcv2_set_lpm_mode(STOP);
+        /* enable core0/scu power down/up with low power mode */
+        imx_gpcv2_set_cpu_power_gate_by_lpm(0, true);
+        imx_gpcv2_set_plat_power_gate_by_lpm(true);
+        /* time slot settings for core0 and scu */
+        imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false);
+        imx_gpcv2_set_slot_ack(1, SCU_A7, false, true);
+        imx_gpcv2_set_slot_ack(5, SCU_A7, true, false);
+        imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true);
+        imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0);
+        imx_gpcv2_set_m_core_pgc(true, GPC_PGC_SCU);
+        psci_v7_flush_dcache_all();
+}
diff --git a/arch/arm/mach-imx/mx7/psci.S b/arch/arm/mach-imx/mx7/psci.S
index d6d19d5..cbe6781 100644
--- a/arch/arm/mach-imx/mx7/psci.S
+++ b/arch/arm/mach-imx/mx7/psci.S
@@ -10,11 +10,108 @@
 #include <asm/armv7.h>
 #include <asm/arch-armv7/generictimer.h>
 #include <asm/psci.h>
+#include <asm/gic.h>
+
+#define DDRC_STAT 0x4
+#define DDRC_PWRCTL 0x30
+#define DDRC_PSTAT 0x3fc
+
+#define SRC_GPR1 0x74
+#define SRC_GPR2 0x78
 
 .pushsection ._secure.text, "ax"
 
 .arch_extension sec
 
+.global ddrc_enter_self_refresh
+ddrc_enter_self_refresh:
+        /* let DDR out of self-refresh */
+        ldr r1, =0x0
+        str r1, [r0, #DDRC_PWRCTL]
+
+        /* wait rw port_busy clear */
+        ldr r2, =(0x1 << 16)
+        orr r2, r2, #0x1
+1:
+        ldr r1, [r0, #DDRC_PSTAT]
+        ands r1, r1, r2
+        bne 1b
+
+        /* enter self-refresh bit 5 */
+        ldr r1, =(0x1 << 5)
+        str r1, [r0, #DDRC_PWRCTL]
+
+        /* wait until self-refresh mode entered */
+2:
+        ldr r1, [r0, #DDRC_STAT]
+        and r1, r1, #0x3
+        cmp r1, #0x3
+        bne 2b
+3:
+        ldr r1, [r0, #DDRC_STAT]
+        ands r1, r1, #0x20
+        beq 3b
+
+        /* disable dram clk */
+        ldr r1, [r0, #DDRC_PWRCTL]
+        orr r1, r1, #(1 << 3)
+        str r1, [r0, #DDRC_PWRCTL]
+
+        mov pc, lr
+
+.global ddrc_exit_self_refresh
+ddrc_exit_self_refresh:
+        /* let DDR out of self-refresh */
+        ldr r1, =0x0
+        str r1, [r0, #DDRC_PWRCTL]
+
+        /* wait until self-refresh mode is exited */
+1:
+        ldr r1, [r0, #DDRC_STAT]
+        and r1, r1, #0x3
+        cmp r1, #0x3
+        beq 1b
+
+        /* enable auto self-refresh */
+        ldr r1, [r0, #DDRC_PWRCTL]
+        orr r1, r1, #(1 << 0)
+        str r1, [r0, #DDRC_PWRCTL]
+
+        mov pc, lr
+
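+/*
+ * Invalidate the L1 data cache by set/way (DCISW). The cache contents are
+ * unknown after CPU0/SCU have been powered down, so this must run before
+ * the data cache is used again on resume.
+ */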
+.globl v7_invalidate_l1
+v7_invalidate_l1:
+        mov r0, #0
+        mcr p15, 2, r0, c0, c0, 0
+        mrc p15, 1, r0, c0, c0, 0
+
+        movw r1, #0x7fff
+        and r2, r1, r0, lsr #13
+
+        movw r1, #0x3ff
+
+        and r3, r1, r0, lsr #3  @ NumWays - 1
+        add r2, r2, #1          @ NumSets
+
+        and r0, r0, #0x7
+        add r0, r0, #4          @ SetShift
+
+        clz r1, r3              @ WayShift
+        add r4, r3, #1          @ NumWays
+1:      sub r2, r2, #1          @ NumSets--
+        mov r3, r4              @ Temp = NumWays
+2:      subs r3, r3, #1         @ Temp--
+        mov r5, r3, lsl r1
+        mov r6, r2, lsl r0
+        orr r5, r5, r6          @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+        mcr p15, 0, r5, c7, c6, 2
+        bgt 2b
+        cmp r2, #0
+        bgt 1b
+        dsb st
+        isb
+        mov pc, lr
+
 .globl psci_cpu_on
 psci_cpu_on:
         push {r4, r5, lr}
@@ -71,4 +168,107 @@ psci_affinity_info:
 out_affinity:
         pop {pc}
 
+.globl psci_system_resume
+psci_system_resume:
+        mov sp, r0
+
+        /* force DDR exit self-refresh */
+        ldr r0, =DDRC_IPS_BASE_ADDR
+        bl ddrc_exit_self_refresh
+
+        /* invalidate L1 I-cache first */
+        mov r6, #0x0
+        mcr p15, 0, r6, c7, c5, 0
+        mcr p15, 0, r6, c7, c5, 6
+        /* enable the Icache and branch prediction */
+        mov r6, #0x1800
+        mcr p15, 0, r6, c1, c0, 0
+        isb
+        bl v7_invalidate_l1
+
+        mov r0, #0
+        bl psci_get_target_pc           @ target PC => r0
+        push {r0}       @ save cpu_resume since _nonsec_init will clean it
+        bl imx_system_resume
+        pop {r1}
+        /* save cpu0 entry r1 */
+        mov r0, #0
+        mov r2, #0
+        bl psci_save
+
+        ldr lr, =psci_cpu_entry
+        mov pc, lr
+
+.globl psci_system_suspend
+psci_system_suspend:
+        /* save cpu0 entry r1 */
+        mov r0, #0
+        mov r2, #0
+        bl psci_save
+
+        bl imx_system_suspend
+
+        ldr r0, =SRC_BASE_ADDR
+        ldr r1, =psci_system_resume
+        str r1, [r0, #SRC_GPR1]
+        str sp, [r0, #SRC_GPR2]
+
+        /* disable GIC distributor */
+        ldr r0, =GIC400_ARB_BASE_ADDR
+        ldr r1, =0x0
+        ldr r2, =GIC_DIST_OFFSET
+        str r1, [r0, r2]
+
+        /* force DDR into self-refresh */
+        ldr r0, =DDRC_IPS_BASE_ADDR
+        bl ddrc_enter_self_refresh
+
+        ldr r11, =GPC_IPS_BASE_ADDR
+        ldr r4, [r11, #0x30]
+        ldr r5, [r11, #0x34]
+        ldr r6, [r11, #0x38]
+        ldr r7, [r11, #0x3c]
+
+        /*
+         * enable the RBC bypass counter here
+         * to hold off the interrupts. RBC counter
+         * = 8 (240us). With this setting, the latency
+         * from wakeup interrupt to ARM power up
+         * is ~250uS.
+         */
+        ldr r8, [r11, #0x14]
+        bic r8, r8, #(0x3f << 24)
+        orr r8, r8, #(0x8 << 24)
+        str r8, [r11, #0x14]
+
+        /* enable the counter. */
+        ldr r8, [r11, #0x14]
+        orr r8, r8, #(0x1 << 30)
+        str r8, [r11, #0x14]
+
+        /* unmask all the GPC interrupts. */
+        str r4, [r11, #0x30]
+        str r5, [r11, #0x34]
+        str r6, [r11, #0x38]
+        str r7, [r11, #0x3c]
+
+        /*
+         * now delay for a short while (3usec)
+         * ARM is at 1GHz at this point
+         * so a short loop should be enough.
+         * this delay is required to ensure that
+         * the RBC counter can start counting in
+         * case an interrupt is already pending
+         * or in case an interrupt arrives just
+         * as ARM is about to assert DSM_request.
+         */
+        ldr r7, =2000
+rbc_loop:
+        subs r7, r7, #0x1
+        bne rbc_loop
+
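+        /*
+         * Enter DSM: execution stops at the WFI below and does not return.
+         * On wake-up the core reaches psci_system_resume through the entry
+         * address stored in SRC_GPR1 above, with the stack pointer saved in
+         * SRC_GPR2 passed back in r0.
+         */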
+dsm:
+        wfi
+        b dsm
+
 .popsection
-- 
2.7.4
_______________________________________________
U-Boot mailing list
U-Boot@lists.denx.de
https://lists.denx.de/listinfo/u-boot