Update for v1.0alpha2

James Deng 2024-03-01 19:54:35 +08:00
parent a82a7b11ed
commit 59ac4b4bb0
71 changed files with 6067 additions and 33 deletions

View file

@ -114,6 +114,7 @@ endif
CPP = $(CC) -E
AS = $(CC)
DTC = dtc
MKIMAGE = mkimage
ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
CC_IS_CLANG = y
@ -245,6 +246,9 @@ ifdef PLATFORM
libsbiutils-objs-path-y=$(foreach obj,$(libsbiutils-objs-y),$(platform_build_dir)/lib/utils/$(obj))
platform-objs-path-y=$(foreach obj,$(platform-objs-y),$(platform_build_dir)/$(obj))
firmware-bins-path-y=$(foreach bin,$(firmware-bins-y),$(platform_build_dir)/firmware/$(bin))
firmware-itb-path-y=$(foreach its,$(firmware-its-y),$(platform_build_dir)/firmware/$(basename $(notdir $(its))).itb)
platform_build_itb_dir=$(patsubst %/,%,$(dir $(firstword $(firmware-itb-path-y))))
platform_src_its_dir=$(patsubst %/,%,$(platform_src_dir)/$(dir $(firstword $(firmware-its-y))))
endif
firmware-elfs-path-y=$(firmware-bins-path-y:.bin=.elf)
firmware-objs-path-y=$(firmware-bins-path-y:.bin=.o)
@ -342,7 +346,7 @@ CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls -mstrict-align
ifeq ($(CC_SUPPORT_SAVE_RESTORE),y)
CFLAGS += -mno-save-restore
endif
CFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)
CFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)_zicbom
CFLAGS += -mcmodel=$(PLATFORM_RISCV_CODE_MODEL)
CFLAGS += $(RELAX_FLAG)
CFLAGS += $(GENFLAGS)
@ -360,7 +364,7 @@ ASFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls -mstrict-align
ifeq ($(CC_SUPPORT_SAVE_RESTORE),y)
ASFLAGS += -mno-save-restore
endif
ASFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)
ASFLAGS += -mabi=$(PLATFORM_RISCV_ABI) -march=$(PLATFORM_RISCV_ISA)_zicbom
ASFLAGS += -mcmodel=$(PLATFORM_RISCV_CODE_MODEL)
ASFLAGS += $(RELAX_FLAG)
ifneq ($(CC_IS_CLANG),y)
@ -468,12 +472,16 @@ compile_carray = $(CMD_PREFIX)mkdir -p `dirname $(1)`; \
compile_gen_dep = $(CMD_PREFIX)mkdir -p `dirname $(1)`; \
echo " GEN-DEP $(subst $(build_dir)/,,$(1))"; \
echo "$(1:.dep=$(2)): $(3)" >> $(1)
compile_itb = \
$(CMD_PREFIX)echo " ITB $(subst $(build_dir)/,,$(1))"; \
$(MKIMAGE) -f $(2) -r $(1)
targets-y = $(build_dir)/lib/libsbi.a
ifdef PLATFORM
targets-y += $(platform_build_dir)/lib/libplatsbi.a
endif
targets-y += $(firmware-bins-path-y)
targets-y += $(firmware-itb-path-y)
# The default "make all" rule
.PHONY: all
@ -579,6 +587,11 @@ $(platform_build_dir)/%.dep: $(src_dir)/%.S $(KCONFIG_CONFIG)
$(platform_build_dir)/%.o: $(src_dir)/%.S
$(call compile_as,$@,$<)
# Rules for fit image sources
$(platform_build_itb_dir)/%.itb: $(platform_src_its_dir)/%.its $(firmware-bins-path-y)
$(call copy_file,$(dir $@)/$(notdir $<),$<)
$(call compile_itb,$@,$(basename $@).its)
# Rule for "make docs"
$(build_dir)/docs/latex/refman.pdf: $(build_dir)/docs/latex/refman.tex
$(CMD_PREFIX)mkdir -p $(build_dir)/docs

View file

@ -96,6 +96,11 @@
PROVIDE(_bss_end = .);
}
/DISCARD/ : {
*(.eh_frame*)
*(.debug*)
}
/* End of the read-write data sections */
. = ALIGN(0x1000); /* Need this to create proper sections */

View file

@ -708,6 +708,8 @@
#define CSR_MVIPH 0x319
#define CSR_MIPH 0x354
#define CSR_TCMCFG 0x5DB
/* ===== Trap/Exception Causes ===== */
#define CAUSE_MISALIGNED_FETCH 0x0

View file

@ -42,6 +42,10 @@
#define SBI_EXT_BASE_GET_MARCHID 0x5
#define SBI_EXT_BASE_GET_MIMPID 0x6
#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) || defined(CONFIG_PLATFORM_SPACEMIT_K1X)
#define SBI_EXT_BASE_FLUSH_CACHE_ALL 0x7
#endif
/* SBI function IDs for TIME extension*/
#define SBI_EXT_TIME_SET_TIMER 0x0

View file

@ -74,10 +74,16 @@ bool sbi_hsm_hart_change_state(struct sbi_scratch *scratch, long oldstate,
long newstate);
int __sbi_hsm_hart_get_state(u32 hartid);
int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid);
#ifdef CONFIG_ARM_PSCI_SUPPORT
int __sbi_hsm_hart_get_psci_state(u32 hartid);
int sbi_hsm_hart_get_psci_state(const struct sbi_domain *dom, u32 hartid);
#endif
int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
ulong hbase, ulong *out_hmask);
void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch);
void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch,
u32 hartid);
u32 hartid, bool cool_boot);
#endif

include/sbi_utils/cache/cacheflush.h (new file, 192 lines)
View file

@ -0,0 +1,192 @@
#ifndef __CACHE_FLUSH__H__
#define __CACHE_FLUSH__H__
#include <sbi/sbi_types.h>
#include <sbi/riscv_io.h>
#include <sbi/riscv_asm.h>
#include <sbi/riscv_encoding.h>
#include <sbi_utils/psci/psci.h>
#include <spacemit/spacemit_config.h>
#define __ALWAYS_STATIC_INLINE __attribute__((always_inline)) static inline
/**
\brief Clean Dcache by addr
\details Clean the Dcache block containing addr (cbo.clean).
\param [in] addr address to operate on
*/
__ALWAYS_STATIC_INLINE void __DCACHE_CPA(uintptr_t addr)
{
uintptr_t __v = addr;
asm volatile ("cbo.clean" " 0(%0)" : : "rK"(__v) : "memory");
}
/**
\brief Invalidate Dcache by addr
\details Invalidate the Dcache block containing addr (cbo.inval).
\param [in] addr address to operate on
*/
__ALWAYS_STATIC_INLINE void __DCACHE_IPA(uintptr_t addr)
{
uintptr_t __v = addr;
asm volatile ("cbo.inval" " 0(%0)" : : "rK"(__v) : "memory");
}
/**
\brief Clean & invalidate Dcache by addr
\details Clean and invalidate the Dcache block containing addr (cbo.flush).
\param [in] addr address to operate on
*/
__ALWAYS_STATIC_INLINE void __DCACHE_CIPA(uintptr_t addr)
{
uintptr_t __v = addr;
asm volatile ("cbo.flush" " 0(%0)" : : "rK"(__v) : "memory");
}
/**
\brief Get current SP
\details Returns the current stack pointer value.
\return SP register value
*/
__ALWAYS_STATIC_INLINE uintptr_t __get_CurrentSP(void)
{
uintptr_t result;
asm volatile("move %0, sp" : "=r"(result));
return (result);
}
__ALWAYS_STATIC_INLINE uintptr_t __get_Supervisor_isr(void)
{
uintptr_t result;
asm volatile("csrr %0, mip" : "=r"(result));
return (result & 0x222);
}
/**
\brief D-Cache Clean by address
\details Cleans D-Cache for the given address
\param[in] addr address (aligned to 32-byte boundary)
\param[in] dsize size of memory block (in number of bytes)
*/
static inline void csi_dcache_clean_range (uintptr_t addr, unsigned int dsize)
{
int op_size = dsize + addr % CACHE_LINE_SIZE;
uintptr_t op_addr = addr & CACHE_INV_ADDR_Msk;
asm volatile("fence rw, rw");
while (op_size > 0) {
__DCACHE_CPA(op_addr);
op_addr += CACHE_LINE_SIZE;
op_size -= CACHE_LINE_SIZE;
}
asm volatile("fence rw, rw");
asm volatile("fence.i");
}
/**
\brief D-Cache Clean and Invalidate by address
\details Cleans and invalidates D-Cache for the given address
\param[in] addr address (aligned to 32-byte boundary)
\param[in] dsize size of memory block (aligned to 16-byte boundary)
*/
static inline void csi_dcache_clean_invalid_range (uintptr_t addr, unsigned int dsize)
{
int op_size = dsize + addr % CACHE_LINE_SIZE;
uintptr_t op_addr = addr & CACHE_INV_ADDR_Msk;
asm volatile("fence rw, rw");
while (op_size > 0) {
__DCACHE_CIPA(op_addr);
op_addr += CACHE_LINE_SIZE;
op_size -= CACHE_LINE_SIZE;
}
asm volatile("fence rw, rw");
asm volatile("fence.i");
}
/**
\brief D-Cache Invalidate by address
\details Invalidates D-Cache for the given address
\param[in] addr address (aligned to 32-byte boundary)
\param[in] dsize size of memory block (in number of bytes)
*/
static inline void csi_dcache_invalid_range (uintptr_t addr, unsigned int dsize)
{
int op_size = dsize + addr % CACHE_LINE_SIZE;
uintptr_t op_addr = addr & CACHE_INV_ADDR_Msk;
asm volatile("fence rw, rw");
while (op_size > 0) {
__DCACHE_IPA(op_addr);
op_addr += CACHE_LINE_SIZE;
op_size -= CACHE_LINE_SIZE;
}
asm volatile("fence rw, rw");
asm volatile("fence.i");
}
static inline void csi_enable_dcache(void)
{
csr_set(CSR_MSETUP, 0x10073);
}
static inline void csi_disable_data_preftch(void)
{
csr_clear(CSR_MSETUP, 32);
}
static inline void csi_disable_dcache(void)
{
csr_clear(CSR_MSETUP, 1);
}
static inline void csi_flush_dcache_all(void)
{
asm volatile ("csrwi 0x7c2, 0x3");
}
static inline void csi_invalidate_dcache_all(void)
{
asm volatile ("csrwi 0x7c2, 0x2");
}
static inline void __mdelay(void)
{
unsigned long long i;
for (i = 0; i < 0xffffffff; ++i)
cpu_relax();
}
static inline void csi_flush_l2_cache(void)
{
unsigned int hartid = current_hartid();
uintptr_t *cr =(MPIDR_AFFLVL1_VAL(hartid) == 0) ? (uintptr_t *)CLUSTER0_L2_CACHE_FLUSH_REG_BASE :
(uintptr_t *)CLUSTER1_L2_CACHE_FLUSH_REG_BASE;
/* flush l2 cache */
writel(readl(cr) | (1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr);
/* k1pro */
if (L2_CACHE_FLUSH_REQUEST_BIT_OFFSET == L2_CACHE_FLUSH_DONE_BIT_OFFSET)
while (readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET));
else /* k1x */ {
/* clear the request */
while (1) {
if ((readl(cr) & (1 << L2_CACHE_FLUSH_DONE_BIT_OFFSET)) == 0)
break;
__mdelay();
}
writel(readl(cr) & ~(1 << L2_CACHE_FLUSH_REQUEST_BIT_OFFSET), cr);
}
}
#endif
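A brief usage sketch of the range helpers above, assuming this header is included and a hypothetical buffer is shared with a non-coherent agent (the buffer and function names are illustrative, not part of this patch):

/* Hypothetical buffer handed to a non-coherent master (e.g. a mailbox). */
static char shared_buf[256];

static void publish_and_read_back(void)
{
	/* Push CPU writes out to memory so the other agent sees them. */
	csi_dcache_clean_range((uintptr_t)shared_buf, sizeof(shared_buf));

	/* ... the other agent consumes shared_buf and writes a reply ... */

	/* Drop stale lines before reading the reply written by the agent. */
	csi_dcache_invalid_range((uintptr_t)shared_buf, sizeof(shared_buf));
}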

View file

@ -0,0 +1,27 @@
/*
* Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __CCI_H__
#define __CCI_H__
/* Function declarations */
/*
* The ARM CCI driver needs the following:
* 1. Base address of the CCI product
* 2. An array mapping AMBA 4 master IDs to ACE/ACE-Lite slave
* interfaces.
* 3. Size of the array.
*
* SLAVE_IF_UNUSED should be used in the map to indicate that no AMBA 4 master
* exists for that interface.
*/
void cci_init(uintptr_t base, const int *map, unsigned int num_cci_masters);
void cci_enable_snoop_dvm_reqs(unsigned int master_id);
void cci_disable_snoop_dvm_reqs(unsigned int master_id);
#endif /* CCI_H */
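An illustrative initialization sequence, mirroring how css_pm.c later enables snooping per cluster; the base address and map contents below are hypothetical, not values from this patch:

/* Cluster index -> CCI slave interface index; contents are platform specific. */
static const int cci_map[] = { 3, 4 };

static void platform_cci_setup(void)
{
	cci_init(0xD8000000UL /* hypothetical CCI base */, cci_map,
		 sizeof(cci_map) / sizeof(cci_map[0]));
	/* Enable coherency for the booting cluster (cluster 0 here). */
	cci_enable_snoop_dvm_reqs(0);
}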

View file

@ -28,6 +28,8 @@ void fdt_plic_context_save(bool smode, u32 *enable, u32 *threshold, u32 num);
void fdt_plic_context_restore(bool smode, const u32 *enable, u32 threshold,
u32 num);
void fdt_plic_context_exit(void);
void thead_plic_restore(void);
#endif

View file

@ -0,0 +1,14 @@
/*
* Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef CSS_MHU_DOORBELL_H
#define CSS_MHU_DOORBELL_H
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
void mhu_ring_doorbell(struct scmi_channel_plat_info *plat_info);
#endif /* CSS_MHU_DOORBELL_H */

View file

@ -0,0 +1,10 @@
#ifndef __CSS_SCP_H__
#define __CSS_SCP_H__
#include <sbi_utils/psci/psci.h>
void css_scp_off(const struct psci_power_state *target_state);
void css_scp_on(u_register_t mpidr);
void css_scp_suspend(const struct psci_power_state *target_state);
#endif

View file

@ -0,0 +1,141 @@
#ifndef __DRIVER_SCMI_H__
#define __DRIVER_SCMI_H__
#include <sbi_utils/psci/psci.h>
#include <sbi/riscv_locks.h>
#include <sbi/sbi_types.h>
#define GET_SCMI_MAJOR_VER(ver) (((ver) >> 16) & 0xffff)
#define GET_SCMI_MINOR_VER(ver) ((ver) & 0xffff)
#define MAKE_SCMI_VERSION(maj, min) \
((((maj) & 0xffff) << 16) | ((min) & 0xffff))
/* Supported SCMI Protocol Versions */
#define SCMI_AP_CORE_PROTO_VER MAKE_SCMI_VERSION(1, 0)
#define SCMI_PWR_DMN_PROTO_VER MAKE_SCMI_VERSION(2, 0)
#define SCMI_SYS_PWR_PROTO_VER MAKE_SCMI_VERSION(1, 0)
/*
* Check that the driver's version is same or higher than the reported SCMI
* version. We accept lower major version numbers, as all affected protocols
* so far stay backwards compatible. This might need to be revisited in the
* future.
*/
#define is_scmi_version_compatible(drv, scmi) \
((GET_SCMI_MAJOR_VER(drv) > GET_SCMI_MAJOR_VER(scmi)) || \
((GET_SCMI_MAJOR_VER(drv) == GET_SCMI_MAJOR_VER(scmi)) && \
(GET_SCMI_MINOR_VER(drv) <= GET_SCMI_MINOR_VER(scmi))))
/* Mandatory messages IDs for all SCMI protocols */
#define SCMI_PROTO_VERSION_MSG 0x0
#define SCMI_PROTO_ATTR_MSG 0x1
#define SCMI_PROTO_MSG_ATTR_MSG 0x2
/* SCMI power domain management protocol message IDs */
#define SCMI_PWR_STATE_SET_MSG 0x4
#define SCMI_PWR_STATE_GET_MSG 0x5
/* SCMI system power management protocol message IDs */
#define SCMI_SYS_PWR_STATE_SET_MSG 0x3
#define SCMI_SYS_PWR_STATE_GET_MSG 0x4
/* SCMI Protocol identifiers */
#define SCMI_PWR_DMN_PROTO_ID 0x11
#define SCMI_SYS_PWR_PROTO_ID 0x12
/*
* Macros to describe the bit-fields of the `attribute` of system power domain
* protocol PROTOCOL_MSG_ATTRIBUTE message.
*/
#define SYS_PWR_ATTR_WARM_RESET_SHIFT 31
#define SCMI_SYS_PWR_WARM_RESET_SUPPORTED (1U << SYS_PWR_ATTR_WARM_RESET_SHIFT)
#define SYS_PWR_ATTR_SUSPEND_SHIFT 30
#define SCMI_SYS_PWR_SUSPEND_SUPPORTED (1 << SYS_PWR_ATTR_SUSPEND_SHIFT)
/*
* Macros to describe the bit-fields of the `flags` parameter of system power
* domain protocol SYSTEM_POWER_STATE_SET message.
*/
#define SYS_PWR_SET_GRACEFUL_REQ_SHIFT 0
#define SCMI_SYS_PWR_GRACEFUL_REQ (1 << SYS_PWR_SET_GRACEFUL_REQ_SHIFT)
#define SCMI_SYS_PWR_FORCEFUL_REQ (0 << SYS_PWR_SET_GRACEFUL_REQ_SHIFT)
/*
* Macros to describe the `system_state` parameter of system power
* domain protocol SYSTEM_POWER_STATE_SET message.
*/
#define SCMI_SYS_PWR_SHUTDOWN 0x0
#define SCMI_SYS_PWR_COLD_RESET 0x1
#define SCMI_SYS_PWR_WARM_RESET 0x2
#define SCMI_SYS_PWR_POWER_UP 0x3
#define SCMI_SYS_PWR_SUSPEND 0x4
/* SCMI Error code definitions */
#define SCMI_E_QUEUED 1
#define SCMI_E_SUCCESS 0
#define SCMI_E_NOT_SUPPORTED -1
#define SCMI_E_INVALID_PARAM -2
#define SCMI_E_DENIED -3
#define SCMI_E_NOT_FOUND -4
#define SCMI_E_OUT_OF_RANGE -5
#define SCMI_E_BUSY -6
/*
* SCMI driver platform information. The details of the doorbell mechanism
* can be found in the SCMI specification.
*/
typedef struct scmi_channel_plat_info {
/* SCMI mailbox memory */
uintptr_t scmi_mbx_mem;
/* The door bell register address */
uintptr_t db_reg_addr;
/* The bit mask that need to be preserved when ringing doorbell */
uint32_t db_preserve_mask;
/* The bit mask that need to be set to ring doorbell */
uint32_t db_modify_mask;
/* The handler for ringing doorbell */
void (*ring_doorbell)(struct scmi_channel_plat_info *plat_info);
/* cookie is unused now. But added for future enhancements. */
void *cookie;
} scmi_channel_plat_info_t;
typedef spinlock_t scmi_lock_t;
/*
* Structure to represent an SCMI channel.
*/
typedef struct scmi_channel {
scmi_channel_plat_info_t *info;
/* The lock for channel access */
scmi_lock_t *lock;
/* Indicate whether the channel is initialized */
int is_initialized;
} scmi_channel_t;
/* External Common API */
void *scmi_init(scmi_channel_t *ch);
/* API to override default PSCI callbacks for platforms that support SCMI. */
const plat_psci_ops_t *css_scmi_override_pm_ops(plat_psci_ops_t *ops);
/*
* Power domain protocol commands. Refer to the SCMI specification for more
* details on these commands.
*/
int scmi_pwr_state_set(void *p, uint32_t domain_id, uint32_t scmi_pwr_state);
int scmi_pwr_state_get(void *p, uint32_t domain_id, uint32_t *scmi_pwr_state);
int scmi_proto_version(void *p, uint32_t proto_id, uint32_t *version);
int scmi_proto_msg_attr(void *p, uint32_t proto_id, uint32_t command_id,
uint32_t *attr);
scmi_channel_plat_info_t *plat_css_get_scmi_info(unsigned int channel_id);
/*
* System power management protocol commands. Refer SCMI specification for more
* details on these commands.
*/
int scmi_sys_pwr_state_set(void *p, uint32_t flags, uint32_t system_state);
int scmi_sys_pwr_state_get(void *p, uint32_t *system_state);
#endif
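A worked example of the compatibility rule above, with hypothetical reported versions: against the driver's power-domain protocol version of 2.0, a platform reporting 2.1 or 1.0 passes the check, while 3.0 fails, since a newer major version may no longer be backwards compatible.

/* Sketch of the check against a version obtained via PROTOCOL_VERSION. */
static int check_pwr_dmn_version(uint32_t reported)
{
	if (!is_scmi_version_compatible(SCMI_PWR_DMN_PROTO_VER, reported))
		return SCMI_E_NOT_SUPPORTED;	/* e.g. reported 3.0 */
	return SCMI_E_SUCCESS;			/* e.g. reported 2.1 or 1.0 */
}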

View file

@ -0,0 +1,146 @@
#ifndef __SCMI_PRIVATE_H__
#define __SCMI_PRIVATE_H__
#include <sbi/sbi_types.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
/*
* SCMI power domain management protocol message and response lengths. It is
* calculated as sum of length in bytes of the message header (4) and payload
* area (the number of bytes of parameters or return values in the payload).
*/
#define SCMI_PROTO_VERSION_MSG_LEN 4
#define SCMI_PROTO_VERSION_RESP_LEN 12
#define SCMI_PROTO_MSG_ATTR_MSG_LEN 8
#define SCMI_PROTO_MSG_ATTR_RESP_LEN 12
#define SCMI_PWR_STATE_GET_MSG_LEN 8
#define SCMI_PWR_STATE_GET_RESP_LEN 12
/* SCMI power domain protocol `POWER_STATE_SET` message flags */
#define SCMI_PWR_STATE_SET_FLAG_SYNC 0
#define SCMI_PWR_STATE_SET_FLAG_ASYNC 1
/* SCMI message header format bit field */
#define SCMI_MSG_ID_SHIFT 0
#define SCMI_MSG_ID_WIDTH 8
#define SCMI_MSG_ID_MASK ((1 << SCMI_MSG_ID_WIDTH) - 1)
#define SCMI_MSG_PROTO_ID_SHIFT 10
#define SCMI_MSG_PROTO_ID_WIDTH 8
#define SCMI_MSG_PROTO_ID_MASK ((1 << SCMI_MSG_PROTO_ID_WIDTH) - 1)
#define SCMI_MSG_TOKEN_SHIFT 18
#define SCMI_MSG_TOKEN_WIDTH 10
#define SCMI_MSG_TOKEN_MASK ((1 << SCMI_MSG_TOKEN_WIDTH) - 1)
#define SCMI_PWR_STATE_SET_MSG_LEN 16
#define SCMI_PWR_STATE_SET_RESP_LEN 8
#define SCMI_SYS_PWR_STATE_SET_MSG_LEN 12
#define SCMI_SYS_PWR_STATE_SET_RESP_LEN 8
#define SCMI_SYS_PWR_STATE_GET_MSG_LEN 4
#define SCMI_SYS_PWR_STATE_GET_RESP_LEN 12
/* SCMI mailbox flags */
#define SCMI_FLAG_RESP_POLL 0
#define SCMI_FLAG_RESP_INT 1
/* Helper macros to copy arguments to the mailbox payload */
#define SCMI_PAYLOAD_ARG1(payld_arr, arg1) \
*((uint32_t *)&payld_arr[0]) = arg1
#define SCMI_PAYLOAD_ARG2(payld_arr, arg1, arg2) do { \
SCMI_PAYLOAD_ARG1(payld_arr, arg1); \
*((uint32_t *)&payld_arr[1]) = arg2; \
} while (0)
#define SCMI_PAYLOAD_ARG3(payld_arr, arg1, arg2, arg3) do { \
SCMI_PAYLOAD_ARG2(payld_arr, arg1, arg2); \
*((uint32_t *)&payld_arr[2]) = arg3; \
} while (0)
/* Helper macros to read return values from the mailbox payload */
#define SCMI_PAYLOAD_RET_VAL1(payld_arr, val1) \
(val1) = *((uint32_t *)&payld_arr[0])
#define SCMI_PAYLOAD_RET_VAL2(payld_arr, val1, val2) do { \
SCMI_PAYLOAD_RET_VAL1(payld_arr, val1); \
(val2) = *((uint32_t *)&payld_arr[1]); \
} while (0)
#define SCMI_PAYLOAD_RET_VAL3(payld_arr, val1, val2, val3) do { \
SCMI_PAYLOAD_RET_VAL2(payld_arr, val1, val2); \
(val3) = *((uint32_t *)&payld_arr[2]); \
} while (0)
#define SCMI_PAYLOAD_RET_VAL4(payld_arr, val1, val2, val3, val4) do { \
SCMI_PAYLOAD_RET_VAL3(payld_arr, val1, val2, val3); \
(val4) = *((uint32_t *)&payld_arr[3]); \
} while (0)
/* Helper macro to get the token from a SCMI message header */
#define SCMI_MSG_GET_TOKEN(_msg) \
(((_msg) >> SCMI_MSG_TOKEN_SHIFT) & SCMI_MSG_TOKEN_MASK)
/* SCMI Channel Status bit fields */
#define SCMI_CH_STATUS_RES0_MASK 0xFFFFFFFE
#define SCMI_CH_STATUS_FREE_SHIFT 0
#define SCMI_CH_STATUS_FREE_WIDTH 1
#define SCMI_CH_STATUS_FREE_MASK ((1 << SCMI_CH_STATUS_FREE_WIDTH) - 1)
/* Helper macros to check and write the channel status */
#define SCMI_IS_CHANNEL_FREE(status) \
(!!(((status) >> SCMI_CH_STATUS_FREE_SHIFT) & SCMI_CH_STATUS_FREE_MASK))
#define SCMI_MARK_CHANNEL_BUSY(status) do { \
if (!SCMI_IS_CHANNEL_FREE(status)) \
sbi_hart_hang(); \
(status) &= ~(SCMI_CH_STATUS_FREE_MASK << \
SCMI_CH_STATUS_FREE_SHIFT); \
} while (0)
/*
* Helper macro to create an SCMI message header given protocol, message id
* and token.
*/
#define SCMI_MSG_CREATE(_protocol, _msg_id, _token) \
((((_protocol) & SCMI_MSG_PROTO_ID_MASK) << SCMI_MSG_PROTO_ID_SHIFT) | \
(((_msg_id) & SCMI_MSG_ID_MASK) << SCMI_MSG_ID_SHIFT) | \
(((_token) & SCMI_MSG_TOKEN_MASK) << SCMI_MSG_TOKEN_SHIFT))
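/*
 * Illustrative worked expansion: the header of a power-domain
 * POWER_STATE_SET command with token 0 packs the message ID into
 * bits [7:0], the protocol ID into bits [17:10] and the token into
 * bits [27:18]:
 *
 *   SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID, SCMI_PWR_STATE_SET_MSG, 0)
 *     = (0x11 << 10) | (0x4 << 0) | (0 << 18)
 *     = 0x4404
 *
 * The matching SCMI_PWR_STATE_SET_MSG_LEN of 16 is the 4-byte header
 * plus three 4-byte parameter words of that command.
 */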
#define MAILBOX_MEM_PAYLOAD_SIZE (0x80)
#define MAILBOX_SECURE_PSCI_CHANNEL (0x1)
/*
* Private data structure for representing the mailbox memory layout. Refer
* the SCMI specification for more details.
*/
typedef struct mailbox_mem {
uint32_t res_a; /* Reserved */
volatile uint32_t status;
uint64_t res_b; /* Reserved */
uint32_t flags;
volatile uint32_t len;
volatile uint32_t msg_header;
uint32_t payload[];
} mailbox_mem_t;
static inline void validate_scmi_channel(scmi_channel_t *ch)
{
if (!ch || !ch->is_initialized)
sbi_hart_hang();
if (!ch->info || !ch->info->scmi_mbx_mem)
sbi_hart_hang();
}
void scmi_send_sync_command(scmi_channel_t *ch);
void scmi_get_channel(scmi_channel_t *ch);
void scmi_put_channel(scmi_channel_t *ch);
#endif

View file

@ -0,0 +1,10 @@
#ifndef __PLATFORM_DEFINE_H__
#define __PLATFORM_DEFINE_H__
/* System power domain level */
#define CSS_SYSTEM_PWR_DMN_LVL ARM_PWR_LVL2
/* Number of SCMI channels on the platform */
#define PLAT_ARM_SCMI_CHANNEL_COUNT 1U
#endif

View file

@ -0,0 +1,19 @@
#ifndef __ARM_DEF_H__
#define __ARM_DEF_H__
#define MPIDR_AFFLVL0 0ULL
#define MPIDR_AFFLVL1 1ULL
#define MPIDR_AFFLVL2 2ULL
#define MPIDR_AFFLVL3 3ULL
/*
* Macros mapping the MPIDR Affinity levels to ARM Platform Power levels. The
* power levels have a 1:1 mapping with the MPIDR affinity levels.
*/
#define ARM_PWR_LVL0 MPIDR_AFFLVL0
#define ARM_PWR_LVL1 MPIDR_AFFLVL1
#define ARM_PWR_LVL2 MPIDR_AFFLVL2
#define ARM_PWR_LVL3 MPIDR_AFFLVL3
#endif

View file

@ -0,0 +1,21 @@
#ifndef __PLAT_ARM_H__
#define __PLAT_ARM_H__
#include <sbi_utils/psci/psci.h>
#include <sbi/riscv_locks.h>
#include <sbi_utils/psci/plat/arm/common/plat_arm.h>
#define ARM_SCMI_INSTANTIATE_LOCK spinlock_t arm_scmi_lock
#define ARM_SCMI_LOCK_GET_INSTANCE (&arm_scmi_lock)
extern plat_psci_ops_t plat_arm_psci_pm_ops;
const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops);
void plat_arm_pwrc_setup(void);
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state);
#endif

View file

@ -0,0 +1,36 @@
#ifndef __CSS_ARM_H__
#define __CSS_ARM_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/psci/plat/arm/common/arm_def.h>
#include <sbi_utils/psci/plat/arm/board/spacemit/include/platform_def.h>
#include <spacemit/spacemit_config.h>
#define SCMI_DOMAIN_ID_MASK 0xFFFFU
#define SCMI_CHANNEL_ID_MASK 0xFFFFU
#define SCMI_CHANNEL_ID_SHIFT 16U
#define SET_SCMI_CHANNEL_ID(n) (((n) & SCMI_CHANNEL_ID_MASK) << \
SCMI_CHANNEL_ID_SHIFT)
#define SET_SCMI_DOMAIN_ID(n) ((n) & SCMI_DOMAIN_ID_MASK)
#define GET_SCMI_CHANNEL_ID(n) (((n) >> SCMI_CHANNEL_ID_SHIFT) & \
SCMI_CHANNEL_ID_MASK)
#define GET_SCMI_DOMAIN_ID(n) ((n) & SCMI_DOMAIN_ID_MASK)
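/*
 * Illustrative worked example: a composite value for SCMI channel 1 and
 * power domain 3 is built and decoded as
 *
 *   SET_SCMI_CHANNEL_ID(1) | SET_SCMI_DOMAIN_ID(3) = (1 << 16) | 3 = 0x10003
 *   GET_SCMI_CHANNEL_ID(0x10003) = 1, GET_SCMI_DOMAIN_ID(0x10003) = 3
 */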
/* Macros to read the CSS power domain state */
#define CSS_CORE_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL0]
#define CSS_CLUSTER_PWR_STATE(state) (state)->pwr_domain_state[ARM_PWR_LVL1]
static inline unsigned int css_system_pwr_state(const psci_power_state_t *state)
{
#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)
return state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL];
#else
return 0;
#endif
}
extern uint32_t plat_css_core_pos_to_scmi_dmn_id_map[PLATFORM_CLUSTER_COUNT][PLATFORM_CORE_COUNT];
#endif

View file

@ -0,0 +1,13 @@
#ifndef __PSCI_PLAT_COMMON_H__
#define __PSCI_PLAT_COMMON_H__
#include <sbi/sbi_types.h>
#include <sbi_utils/psci/psci.h>
unsigned char *plat_get_power_domain_tree_desc(void);
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const struct plat_psci_ops **psci_ops);
int plat_core_pos_by_mpidr(u_register_t mpidr);
#endif

View file

@ -0,0 +1,223 @@
#ifndef __PSCI_H__
#define __PSCI_H__
#include <sbi/sbi_types.h>
#include <spacemit/spacemit_config.h>
#define MPIDR_AFFLVL0_VAL(mpidr) \
(((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFINITY0_MASK)
#define MPIDR_AFFLVL1_VAL(mpidr) \
(((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFINITY1_MASK)
/*
* Macros for local power states in ARM platforms encoded by State-ID field
* within the power-state parameter.
*/
/* Local power state for power domains in Run state. */
#define ARM_LOCAL_STATE_RUN 0U
/* Local power state for retention. Valid only for CPU power domains */
#define ARM_LOCAL_STATE_RET 1U
/* Local power state for OFF/power-down. Valid for CPU and cluster power
domains */
#define ARM_LOCAL_STATE_OFF 2U
/*
* This macro defines the deepest retention state possible. A higher state
* id will represent an invalid or a power down state.
*/
#define PLAT_MAX_RET_STATE ARM_LOCAL_STATE_RET
/*
* This macro defines the deepest power down states possible. Any state ID
* higher than this is invalid.
*/
#define PLAT_MAX_OFF_STATE ARM_LOCAL_STATE_OFF
/*
* Type for representing the local power state at a particular level.
*/
typedef unsigned char plat_local_state_t;
/* The local state macro used to represent RUN state. */
#define PSCI_LOCAL_STATE_RUN 0U
typedef unsigned long u_register_t;
/*******************************************************************************
* PSCI error codes
******************************************************************************/
#define PSCI_E_SUCCESS 0
#define PSCI_E_NOT_SUPPORTED -1
#define PSCI_E_INVALID_PARAMS -2
#define PSCI_E_DENIED -3
#define PSCI_E_ALREADY_ON -4
#define PSCI_E_ON_PENDING -5
#define PSCI_E_INTERN_FAIL -6
#define PSCI_E_NOT_PRESENT -7
#define PSCI_E_DISABLED -8
#define PSCI_E_INVALID_ADDRESS -9
#define PSCI_INVALID_MPIDR ~((u_register_t)0)
/*
* These are the states reported by the PSCI_AFFINITY_INFO API for the specified
* CPU. The definitions of these states can be found in Section 5.7.1 in the
* PSCI specification (ARM DEN 0022C).
*/
typedef enum {
AFF_STATE_ON = 0U,
AFF_STATE_OFF = 1U,
AFF_STATE_ON_PENDING = 2U
} aff_info_state_t;
/*******************************************************************************
* Structure used to store per-cpu information relevant to the PSCI service.
* It is populated in the per-cpu data array. In return we get a guarantee that
* this information will not reside on a cache line shared with another cpu.
******************************************************************************/
typedef struct psci_cpu_data {
/* State as seen by PSCI Affinity Info API */
aff_info_state_t aff_info_state;
/*
* Highest power level which takes part in a power management
* operation.
*/
unsigned int target_pwrlvl;
/* The local power state of this CPU */
plat_local_state_t local_state;
} psci_cpu_data_t;
/*
* Macro to represent invalid affinity level within PSCI.
*/
#define PSCI_INVALID_PWR_LVL (PLAT_MAX_PWR_LVL + 1U)
/*
* These are the power states reported by PSCI_NODE_HW_STATE API for the
* specified CPU. The definitions of these states can be found in Section 5.15.3
* of PSCI specification (ARM DEN 0022C).
*/
#define HW_ON 0
#define HW_OFF 1
#define HW_STANDBY 2
#define PSTATE_ID_SHIFT (0U)
#define PSTATE_VALID_MASK (0xFCFE0000U)
#define PSTATE_TYPE_SHIFT (16U)
#define PSTATE_PWR_LVL_SHIFT (24U)
#define PSTATE_ID_MASK (0xffffU)
#define PSTATE_PWR_LVL_MASK (0x3U)
#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \
PSTATE_PWR_LVL_MASK)
#define psci_make_powerstate(state_id, type, pwrlvl) \
(((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
(((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
(((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT)
#define PSTATE_TYPE_STANDBY (0x0U)
#define PSTATE_TYPE_POWERDOWN (0x1U)
#define PSTATE_TYPE_MASK (0x1U)
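/*
 * Illustrative worked example: a power-down request with state ID 0 at
 * power level 1 encodes as
 *
 *   psci_make_powerstate(0, PSTATE_TYPE_POWERDOWN, 1)
 *     = (0 << 0) | (1 << 16) | (1 << 24) = 0x01010000
 *
 * psci_get_pstate_type(0x01010000) == 1, psci_get_pstate_pwrlvl(0x01010000) == 1,
 * and psci_check_power_state(0x01010000) == 0, i.e. no reserved bits are set.
 */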
/* RISCV suspend power state */
#define RSTATE_TYPE_SHIFT (31U)
#define RSTATE_PWR_LVL_SHIFT (24U)
#define RSTATE_COMMON_SHIFT (28U)
/*****************************************************************************
* This data structure defines the representation of the power state parameter
* for its exchange between the generic PSCI code and the platform port. For
* example, it is used by the platform port to specify the requested power
* states during a power management operation. It is used by the generic code to
* inform the platform about the target power states that each level should
* enter.
****************************************************************************/
typedef struct psci_power_state {
/*
* The pwr_domain_state[] stores the local power state at each level
* for the CPU.
*/
plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1U ];
} psci_power_state_t;
/*
* Function to test whether the plat_local_state is RUN state
*/
static inline int is_local_state_run(unsigned int plat_local_state)
{
return (plat_local_state == PSCI_LOCAL_STATE_RUN) ? 1 : 0;
}
/*
* Function to test whether the plat_local_state is OFF state
*/
static inline int is_local_state_off(unsigned int plat_local_state)
{
return ((plat_local_state > PLAT_MAX_RET_STATE) &&
(plat_local_state <= PLAT_MAX_OFF_STATE)) ? 1 : 0;
}
/* Power state helper functions */
static inline unsigned int psci_check_power_state(unsigned int power_state)
{
return ((power_state) & PSTATE_VALID_MASK);
}
static inline unsigned int psci_get_pstate_id(unsigned int power_state)
{
return ((power_state) >> PSTATE_ID_SHIFT) & PSTATE_ID_MASK;
}
static inline unsigned int psci_get_pstate_type(unsigned int power_state)
{
return ((power_state) >> PSTATE_TYPE_SHIFT) & PSTATE_TYPE_MASK;
}
/*******************************************************************************
* Structure populated by platform specific code to export routines which
* perform common low level power management functions
******************************************************************************/
typedef struct plat_psci_ops {
void (*cpu_standby)(plat_local_state_t cpu_state);
int (*pwr_domain_on)(u_register_t mpidr);
void (*pwr_domain_off)(const psci_power_state_t *target_state);
int (*pwr_domain_off_early)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend_pwrdown_early)(
const psci_power_state_t *target_state);
void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
void (*pwr_domain_on_finish_late)(
const psci_power_state_t *target_state);
void (*pwr_domain_suspend_finish)(
const psci_power_state_t *target_state);
void (*pwr_domain_pwr_down_wfi)(
const psci_power_state_t *target_state);
void (*system_off)(void);
void (*system_reset)(void);
int (*validate_power_state)(unsigned int power_state,
psci_power_state_t *req_state);
int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint);
void (*get_sys_suspend_power_state)(
psci_power_state_t *req_state);
int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
int pwrlvl);
int (*translate_power_state_by_mpidr)(u_register_t mpidr,
unsigned int power_state,
psci_power_state_t *output_state);
int (*get_node_hw_state)(u_register_t mpidr, unsigned int power_level);
int (*mem_protect_chk)(uintptr_t base, u_register_t length);
int (*read_mem_protect)(int *val);
int (*write_mem_protect)(int val);
int (*system_reset2)(int is_vendor,
int reset_type, u_register_t cookie);
} plat_psci_ops_t;
int psci_cpu_on(u_register_t target_cpu, uintptr_t entrypoint);
int psci_cpu_off(void);
int psci_affinity_info(u_register_t target_affinity, unsigned int lowest_affinity_level);
int psci_cpu_suspend(unsigned int power_state, uintptr_t entrypoint, u_register_t context_id);
#endif

View file

@ -0,0 +1,8 @@
#ifndef __PSCI_LIB_H__
#define __PSCI_LIB_H__
int psci_setup(void);
void psci_print_power_domain_map(void);
void psci_warmboot_entrypoint(void);
#endif

View file

@ -14,6 +14,9 @@
#include <sbi/sbi_trap.h>
#include <sbi/sbi_version.h>
#include <sbi/riscv_asm.h>
#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) || defined(CONFIG_PLATFORM_SPACEMIT_K1X)
#include <sbi_utils/cache/cacheflush.h>
#endif
static int sbi_ecall_base_probe(unsigned long extid, unsigned long *out_val)
{
@ -62,6 +65,13 @@ static int sbi_ecall_base_handler(unsigned long extid, unsigned long funcid,
case SBI_EXT_BASE_GET_MIMPID:
*out_val = csr_read(CSR_MIMPID);
break;
#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO) || defined(CONFIG_PLATFORM_SPACEMIT_K1X)
case SBI_EXT_BASE_FLUSH_CACHE_ALL:
csi_flush_dcache_all();
/* there is no need to flush the L2 cache here */
/* csi_flush_l2_cache(); */
break;
#endif
case SBI_EXT_BASE_PROBE_EXT:
ret = sbi_ecall_base_probe(regs->a0, out_val);
break;
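For reference, a minimal sketch of how an S-mode client could invoke the new function; the wrapper name is hypothetical, EID 0x10 is the standard SBI base extension, and FID 0x7 is the SBI_EXT_BASE_FLUSH_CACHE_ALL value added above (only present on the SpaceMIT K1PRO/K1X configurations):

static long sbi_flush_cache_all(void)
{
	register unsigned long a0 asm("a0");
	register unsigned long a6 asm("a6") = 0x7;	/* SBI_EXT_BASE_FLUSH_CACHE_ALL */
	register unsigned long a7 asm("a7") = 0x10;	/* SBI base extension EID */

	asm volatile ("ecall"
		      : "=r" (a0)
		      : "r" (a6), "r" (a7)
		      : "a1", "memory");

	return a0;	/* SBI error code, 0 on success */
}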

View file

@ -35,8 +35,13 @@ static int sbi_ecall_hsm_handler(unsigned long extid, unsigned long funcid,
ret = sbi_hsm_hart_stop(scratch, true);
break;
case SBI_EXT_HSM_HART_GET_STATUS:
#ifndef CONFIG_ARM_PSCI_SUPPORT
ret = sbi_hsm_hart_get_state(sbi_domain_thishart_ptr(),
regs->a0);
#else
ret = sbi_hsm_hart_get_psci_state(sbi_domain_thishart_ptr(),
regs->a0);
#endif
break;
case SBI_EXT_HSM_HART_SUSPEND:
ret = sbi_hsm_hart_suspend(scratch, regs->a0, regs->a1,

View file

@ -818,6 +818,8 @@ sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
}
}
csr_write(CSR_TCMCFG, 1);
register unsigned long a0 asm("a0") = arg0;
register unsigned long a1 asm("a1") = arg1;
__asm__ __volatile__("mret" : : "r"(a0), "r"(a1));

View file

@ -25,6 +25,8 @@
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>
#include <sbi/sbi_console.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/psci.h>
#define __sbi_hsm_hart_change_state(hdata, oldstate, newstate) \
({ \
@ -76,6 +78,21 @@ int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid)
return __sbi_hsm_hart_get_state(hartid);
}
#ifdef CONFIG_ARM_PSCI_SUPPORT
int __sbi_hsm_hart_get_psci_state(u32 hartid)
{
return psci_affinity_info(hartid, 0);
}
int sbi_hsm_hart_get_psci_state(const struct sbi_domain *dom, u32 hartid)
{
if (!sbi_domain_is_assigned_hart(dom, hartid))
return SBI_EINVAL;
return __sbi_hsm_hart_get_psci_state(hartid);
}
#endif
/*
* Try to acquire the ticket for the given target hart to make sure only
* one hart prepares the start of the target hart.
@ -137,8 +154,13 @@ int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
return 0;
}
extern unsigned char _data_start[];
extern unsigned char _data_end[];
extern unsigned char _bss_start[];
extern unsigned char _bss_end[];
void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch,
u32 hartid)
u32 hartid, bool cool_boot)
{
unsigned long next_arg1;
unsigned long next_addr;
@ -155,33 +177,53 @@ void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch,
next_mode = scratch->next_mode;
hsm_start_ticket_release(hdata);
/**
* Clean the caches for the .data/.bss sections, the local scratch area and
* the local stack so that secondary harts can observe the data.
*/
if (cool_boot) {
csi_flush_dcache_all();
csi_flush_l2_cache();
}
sbi_hart_switch_mode(hartid, next_arg1, next_addr, next_mode, false);
}
#ifdef CONFIG_ARM_PSCI_SUPPORT
static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
unsigned long saved_mie;
struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
hart_data_offset);
/* Save MIE CSR */
saved_mie = csr_read(CSR_MIE);
/* Set MSIE and MEIE bits to receive IPI */
csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP);
/* Wait for state transition requested by sbi_hsm_hart_start() */
while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) {
wfi();
}
/* Restore MIE CSR */
csr_write(CSR_MIE, saved_mie);
/*
* No need to clear IPI here because the sbi_ipi_init() will
* clear it for current HART via sbi_platform_ipi_init().
*/
while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING);
}
#else
static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
unsigned long saved_mie;
struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
hart_data_offset);
/* Save MIE CSR */
saved_mie = csr_read(CSR_MIE);
/* Set MSIE and MEIE bits to receive IPI */
csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP);
/* Wait for state transition requested by sbi_hsm_hart_start() */
while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) {
wfi();
}
/* Restore MIE CSR */
csr_write(CSR_MIE, saved_mie);
/*
* No need to clear IPI here because the sbi_ipi_init() will
* clear it for current HART via sbi_platform_ipi_init().
*/
}
#endif
const struct sbi_hsm_device *sbi_hsm_get_device(void)
{

View file

@ -185,6 +185,7 @@ static void sbi_boot_print_hart(struct sbi_scratch *scratch, u32 hartid)
sbi_hart_delegation_dump(scratch, "Boot HART ", " ");
}
#ifndef CONFIG_ARM_PSCI_SUPPORT
static spinlock_t coldboot_lock = SPIN_LOCK_INITIALIZER;
static struct sbi_hartmask coldboot_wait_hmask = { 0 };
@ -257,6 +258,7 @@ static void wake_coldboot_harts(struct sbi_scratch *scratch, u32 hartid)
/* Release coldboot lock */
spin_unlock(&coldboot_lock);
}
#endif
static unsigned long entry_count_offset;
static unsigned long init_count_offset;
@ -392,12 +394,14 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
sbi_boot_print_hart(scratch, hartid);
#ifndef CONFIG_ARM_PSCI_SUPPORT
wake_coldboot_harts(scratch, hartid);
#endif
count = sbi_scratch_offset_ptr(scratch, init_count_offset);
(*count)++;
sbi_hsm_hart_start_finish(scratch, hartid);
sbi_hsm_hart_start_finish(scratch, hartid, true);
}
static void __noreturn init_warm_startup(struct sbi_scratch *scratch,
@ -456,7 +460,7 @@ static void __noreturn init_warm_startup(struct sbi_scratch *scratch,
count = sbi_scratch_offset_ptr(scratch, init_count_offset);
(*count)++;
sbi_hsm_hart_start_finish(scratch, hartid);
sbi_hsm_hart_start_finish(scratch, hartid, false);
}
static void __noreturn init_warm_resume(struct sbi_scratch *scratch,
@ -481,7 +485,9 @@ static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid)
{
int hstate;
#ifndef CONFIG_ARM_PSCI_SUPPORT
wait_for_coldboot(scratch, hartid);
#endif
hstate = sbi_hsm_hart_get_state(sbi_domain_thishart_ptr(), hartid);
if (hstate < 0)

View file

@ -557,8 +557,25 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
return ret;
}
#ifdef CONFIG_PLATFORM_SPACEMIT_K1X
static inline int spacemit_mhpmevent_inhibit_flags_are_invalid(uint64_t mhpmevent_val)
{
uint64_t event_hw_idx = mhpmevent_val & ~MHPMEVENT_SSCOF_MASK;
/* Inhibit flags in mhpmevents of L2 cache events are invalid. */
if (event_hw_idx >= 184 && event_hw_idx <= 189)
return 1;
return 0;
}
#endif /* CONFIG_PLATFORM_SPACEMIT_K1X */
static void pmu_update_inhibit_flags(unsigned long flags, uint64_t *mhpmevent_val)
{
#ifdef CONFIG_PLATFORM_SPACEMIT_K1X
if (spacemit_mhpmevent_inhibit_flags_are_invalid(*mhpmevent_val))
return;
#endif
if (flags & SBI_PMU_CFG_FLAG_SET_VUINH)
*mhpmevent_val |= MHPMEVENT_VUINH;
if (flags & SBI_PMU_CFG_FLAG_SET_VSINH)
@ -587,9 +604,16 @@ static int pmu_update_hw_mhpmevent(struct sbi_pmu_hw_event *hw_evt, int ctr_idx,
* Always set the OVF bit(disable interrupts) and inhibit counting of
* events in M-mode. The OVF bit should be enabled during the start call.
*/
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) |
MHPMEVENT_MINH | MHPMEVENT_OF;
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF)) {
#ifdef CONFIG_PLATFORM_SPACEMIT_K1X
if (spacemit_mhpmevent_inhibit_flags_are_invalid(mhpmevent_val))
mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) |
MHPMEVENT_OF;
else
#endif
mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) |
MHPMEVENT_MINH | MHPMEVENT_OF;
}
if (pmu_dev && pmu_dev->hw_counter_disable_irq)
pmu_dev->hw_counter_disable_irq(ctr_idx);

View file

@ -13,6 +13,7 @@
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>
#include <spacemit/spacemit_config.h>
u32 last_hartid_having_scratch = SBI_HARTMASK_MAX_BITS - 1;
struct sbi_scratch *hartid_to_scratch_table[SBI_HARTMASK_MAX_BITS] = { 0 };
@ -59,11 +60,14 @@ unsigned long sbi_scratch_alloc_offset(unsigned long size)
if (!size)
return 0;
size += __SIZEOF_POINTER__ - 1;
size &= ~((unsigned long)__SIZEOF_POINTER__ - 1);
size += CACHE_LINE_SIZE - 1;
size &= ~((unsigned long)CACHE_LINE_SIZE - 1);
spin_lock(&extra_lock);
extra_offset += CACHE_LINE_SIZE - 1;
extra_offset &= ~((unsigned long)CACHE_LINE_SIZE - 1);
if (SBI_SCRATCH_SIZE < (extra_offset + size))
goto done;
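/*
 * Illustrative effect of the new rounding, assuming CACHE_LINE_SIZE is 64:
 *   size = 24         -> (24 + 63) & ~63UL = 64
 *   extra_offset = 40 -> (40 + 63) & ~63UL = 64
 * so each allocation starts on a cache-line boundary and per-hart scratch
 * data no longer shares cache lines.
 */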

View file

@ -22,4 +22,6 @@ source "$(OPENSBI_SRC_DIR)/lib/utils/sys/Kconfig"
source "$(OPENSBI_SRC_DIR)/lib/utils/timer/Kconfig"
source "$(OPENSBI_SRC_DIR)/lib/utils/psci/Kconfig"
endmenu

View file

@ -0,0 +1,41 @@
/*
* Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <spacemit/spacemit_config.h>
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/psci/plat/arm/common/plat_arm.h>
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
#include <sbi_utils/psci/drivers/arm/css/css_mhu_doorbell.h>
const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops)
{
return css_scmi_override_pm_ops(ops);
}
static scmi_channel_plat_info_t spacemit_scmi_plat_info = {
.scmi_mbx_mem = SCMI_MAILBOX_SHARE_MEM,
.db_reg_addr = PLAT_MAILBOX_REG_BASE,
/* not used */
.db_preserve_mask = 0xfffffffe,
/* not used */
.db_modify_mask = 0x1,
.ring_doorbell = &mhu_ring_doorbell,
};
scmi_channel_plat_info_t *plat_css_get_scmi_info(unsigned int channel_id)
{
return &spacemit_scmi_plat_info;
}
/*
* The array mapping platform core position (implemented by plat_my_core_pos())
* to the SCMI power domain ID implemented by SCP.
*/
uint32_t plat_css_core_pos_to_scmi_dmn_id_map[PLATFORM_CLUSTER_COUNT][PLATFORM_CORE_COUNT] = {
PLAT_SCMI_SINGLE_CLUSTER_DOMAIN_MAP,
PLAT_SCMI_DOUBLE_CLUSTER_DOMAIN_MAP
};

View file

@ -0,0 +1,68 @@
/*
* Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi_utils/psci/psci.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include <sbi_utils/psci/plat/arm/common/plat_arm.h>
#include <sbi_utils/psci/plat/arm/common/arm_def.h>
/*******************************************************************************
* ARM standard platform handler called to check the validity of the power state
* parameter.
******************************************************************************/
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
unsigned int pstate = psci_get_pstate_type(power_state);
unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
unsigned int i;
if (req_state == NULL) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
if (pwr_lvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* Sanity check the requested state */
if (pstate == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on power level 0
* Ignore any other power level.
*/
if (pwr_lvl != ARM_PWR_LVL0)
return PSCI_E_INVALID_PARAMS;
req_state->pwr_domain_state[ARM_PWR_LVL0] =
ARM_LOCAL_STATE_RET;
} else {
for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++)
req_state->pwr_domain_state[i] =
ARM_LOCAL_STATE_OFF;
}
/*
* We expect the 'state id' to be zero.
*/
if (psci_get_pstate_id(power_state) != 0U)
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* The ARM Standard platform definition of platform porting API
* `plat_setup_psci_ops`.
******************************************************************************/
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
*psci_ops = plat_arm_psci_override_pm_ops(&plat_arm_psci_pm_ops);
return 0;
}

View file

@ -0,0 +1,298 @@
/*
* Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include <sbi/riscv_asm.h>
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/cci/cci.h>
#include <sbi/riscv_encoding.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/irqchip/fdt_irqchip_plic.h>
#include <sbi_utils/psci/plat/arm/common/arm_def.h>
#include <sbi_utils/psci/plat/arm/css/common/css_pm.h>
#include <sbi_utils/psci/drivers/arm/css/css_scp.h>
#include <sbi_utils/psci/plat/arm/common/plat_arm.h>
/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
#pragma weak plat_arm_psci_pm_ops
/*******************************************************************************
* Handler called when a power domain is about to be turned on. The
* level and mpidr determine the affinity instance.
******************************************************************************/
int css_pwr_domain_on(u_register_t mpidr)
{
css_scp_on(mpidr);
return PSCI_E_SUCCESS;
}
static void css_pwr_domain_on_finisher_common(
const psci_power_state_t *target_state)
{
unsigned int clusterid;
unsigned int hartid = current_hartid();
if (CSS_CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/*
* Perform the common cluster specific operations i.e enable coherency
* if this cluster was off.
*/
if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
clusterid = MPIDR_AFFLVL1_VAL(hartid);
cci_enable_snoop_dvm_reqs(clusterid);
}
}
/*******************************************************************************
* Handler called when a power level has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from. This handler would never be invoked with
* the system power domain uninitialized as either the primary would have taken
* care of it as part of cold boot or the first core awakened from system
* suspend would have already initialized it.
******************************************************************************/
void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
/* Assert that the system power domain need not be initialized */
if (css_system_pwr_state(target_state) != ARM_LOCAL_STATE_RUN) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
css_pwr_domain_on_finisher_common(target_state);
}
/*******************************************************************************
* Handler called when a power domain has just been powered on and the cpu
* and its cluster are fully participating in coherent transaction on the
* interconnect. Data cache must be enabled for CPU at this point.
******************************************************************************/
void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
#if 0
/* Program the gic per-cpu distributor or re-distributor interface */
plat_arm_gic_pcpu_init();
/* Enable the gic cpu interface */
plat_arm_gic_cpuif_enable();
/* Setup the CPU power down request interrupt for secondary core(s) */
css_setup_cpu_pwr_down_intr();
#endif
}
/*******************************************************************************
* Common function called while turning a cpu off or suspending it. It is called
* from css_off() or css_suspend() when these functions in turn are called for
* power domain at the highest power level which will be powered down. It
* performs the actions common to the OFF and SUSPEND calls.
******************************************************************************/
static void css_power_down_common(const psci_power_state_t *target_state)
{
unsigned int clusterid;
unsigned int hartid = current_hartid();
#if 0
/* Prevent interrupts from spuriously waking up this cpu */
plat_arm_gic_cpuif_disable();
#endif
/* Cluster is to be turned off, so disable coherency */
if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
clusterid = MPIDR_AFFLVL1_VAL(hartid);
cci_disable_snoop_dvm_reqs(clusterid);
}
}
static int css_pwr_domain_off_early(const psci_power_state_t *target_state)
{
/* any pending IPI has already been cleared before this point */
/* disable the plic irq */
fdt_plic_context_exit();
/* clear the external irq pending */
csr_clear(CSR_MIP, MIP_MEIP);
csr_clear(CSR_MIP, MIP_SEIP);
/* clear the pending S-mode timer on this core if it has the Sstc extension */
if (sbi_hart_has_extension(sbi_scratch_thishart_ptr(), SBI_HART_EXT_SSTC)) {
csr_write(CSR_STIMECMP, 0xffffffffffffffff);
}
return 0;
}
/*******************************************************************************
* Handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void css_pwr_domain_off(const psci_power_state_t *target_state)
{
if (CSS_CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
css_power_down_common(target_state);
css_scp_off(target_state);
}
void css_pwr_down_wfi(const psci_power_state_t *target_state)
{
while (1)
wfi();
}
/*
* The system power domain suspend is supported only via the PSCI
* SYSTEM_SUSPEND API. A PSCI CPU_SUSPEND request to the system power domain
* will be downgraded to the lower level.
*/
static int css_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
int rc;
rc = arm_validate_power_state(power_state, req_state);
/*
* Ensure that we don't overrun the pwr_domain_state array in the case
* where the platform supported max power level is less than the system
* power level
*/
#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)
/*
* Ensure that the system power domain level is never suspended
* via PSCI CPU SUSPEND API. Currently system suspend is only
* supported via PSCI SYSTEM SUSPEND API.
*/
req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] =
ARM_LOCAL_STATE_RUN;
#endif
return rc;
}
/*******************************************************************************
* Handler called when the CPU power domain is about to enter standby.
******************************************************************************/
void css_cpu_standby(plat_local_state_t cpu_state)
{
/* unsigned int scr; */
if (cpu_state != ARM_LOCAL_STATE_RET) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
wfi();
#if 0
scr = read_scr_el3();
/*
* Enable the Non secure interrupt to wake the CPU.
* In GICv3 affinity routing mode, the non secure group1 interrupts use
* the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ.
* Enabling both the bits works for both GICv2 mode and GICv3 affinity
* routing mode.
*/
write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
isb();
dsb();
wfi();
/*
* Restore SCR to the original value, synchronisation of scr_el3 is
* done by eret while el3_exit to save some execution cycles.
*/
write_scr_el3(scr);
#endif
}
/*******************************************************************************
* Handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
/*
* CSS currently supports retention only at cpu level. Just return
* as nothing is to be done for retention.
*/
if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
return;
if (CSS_CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
css_power_down_common(target_state);
csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP);
/* Perform system domain state saving if issuing system suspend */
if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
/* arm_system_pwr_domain_save(); */
/* Power off the Redistributor after having saved its context */
/* plat_arm_gic_redistif_off(); */
}
css_scp_suspend(target_state);
}
/*******************************************************************************
* Handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
void css_pwr_domain_suspend_finish(
const psci_power_state_t *target_state)
{
/* Return as nothing is to be done on waking up from retention. */
if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
return;
/* Perform system domain restore if woken up from system suspend */
if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
/*
* At this point, the Distributor must be powered on to be ready
* to have its state restored. The Redistributor will be powered
* on as part of gicv3_rdistif_init_restore.
*/
/* arm_system_pwr_domain_resume() */;
css_pwr_domain_on_finisher_common(target_state);
/* Enable the gic cpu interface */
/* plat_arm_gic_cpuif_enable() */;
}
/*******************************************************************************
* Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
* platform will take care of registering the handlers with PSCI.
******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
.pwr_domain_on = css_pwr_domain_on,
.pwr_domain_on_finish = css_pwr_domain_on_finish,
.pwr_domain_on_finish_late = css_pwr_domain_on_finish_late,
.pwr_domain_off = css_pwr_domain_off,
.pwr_domain_off_early = css_pwr_domain_off_early,
.pwr_domain_pwr_down_wfi = css_pwr_down_wfi,
.validate_power_state = css_validate_power_state,
.cpu_standby = css_cpu_standby,
.pwr_domain_suspend = css_pwr_domain_suspend,
.pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
};

View file

@ -0,0 +1,27 @@
/*
* Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
#include <sbi_utils/psci/drivers/arm/css/scmi_private.h>
#include "mhu.h"
void mhu_ring_doorbell(struct scmi_channel_plat_info *plat_info)
{
unsigned int msg;
mbox_reg_desc_t *regs = (mbox_reg_desc_t *)plat_info->db_reg_addr;
/* clear the fifo */
while (regs->msg_status[MAILBOX_SECURE_PSCI_CHANNEL + 2].bits.num_msg) {
msg = regs->mbox_msg[MAILBOX_SECURE_PSCI_CHANNEL + 2].val;
}
/* clear pending */
msg = regs->mbox_irq[0].irq_status_clr.val;
msg |= (1 << ((MAILBOX_SECURE_PSCI_CHANNEL + 2) * 2));
regs->mbox_irq[0].irq_status_clr.val = msg;
/* ring the doorbell to esos */
regs->mbox_msg[MAILBOX_SECURE_PSCI_CHANNEL].val = 'c';
}

View file

@ -0,0 +1,130 @@
/*
* Arm SCP/MCP Software
* Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef INTERNAL_MHU_H
#define INTERNAL_MHU_H
/* mailbox register description */
/* mailbox sysconfig */
typedef union mbox_sysconfig {
unsigned int val;
struct {
unsigned int resetn:1;
unsigned int reserved:31;
} bits;
} mbox_sysconfig_t;
typedef union mbox_msg {
unsigned int val;
struct {
unsigned int msg:32;
} bits;
} mbox_msg_t;
typedef union mbox_fifo_status {
unsigned int val;
struct {
unsigned int is_full:1;
unsigned int reserved:31;
} bits;
} mbox_fifo_status_t;
typedef union mbox_msg_status {
unsigned int val;
struct {
unsigned int num_msg:4;
unsigned int reserved:28;
} bits;
} mbox_msg_status_t;
typedef union mbox_irq_status {
unsigned int val;
struct {
unsigned int new_msg0_status:1;
unsigned int not_msg0_full:1;
unsigned int new_msg1_status:1;
unsigned int not_msg1_full:1;
unsigned int new_msg2_status:1;
unsigned int not_msg2_full:1;
unsigned int new_msg3_status:1;
unsigned int not_msg3_full:1;
unsigned int reserved:24;
} bits;
} mbox_irq_status_t;
typedef union mbox_irq_status_clr {
unsigned int val;
struct {
unsigned int new_msg0_clr:1;
unsigned int not_msg0_full_clr:1;
unsigned int new_msg1_clr:1;
unsigned int not_msg1_full_clr:1;
unsigned int new_msg2_clr:1;
unsigned int not_msg2_full_clr:1;
unsigned int new_msg3_clr:1;
unsigned int not_msg3_full_clr:1;
unsigned int reserved:24;
} bits;
} mbox_irq_status_clr_t;
typedef union mbox_irq_enable_set {
unsigned int val;
struct {
unsigned int new_msg0_irq_en:1;
unsigned int not_msg0_full_irq_en:1;
unsigned int new_msg1_irq_en:1;
unsigned int not_msg1_full_irq_en:1;
unsigned int new_msg2_irq_en:1;
unsigned int not_msg2_full_irq_en:1;
unsigned int new_msg3_irq_en:1;
unsigned int not_msg3_full_irq_en:1;
unsigned int reserved:24;
} bits;
} mbox_irq_enable_set_t;
typedef union mbox_irq_enable_clr {
unsigned int val;
struct {
unsigned int new_msg0_irq_clr:1;
unsigned int not_msg0_full_irq_clr:1;
unsigned int new_msg1_irq_clr:1;
unsigned int not_msg1_full_irq_clr:1;
unsigned int new_msg2_irq_clr:1;
unsigned int not_msg2_full_irq_clr:1;
unsigned int new_msg3_irq_clr:1;
unsigned int not_msg3_full_irq_clr:1;
unsigned int reserved:24;
} bits;
} mbox_irq_enable_clr_t;
typedef struct mbox_irq {
mbox_irq_status_t irq_status;
mbox_irq_status_clr_t irq_status_clr;
mbox_irq_enable_set_t irq_en_set;
mbox_irq_enable_clr_t irq_en_clr;
} mbox_irq_t;
/*!
* \brief MHU Register Definitions
*/
typedef struct mhu_reg {
unsigned int mbox_version; /* 0x00 */
unsigned int reserved0[3]; /* 0x4 0x8 0xc */
mbox_sysconfig_t mbox_sysconfig; /* 0x10 */
unsigned int reserved1[11]; /* 0x14, 0x18, 0x1c, 0x20, 0x24, 0x28, 0x2c, 0x30, 0x34, 0x38, 0x3c */
mbox_msg_t mbox_msg[4]; /* 0x40, 0x44, 0x48, 0x4c */
unsigned int reserved2[12];
mbox_fifo_status_t fifo_status[4]; /* 0x80, 0x84, 0x88, 0x8c */
unsigned int reserved3[12];
mbox_msg_status_t msg_status[4]; /* 0xc0 */
unsigned int reserved4[12];
mbox_irq_t mbox_irq[2]; /* 0x100 */
} mbox_reg_desc_t;
#endif /* INTERNAL_MHU_H */

View file

@ -0,0 +1,228 @@
/*
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi/sbi_platform.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
#include <sbi_utils/psci/drivers/arm/css/scmi_private.h>
#define scmi_lock_init(lock)
#define scmi_lock_get(lock) spin_lock(lock)
#define scmi_lock_release(lock) spin_unlock(lock)
/*
* Private helper function to get exclusive access to SCMI channel.
*/
void scmi_get_channel(scmi_channel_t *ch)
{
if (!ch->lock)
sbi_hart_hang();
scmi_lock_get(ch->lock);
/* Make sure any previous command has finished */
if (!SCMI_IS_CHANNEL_FREE(
((mailbox_mem_t *)(ch->info->scmi_mbx_mem))->status))
sbi_hart_hang();
}
/*
* Private helper function to transfer ownership of channel from AP to SCP.
*/
void scmi_send_sync_command(scmi_channel_t *ch)
{
mailbox_mem_t *mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
SCMI_MARK_CHANNEL_BUSY(mbx_mem->status);
/*
* Ensure that any write to the SCMI payload area is seen by SCP before
* we write to the doorbell register. If these 2 writes were reordered
* by the CPU then SCP would read stale payload data
*/
/* dmbst(); */
asm volatile ("fence iorw, iorw");
ch->info->ring_doorbell(ch->info);
/*
* Ensure that the write to the doorbell register is ordered prior to
* checking whether the channel is free.
*/
/* dmbsy(); */
asm volatile ("fence iorw, iorw");
/* Wait for channel to be free */
while (!SCMI_IS_CHANNEL_FREE(mbx_mem->status))
;
/*
* Ensure that any read to the SCMI payload area is done after reading
* mailbox status. If these 2 reads were reordered then the CPU would
* read invalid payload data
*/
/* dmbld(); */
asm volatile ("fence iorw, iorw");
}
/*
* Private helper function to release exclusive access to SCMI channel.
*/
void scmi_put_channel(scmi_channel_t *ch)
{
/* Make sure any previous command has finished */
if (!SCMI_IS_CHANNEL_FREE(
((mailbox_mem_t *)(ch->info->scmi_mbx_mem))->status))
sbi_hart_hang();
if (!ch->lock)
sbi_hart_hang();
scmi_lock_release(ch->lock);
}
/*
* API to query the SCMI protocol version.
*/
int scmi_proto_version(void *p, uint32_t proto_id, uint32_t *version)
{
mailbox_mem_t *mbx_mem;
unsigned int token = 0;
int ret;
scmi_channel_t *ch = (scmi_channel_t *)p;
validate_scmi_channel(ch);
scmi_get_channel(ch);
mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
mbx_mem->msg_header = SCMI_MSG_CREATE(proto_id, SCMI_PROTO_VERSION_MSG,
token);
mbx_mem->len = SCMI_PROTO_VERSION_MSG_LEN;
mbx_mem->flags = SCMI_FLAG_RESP_POLL;
csi_dcache_clean_invalid_range((uintptr_t)ch->info->scmi_mbx_mem, 0x80);
scmi_send_sync_command(ch);
/* Get the return values */
SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *version);
if (mbx_mem->len != SCMI_PROTO_VERSION_RESP_LEN) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
scmi_put_channel(ch);
return ret;
}
/*
* API to query the protocol message attributes for a SCMI protocol.
*/
int scmi_proto_msg_attr(void *p, uint32_t proto_id,
uint32_t command_id, uint32_t *attr)
{
mailbox_mem_t *mbx_mem;
unsigned int token = 0;
int ret;
scmi_channel_t *ch = (scmi_channel_t *)p;
validate_scmi_channel(ch);
scmi_get_channel(ch);
mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
mbx_mem->msg_header = SCMI_MSG_CREATE(proto_id,
SCMI_PROTO_MSG_ATTR_MSG, token);
mbx_mem->len = SCMI_PROTO_MSG_ATTR_MSG_LEN;
mbx_mem->flags = SCMI_FLAG_RESP_POLL;
SCMI_PAYLOAD_ARG1(mbx_mem->payload, command_id);
csi_dcache_clean_invalid_range((uintptr_t)ch->info->scmi_mbx_mem, 0x80);
scmi_send_sync_command(ch);
/* Get the return values */
SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *attr);
if (mbx_mem->len != SCMI_PROTO_MSG_ATTR_RESP_LEN) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
scmi_put_channel(ch);
return ret;
}
/*
* SCMI Driver initialization API. Returns initialized channel on success
* or NULL on error. The return type is an opaque void pointer.
*/
void *scmi_init(scmi_channel_t *ch)
{
uint32_t version;
int ret;
if (!ch || !ch->info || !ch->info->db_reg_addr || !ch->info->db_modify_mask ||
!ch->info->db_preserve_mask || !ch->info->ring_doorbell ||
!ch->lock)
sbi_hart_hang();
scmi_lock_init(ch->lock);
ch->is_initialized = 1;
ret = scmi_proto_version(ch, SCMI_PWR_DMN_PROTO_ID, &version);
if (ret != SCMI_E_SUCCESS) {
sbi_printf("SCMI power domain protocol version message failed\n");
goto error;
}
if (!is_scmi_version_compatible(SCMI_PWR_DMN_PROTO_VER, version)) {
sbi_printf("SCMI power domain protocol version 0x%x incompatible with driver version 0x%x\n",
version, SCMI_PWR_DMN_PROTO_VER);
goto error;
}
sbi_printf("SCMI power domain protocol version 0x%x detected\n", version);
ret = scmi_proto_version(ch, SCMI_SYS_PWR_PROTO_ID, &version);
if (ret != SCMI_E_SUCCESS) {
sbi_printf("SCMI system power protocol version message failed\n");
goto error;
}
if (!is_scmi_version_compatible(SCMI_SYS_PWR_PROTO_VER, version)) {
sbi_printf("SCMI system power management protocol version 0x%x incompatible with driver version 0x%x\n",
version, SCMI_SYS_PWR_PROTO_VER);
goto error;
}
sbi_printf("SCMI system power management protocol version 0x%x detected\n",
version);
sbi_printf("SCMI driver initialized\n");
return (void *)ch;
error:
ch->is_initialized = 0;
return NULL;
}

View file

@ -0,0 +1,102 @@
/*
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
#include <sbi_utils/psci/drivers/arm/css/scmi_private.h>
/*
* API to set the SCMI power domain power state.
*/
int scmi_pwr_state_set(void *p, uint32_t domain_id,
uint32_t scmi_pwr_state)
{
mailbox_mem_t *mbx_mem;
unsigned int token = 0;
int ret;
/*
* Only asynchronous mode of `set power state` command is allowed on
* application processors.
*/
uint32_t pwr_state_set_msg_flag = SCMI_PWR_STATE_SET_FLAG_ASYNC;
scmi_channel_t *ch = (scmi_channel_t *)p;
validate_scmi_channel(ch);
scmi_get_channel(ch);
mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID,
SCMI_PWR_STATE_SET_MSG, token);
mbx_mem->len = SCMI_PWR_STATE_SET_MSG_LEN;
mbx_mem->flags = SCMI_FLAG_RESP_POLL;
SCMI_PAYLOAD_ARG3(mbx_mem->payload, pwr_state_set_msg_flag,
domain_id, scmi_pwr_state);
csi_dcache_clean_invalid_range((uintptr_t)ch->info->scmi_mbx_mem, 0x80);
scmi_send_sync_command(ch);
/* Get the return values */
SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
if (mbx_mem->len != SCMI_PWR_STATE_SET_RESP_LEN) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
scmi_put_channel(ch);
return ret;
}
/*
* API to get the SCMI power domain power state.
*/
int scmi_pwr_state_get(void *p, uint32_t domain_id,
uint32_t *scmi_pwr_state)
{
mailbox_mem_t *mbx_mem;
unsigned int token = 0;
int ret;
scmi_channel_t *ch = (scmi_channel_t *)p;
validate_scmi_channel(ch);
scmi_get_channel(ch);
mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID,
SCMI_PWR_STATE_GET_MSG, token);
mbx_mem->len = SCMI_PWR_STATE_GET_MSG_LEN;
mbx_mem->flags = SCMI_FLAG_RESP_POLL;
SCMI_PAYLOAD_ARG1(mbx_mem->payload, domain_id);
csi_dcache_clean_invalid_range((uintptr_t)ch->info->scmi_mbx_mem, 0x80);
scmi_send_sync_command(ch);
/* Get the return values */
SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *scmi_pwr_state);
if (mbx_mem->len != SCMI_PWR_STATE_GET_RESP_LEN) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
scmi_put_channel(ch);
return ret;
}

View file

@ -0,0 +1,90 @@
/*
* Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
#include <sbi_utils/psci/drivers/arm/css/scmi_private.h>
/*
* API to set the SCMI system power state
*/
int scmi_sys_pwr_state_set(void *p, uint32_t flags, uint32_t system_state)
{
mailbox_mem_t *mbx_mem;
unsigned int token = 0;
int ret;
scmi_channel_t *ch = (scmi_channel_t *)p;
validate_scmi_channel(ch);
scmi_get_channel(ch);
mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_PWR_PROTO_ID,
SCMI_SYS_PWR_STATE_SET_MSG, token);
mbx_mem->len = SCMI_SYS_PWR_STATE_SET_MSG_LEN;
mbx_mem->flags = SCMI_FLAG_RESP_POLL;
SCMI_PAYLOAD_ARG2(mbx_mem->payload, flags, system_state);
scmi_send_sync_command(ch);
/* Get the return values */
SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
if (mbx_mem->len != SCMI_SYS_PWR_STATE_SET_RESP_LEN) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
scmi_put_channel(ch);
return ret;
}
/*
* API to get the SCMI system power state
*/
int scmi_sys_pwr_state_get(void *p, uint32_t *system_state)
{
mailbox_mem_t *mbx_mem;
unsigned int token = 0;
int ret;
scmi_channel_t *ch = (scmi_channel_t *)p;
validate_scmi_channel(ch);
scmi_get_channel(ch);
mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_PWR_PROTO_ID,
SCMI_SYS_PWR_STATE_GET_MSG, token);
mbx_mem->len = SCMI_SYS_PWR_STATE_GET_MSG_LEN;
mbx_mem->flags = SCMI_FLAG_RESP_POLL;
scmi_send_sync_command(ch);
/* Get the return values */
SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *system_state);
if (mbx_mem->len != SCMI_SYS_PWR_STATE_GET_RESP_LEN) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
if (token != SCMI_MSG_GET_TOKEN(mbx_mem->msg_header)) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
scmi_put_channel(ch);
return ret;
}

View file

@ -0,0 +1,418 @@
/*
* Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi/sbi_console.h>
#include <sbi/riscv_asm.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/psci/plat/arm/common/plat_arm.h>
#include <sbi_utils/psci/drivers/arm/css/scmi.h>
#include <sbi_utils/psci/plat/arm/css/common/css_pm.h>
#include <sbi_utils/psci/plat/arm/common/arm_def.h>
#include <sbi_utils/psci/plat/arm/board/spacemit/include/platform_def.h>
#include <sbi_utils/psci/plat/common/platform.h>
#include <../../../psci/psci_private.h>
/*
* This file implements the SCP helper functions using SCMI protocol.
*/
/*
* SCMI power state parameter bit field encoding for ARM CSS platforms.
*
* 31 20 19 16 15 12 11 8 7 4 3 0
* +-------------------------------------------------------------+
* | SBZ | Max level | Level 3 | Level 2 | Level 1 | Level 0 |
* | | | state | state | state | state |
* +-------------------------------------------------------------+
*
* `Max level` encodes the highest level that has a valid power state
* encoded in the power state.
*/
#define SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT 16
#define SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH 4
#define SCMI_PWR_STATE_MAX_PWR_LVL_MASK \
((1 << SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH) - 1)
#define SCMI_SET_PWR_STATE_MAX_PWR_LVL(_power_state, _max_level) \
(_power_state) |= ((_max_level) & SCMI_PWR_STATE_MAX_PWR_LVL_MASK)\
<< SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT
#define SCMI_GET_PWR_STATE_MAX_PWR_LVL(_power_state) \
(((_power_state) >> SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT) \
& SCMI_PWR_STATE_MAX_PWR_LVL_MASK)
#define SCMI_PWR_STATE_LVL_WIDTH 4
#define SCMI_PWR_STATE_LVL_MASK \
((1 << SCMI_PWR_STATE_LVL_WIDTH) - 1)
#define SCMI_SET_PWR_STATE_LVL(_power_state, _level, _level_state) \
(_power_state) |= ((_level_state) & SCMI_PWR_STATE_LVL_MASK) \
<< (SCMI_PWR_STATE_LVL_WIDTH * (_level))
#define SCMI_GET_PWR_STATE_LVL(_power_state, _level) \
(((_power_state) >> (SCMI_PWR_STATE_LVL_WIDTH * (_level))) & \
SCMI_PWR_STATE_LVL_MASK)
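/*
 * Worked illustration of the encoding above (added for clarity, not part
 * of the original sources). Assuming a two-level hierarchy where level 0
 * (the CPU) is put to sleep and level 1 (the cluster) is turned off, as
 * done on the suspend path below:
 *
 *   uint32_t ps = 0;
 *   SCMI_SET_PWR_STATE_LVL(ps, 0, scmi_power_state_sleep); // ps = 0x00002
 *   SCMI_SET_PWR_STATE_LVL(ps, 1, scmi_power_state_off);   // ps = 0x00002
 *   SCMI_SET_PWR_STATE_MAX_PWR_LVL(ps, 1);                 // ps = 0x10002
 *
 * The off state contributes 0 in its 4-bit field, and the `Max level`
 * field in bits [19:16] records that level 1 is the highest level with a
 * valid power state.
 */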
/*
* The SCMI power state enumeration for a power domain level
*/
typedef enum {
scmi_power_state_off = 0,
scmi_power_state_on = 1,
scmi_power_state_sleep = 2,
} scmi_power_state_t;
/*
* The global handles for invoking the SCMI driver APIs after the driver
* has been initialized.
*/
static void *scmi_handles[PLAT_ARM_SCMI_CHANNEL_COUNT];
/* The global SCMI channels array */
static scmi_channel_t scmi_channels[PLAT_ARM_SCMI_CHANNEL_COUNT];
/*
* Channel ID for the default SCMI channel.
* The default channel is used to issue SYSTEM level SCMI requests and is
* initialized to the channel which has the boot cpu as its resource.
*/
static uint32_t default_scmi_channel_id;
/*
* TODO: Allow use of channel specific lock instead of using a single lock for
* all the channels.
*/
ARM_SCMI_INSTANTIATE_LOCK;
/*
* Function to obtain the SCMI Domain ID and SCMI Channel number from the linear
* core position. The SCMI Channel number is encoded in the upper 16 bits and
* the Domain ID is encoded in the lower 16 bits in each entry of the mapping
* array exported by the platform.
*/
static void css_scp_core_pos_to_scmi_channel(unsigned int core_pos,
unsigned int *scmi_domain_id, unsigned int *scmi_channel_id)
{
unsigned int composite_id;
unsigned int *map_id = plat_get_power_domain_tree_desc()[CLUSTER_INDEX_IN_CPU_TOPOLOGY] > 1 ?
plat_css_core_pos_to_scmi_dmn_id_map[1] :
plat_css_core_pos_to_scmi_dmn_id_map[0];
composite_id = map_id[core_pos];
*scmi_channel_id = GET_SCMI_CHANNEL_ID(composite_id);
*scmi_domain_id = GET_SCMI_DOMAIN_ID(composite_id);
}
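/*
 * Illustrative example (added for clarity): with the 16/16 split described
 * above, a composite_id of 0x00010003 would decode to SCMI channel 1 and
 * SCMI power domain 3.
 */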
/*
* Helper function to turn off a CPU power domain and its parent power domains
* if applicable.
*/
void css_scp_off(const struct psci_power_state *target_state)
{
unsigned int lvl = 0, channel_id, domain_id;
int ret;
uint32_t scmi_pwr_state = 0, cpu_idx;
unsigned int hartid = current_hartid();
cpu_idx = plat_core_pos_by_mpidr(hartid);
/* At least the CPU level should be specified to be OFF */
if (target_state->pwr_domain_state[ARM_PWR_LVL0] != ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d, wrong power domain state\n",
__func__, __LINE__);
sbi_hart_hang();
}
/* PSCI CPU OFF cannot be used to turn OFF system power domain */
if (css_system_pwr_state(target_state) != ARM_LOCAL_STATE_RUN) {
sbi_printf("%s:%d, wrong power domain state\n",
__func__, __LINE__);
sbi_hart_hang();
}
for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN)
break;
if (target_state->pwr_domain_state[lvl] != ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d, wrong power domain state\n",
__func__, __LINE__);
sbi_hart_hang();
}
SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
scmi_power_state_off);
}
SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
css_scp_core_pos_to_scmi_channel(cpu_idx, &domain_id, &channel_id);
ret = scmi_pwr_state_set(scmi_handles[channel_id],
domain_id, scmi_pwr_state);
if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
sbi_printf("SCMI set power state command return 0x%x unexpected\n",
ret);
sbi_hart_hang();
}
}
/*
* Helper function to turn ON a CPU power domain and its parent power domains
* if applicable.
*/
void css_scp_on(u_register_t mpidr)
{
unsigned int lvl = 0, channel_id, core_pos, domain_id;
int ret;
uint32_t scmi_pwr_state = 0;
core_pos = plat_core_pos_by_mpidr(mpidr);
if (core_pos >= PLATFORM_CORE_COUNT) {
sbi_printf("%s:%d, node_idx beyond the boundary\n",
__func__, __LINE__);
sbi_hart_hang();
}
for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
scmi_power_state_on);
SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
css_scp_core_pos_to_scmi_channel(core_pos, &domain_id,
&channel_id);
ret = scmi_pwr_state_set(scmi_handles[channel_id],
domain_id, scmi_pwr_state);
if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
sbi_printf("SCMI set power state command return 0x%x unexpected\n",
ret);
sbi_hart_hang();
}
}
/*
* Helper function to get the power state of a power domain node as reported
* by the SCP.
*/
int css_scp_get_power_state(u_register_t mpidr, unsigned int power_level)
{
int ret;
uint32_t scmi_pwr_state = 0, lvl_state;
unsigned int channel_id, cpu_idx, domain_id;
cpu_idx = plat_core_pos_by_mpidr(mpidr);
if (cpu_idx >= PLATFORM_CORE_COUNT) {
sbi_printf("%s:%d, node_idx beyond the boundary\n",
__func__, __LINE__);
sbi_hart_hang();
}
/* We don't support get power state at the system power domain level */
if ((power_level > PLAT_MAX_PWR_LVL) ||
(power_level == CSS_SYSTEM_PWR_DMN_LVL)) {
sbi_printf("Invalid power level %u specified for SCMI get power state\n",
power_level);
return PSCI_E_INVALID_PARAMS;
}
css_scp_core_pos_to_scmi_channel(cpu_idx, &domain_id, &channel_id);
ret = scmi_pwr_state_get(scmi_handles[channel_id],
domain_id, &scmi_pwr_state);
if (ret != SCMI_E_SUCCESS) {
sbi_printf("SCMI get power state command return 0x%x unexpected\n",
ret);
return PSCI_E_INVALID_PARAMS;
}
/*
* Find the maximum power level described in the get power state
* command. If it is less than the requested power level, then assume
* the requested power level is ON.
*/
if (SCMI_GET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state) < power_level)
return HW_ON;
lvl_state = SCMI_GET_PWR_STATE_LVL(scmi_pwr_state, power_level);
if (lvl_state == scmi_power_state_on)
return HW_ON;
if ((lvl_state != scmi_power_state_off) &&
(lvl_state != scmi_power_state_sleep)) {
sbi_printf("wrong power state, :%d\n", ret);
sbi_hart_hang();
}
return HW_OFF;
}
void plat_arm_pwrc_setup(void)
{
unsigned int composite_id, idx, cpu_idx;
unsigned int hartid = current_hartid();
cpu_idx = plat_core_pos_by_mpidr(hartid);
for (idx = 0; idx < PLAT_ARM_SCMI_CHANNEL_COUNT; idx++) {
sbi_printf("Initializing SCMI driver on channel %d\n", idx);
scmi_channels[idx].info = plat_css_get_scmi_info(idx);
scmi_channels[idx].lock = ARM_SCMI_LOCK_GET_INSTANCE;
scmi_handles[idx] = scmi_init(&scmi_channels[idx]);
if (scmi_handles[idx] == NULL) {
sbi_printf("SCMI Initialization failed on channel %d\n", idx);
sbi_hart_hang();
}
}
unsigned int *map_id = plat_get_power_domain_tree_desc()[CLUSTER_INDEX_IN_CPU_TOPOLOGY] > 1 ?
plat_css_core_pos_to_scmi_dmn_id_map[1] :
plat_css_core_pos_to_scmi_dmn_id_map[0];
composite_id = map_id[cpu_idx];
default_scmi_channel_id = GET_SCMI_CHANNEL_ID(composite_id);
}
/******************************************************************************
* This function overrides the default definition for ARM platforms. Initialize
* the SCMI driver, query capability via SCMI and modify the PSCI capability
* based on that.
*****************************************************************************/
const plat_psci_ops_t *css_scmi_override_pm_ops(plat_psci_ops_t *ops)
{
uint32_t msg_attr;
int ret;
void *scmi_handle = scmi_handles[default_scmi_channel_id];
/* Check that power domain POWER_STATE_SET message is supported */
ret = scmi_proto_msg_attr(scmi_handle, SCMI_PWR_DMN_PROTO_ID,
SCMI_PWR_STATE_SET_MSG, &msg_attr);
if (ret != SCMI_E_SUCCESS) {
sbi_printf("Set power state command is not supported by SCMI\n");
sbi_hart_hang();
}
/*
* Don't support PSCI NODE_HW_STATE call if SCMI doesn't support
* POWER_STATE_GET message.
*/
ret = scmi_proto_msg_attr(scmi_handle, SCMI_PWR_DMN_PROTO_ID,
SCMI_PWR_STATE_GET_MSG, &msg_attr);
if (ret != SCMI_E_SUCCESS)
ops->get_node_hw_state = NULL;
/* Check if the SCMI SYSTEM_POWER_STATE_SET message is supported */
ret = scmi_proto_msg_attr(scmi_handle, SCMI_SYS_PWR_PROTO_ID,
SCMI_SYS_PWR_STATE_SET_MSG, &msg_attr);
if (ret != SCMI_E_SUCCESS) {
/* System power management operations are not supported */
ops->system_off = NULL;
ops->system_reset = NULL;
ops->get_sys_suspend_power_state = NULL;
} else {
if (!(msg_attr & SCMI_SYS_PWR_SUSPEND_SUPPORTED)) {
/*
* System power management protocol is available, but
* it does not support SYSTEM SUSPEND.
*/
ops->get_sys_suspend_power_state = NULL;
}
if (!(msg_attr & SCMI_SYS_PWR_WARM_RESET_SUPPORTED)) {
/*
* WARM reset is not available.
*/
ops->system_reset2 = NULL;
}
}
return ops;
}
/*
* Helper function to suspend a CPU power domain and its parent power domains
* if applicable.
*/
void css_scp_suspend(const struct psci_power_state *target_state)
{
int ret;
unsigned int curr_hart = current_hartid();
unsigned int core_pos = plat_core_pos_by_mpidr(curr_hart);
if (core_pos >= PLATFORM_CORE_COUNT) {
sbi_printf("%s:%d, node_idx beyond the boundary\n",
__func__, __LINE__);
sbi_hart_hang();
}
/* At least power domain level 0 should be specified to be suspended */
if (target_state->pwr_domain_state[ARM_PWR_LVL0] !=
ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/* Check if power down at system power domain level is requested */
if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
/* Issue SCMI command for SYSTEM_SUSPEND on all SCMI channels */
ret = scmi_sys_pwr_state_set(
scmi_handles[default_scmi_channel_id],
SCMI_SYS_PWR_FORCEFUL_REQ, SCMI_SYS_PWR_SUSPEND);
if (ret != SCMI_E_SUCCESS) {
sbi_printf("SCMI system power domain suspend return 0x%x unexpected\n",
ret);
sbi_hart_hang();
}
return;
}
unsigned int lvl, channel_id, domain_id;
uint32_t scmi_pwr_state = 0;
/*
* If we reach here, power down at the system power domain level was not
* requested, so the system power domain state must be RUN.
*/
if (css_system_pwr_state(target_state) != ARM_LOCAL_STATE_RUN) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/* For level 0, specify `scmi_power_state_sleep` as the power state */
SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, ARM_PWR_LVL0,
scmi_power_state_sleep);
for (lvl = ARM_PWR_LVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN)
break;
if (target_state->pwr_domain_state[lvl] !=
ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/*
* Specify `scmi_power_state_off` as power state for higher
* levels.
*/
SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
scmi_power_state_off);
}
SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
css_scp_core_pos_to_scmi_channel(core_pos,
&domain_id, &channel_id);
ret = scmi_pwr_state_set(scmi_handles[channel_id],
domain_id, scmi_pwr_state);
if (ret != SCMI_E_SUCCESS) {
sbi_printf("SCMI set power state command return 0x%x unexpected\n",
ret);
sbi_hart_hang();
}
}

View file

@ -0,0 +1,24 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2020 Western Digital Corporation or its affiliates.
#
# Authors:
# Anup Patel <anup.patel@wdc.com>
#
libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/common/arm_pm.o
libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/common/css_pm.o
libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/scmi/scmi_common.o
libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/scmi/scmi_pwr_dmn_proto.o
libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/scmi/scmi_sys_pwr_proto.o
libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/scp/css_pm_scmi.o
libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/css/mhu/css_mhu_doorbell.o
libsbiutils-objs-$(CONFIG_ARM_SCMI_PROTOCOL_SUPPORT) += arm_scmi/board/spacemit/spacemit_pm.o

168
lib/utils/cci/bus-cci.c Normal file
View file

@ -0,0 +1,168 @@
/*
* Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi/riscv_io.h>
#include <sbi/sbi_console.h>
/* Slave interface offsets from PERIPHBASE */
#define SLAVE_IFACE6_OFFSET (0x7000UL)
#define SLAVE_IFACE5_OFFSET (0x6000UL)
#define SLAVE_IFACE4_OFFSET (0x5000UL)
#define SLAVE_IFACE3_OFFSET (0x4000UL)
#define SLAVE_IFACE2_OFFSET (0x3000UL)
#define SLAVE_IFACE1_OFFSET (0x2000UL)
#define SLAVE_IFACE0_OFFSET (0x1000UL)
#define SLAVE_IFACE_OFFSET(index) (SLAVE_IFACE0_OFFSET + \
((0x1000UL) * (index)))
/* Slave interface event and count register offsets from PERIPHBASE */
#define EVENT_SELECT7_OFFSET (0x80000UL)
#define EVENT_SELECT6_OFFSET (0x70000UL)
#define EVENT_SELECT5_OFFSET (0x60000UL)
#define EVENT_SELECT4_OFFSET (0x50000UL)
#define EVENT_SELECT3_OFFSET (0x40000UL)
#define EVENT_SELECT2_OFFSET (0x30000UL)
#define EVENT_SELECT1_OFFSET (0x20000UL)
#define EVENT_SELECT0_OFFSET (0x10000UL)
#define EVENT_OFFSET(index) (EVENT_SELECT0_OFFSET + \
((0x10000UL) * (index)))
/* Control and ID register offsets */
#define CTRL_OVERRIDE_REG (0x0U)
#define SECURE_ACCESS_REG (0x8U)
#define STATUS_REG (0xcU)
#define IMPRECISE_ERR_REG (0x10U)
#define PERFMON_CTRL_REG (0x100U)
#define IFACE_MON_CTRL_REG (0x104U)
/* Component and peripheral ID registers */
#define PERIPHERAL_ID0 (0xFE0U)
#define PERIPHERAL_ID1 (0xFE4U)
#define PERIPHERAL_ID2 (0xFE8U)
#define PERIPHERAL_ID3 (0xFECU)
#define PERIPHERAL_ID4 (0xFD0U)
#define PERIPHERAL_ID5 (0xFD4U)
#define PERIPHERAL_ID6 (0xFD8U)
#define PERIPHERAL_ID7 (0xFDCU)
#define COMPONENT_ID0 (0xFF0U)
#define COMPONENT_ID1 (0xFF4U)
#define COMPONENT_ID2 (0xFF8U)
#define COMPONENT_ID3 (0xFFCU)
#define COMPONENT_ID4 (0x1000U)
#define COMPONENT_ID5 (0x1004U)
#define COMPONENT_ID6 (0x1008U)
#define COMPONENT_ID7 (0x100CU)
/* Slave interface register offsets */
#define SNOOP_CTRL_REG (0x0U)
#define SH_OVERRIDE_REG (0x4U)
#define READ_CHNL_QOS_VAL_OVERRIDE_REG (0x100U)
#define WRITE_CHNL_QOS_VAL_OVERRIDE_REG (0x104U)
#define MAX_OT_REG (0x110U)
/* Snoop Control register bit definitions */
#define DVM_EN_BIT (1U<<1)
#define SNOOP_EN_BIT (1U<<0)
#define SUPPORT_SNOOPS (1U<<30)
#define SUPPORT_DVM (1U<<31)
/* Status register bit definitions */
#define CHANGE_PENDING_BIT (1U<<0)
/* Event and count register offsets */
#define EVENT_SELECT_REG (0x0U)
#define EVENT_COUNT_REG (0x4U)
#define COUNT_CNTRL_REG (0x8U)
#define COUNT_OVERFLOW_REG (0xCU)
/* Slave interface monitor registers */
#define INT_MON_REG_SI0 (0x90000U)
#define INT_MON_REG_SI1 (0x90004U)
#define INT_MON_REG_SI2 (0x90008U)
#define INT_MON_REG_SI3 (0x9000CU)
#define INT_MON_REG_SI4 (0x90010U)
#define INT_MON_REG_SI5 (0x90014U)
#define INT_MON_REG_SI6 (0x90018U)
/* Master interface monitor registers */
#define INT_MON_REG_MI0 (0x90100U)
#define INT_MON_REG_MI1 (0x90104U)
#define INT_MON_REG_MI2 (0x90108U)
#define INT_MON_REG_MI3 (0x9010cU)
#define INT_MON_REG_MI4 (0x90110U)
#define INT_MON_REG_MI5 (0x90114U)
#define SLAVE_IF_UNUSED (-1)
#define MAKE_CCI_PART_NUMBER(hi, lo) (((hi) << 8) | (lo))
#define CCI_PART_LO_MASK (0xffU)
#define CCI_PART_HI_MASK (0xfU)
/* CCI part number codes read from Peripheral ID registers 0 and 1 */
#define CCI400_PART_NUM (0x420)
#define CCI500_PART_NUM (0x422)
#define CCI550_PART_NUM (0x423)
#define CCI400_SLAVE_PORTS (5)
#define CCI500_SLAVE_PORTS (7)
#define CCI550_SLAVE_PORTS (7)
static void *cci_base;
static const int *cci_slave_if_map;
void cci_init(u32 base, const int *map, unsigned int num_cci_masters)
{
cci_base = (void *)(u64)base;
cci_slave_if_map = map;
}
void cci_enable_snoop_dvm_reqs(unsigned int master_id)
{
int slave_if_id = cci_slave_if_map[master_id];
/*
* Enable Snoops and DVM messages, no need for Read/Modify/Write as
* rest of bits are write ignore
*/
writel(DVM_EN_BIT | SNOOP_EN_BIT, cci_base +
SLAVE_IFACE_OFFSET(slave_if_id) + SNOOP_CTRL_REG);
/*
* Wait for the completion of the write to the Snoop Control Register
* before testing the change_pending bit
*/
mb();
/* Wait for the dust to settle down */
while ((readl(cci_base + STATUS_REG) & CHANGE_PENDING_BIT) != 0U)
;
}
void cci_disable_snoop_dvm_reqs(unsigned int master_id)
{
int slave_if_id = cci_slave_if_map[master_id];
/*
* Disable Snoops and DVM messages, no need for Read/Modify/Write as
* rest of bits are write ignore.
*/
writel(~(DVM_EN_BIT | SNOOP_EN_BIT), cci_base +
SLAVE_IFACE_OFFSET(slave_if_id) + SNOOP_CTRL_REG);
/*
* Wait for the completion of the write to the Snoop Control Register
* before testing the change_pending bit
*/
mb();
/* Wait for the dust to settle down */
while ((readl(cci_base + STATUS_REG) & CHANGE_PENDING_BIT) != 0U)
;
}
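/*
 * Illustrative usage (a sketch, not taken from this patch): the platform
 * would typically call cci_init() once during cold boot with its
 * master-index-to-slave-interface map, then have each cluster enable
 * snoops and DVM messages before it joins coherency, for example:
 *
 *   static const int cci_map[] = { 3, 4 };   // hypothetical slave ports
 *   cci_init(PLAT_CCI_BASE, cci_map, 2);     // PLAT_CCI_BASE is assumed
 *   cci_enable_snoop_dvm_reqs(cluster_idx);  // index into cci_map
 *
 * with cci_disable_snoop_dvm_reqs() called on the power-down path before
 * the cluster is turned off.
 */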

7
lib/utils/cci/objects.mk Normal file
View file

@ -0,0 +1,7 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2020 Bin Meng <bmeng.cn@gmail.com>
#
libsbiutils-objs-y += cci/bus-cci.o

View file

@ -15,6 +15,7 @@
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_timer.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/ipi/aclint_mswi.h>
static unsigned long mswi_ptr_offset;
@ -84,6 +85,7 @@ int aclint_mswi_cold_init(struct aclint_mswi_data *mswi)
struct sbi_scratch *scratch;
unsigned long pos, region_size;
struct sbi_domain_memregion reg;
const struct sbi_platform *sbi = sbi_platform_thishart_ptr();
/* Sanity checks */
if (!mswi || (mswi->addr & (ACLINT_MSWI_ALIGN - 1)) ||
@ -100,7 +102,7 @@ int aclint_mswi_cold_init(struct aclint_mswi_data *mswi)
/* Update MSWI pointer in scratch space */
for (i = 0; i < mswi->hart_count; i++) {
scratch = sbi_hartid_to_scratch(mswi->first_hartid + i);
scratch = sbi_hartid_to_scratch(sbi->hart_index2id[i]);
if (!scratch)
return SBI_ENOENT;
mswi_set_hart_data_ptr(scratch, mswi);

View file

@ -85,6 +85,11 @@ static int irqchip_plic_warm_init(void)
plic_get_hart_scontext(scratch));
}
void fdt_plic_context_exit(void)
{
irqchip_plic_warm_init();
}
static int irqchip_plic_update_hartid_table(void *fdt, int nodeoff,
struct plic_data *pd)
{

21
lib/utils/psci/Kconfig Normal file
View file

@ -0,0 +1,21 @@
# SPDX-License-Identifier: BSD-2-Clause
menu "ARM's power management framework Support"
config ARM_PSCI_SUPPORT
bool "Support psci protocol"
default n
if ARM_PSCI_SUPPORT
config ARM_SCMI_PROTOCOL_SUPPORT
bool "Using r-core and arm's scmi protocol to dealing with the pwr management"
default n
config ARM_NON_SCMI_SUPPORT
bool "dealing with the pwr management in Machine mode-opensbi"
default n
endif
endmenu

30
lib/utils/psci/objects.mk Normal file
View file

@ -0,0 +1,30 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2020 Western Digital Corporation or its affiliates.
#
# Authors:
# Anup Patel <anup.patel@wdc.com>
#
libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_common.o
libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_setup.o
libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_main.o
libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_on.o
libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_off.o
libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/psci_suspend.o
libsbiutils-objs-$(CONFIG_ARM_PSCI_SUPPORT) += psci/spacemit/spacemit_topology.o
ifeq ($(CONFIG_ARM_NON_SCMI_SUPPORT), y)
# common
libsbiutils-objs-$(CONFIG_ARM_NON_SCMI_SUPPORT) += psci/spacemit/plat/plat_pm.o
# platform
libsbiutils-objs-$(CONFIG_PLATFORM_SPACEMIT_K1X) += psci/spacemit/plat/k1x/underly_implement.o
endif

View file

@ -0,0 +1,872 @@
/*
* Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/plat/common/platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_console.h>
#include <spacemit/spacemit_config.h>
#include "psci_private.h"
/*
* PSCI requested local power state map. This array is used to store the local
* power states requested by a CPU for power levels from level 1 to
* PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
* level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
* CPU are the same.
*
* During state coordination, the platform is passed an array containing the
* local states requested for a particular non cpu power domain by each cpu
* within the domain.
*
* TODO: Dense packing of the requested states will cause cache thrashing
* when multiple power domains write to it. If we allocate the requested
* states at each power level in a cache-line aligned per-domain memory,
* the cache thrashing can be avoided.
*/
static plat_local_state_t
/* psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT] */
psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][CACHE_LINE_SIZE] __attribute__((aligned(CACHE_LINE_SIZE)));
unsigned int psci_plat_core_count;
unsigned long psci_delta_off;
/*******************************************************************************
* Arrays that hold the platform's power domain tree information for state
* management of power domains.
* Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
* which is an ancestor of a CPU power domain.
* Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
/*******************************************************************************
* Pointer to functions exported by the platform to complete power mgmt. ops
******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;
/*
* The plat_local_state used by the platform is one of these types: RUN,
* RETENTION and OFF. The platform can define further sub-states for each type
* apart from RUN. This categorization is done to verify the sanity of the
* psci_power_state passed by the platform and to print debug information. The
* categorization is done on the basis of the following conditions:
*
* 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
*
* 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
* STATE_TYPE_RETN.
*
* 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
* STATE_TYPE_OFF.
*/
typedef enum plat_local_state_type {
STATE_TYPE_RUN = 0,
STATE_TYPE_RETN,
STATE_TYPE_OFF
} plat_local_state_type_t;
/* Function used to categorize plat_local_state. */
plat_local_state_type_t find_local_state_type(plat_local_state_t state)
{
if (state != 0U) {
if (state > PLAT_MAX_RET_STATE) {
return STATE_TYPE_OFF;
} else {
return STATE_TYPE_RETN;
}
} else {
return STATE_TYPE_RUN;
}
}
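/*
 * Example (illustrative only): assuming PLAT_MAX_RET_STATE == 1 and
 * PLAT_MAX_OFF_STATE == 2, a state of 0 is categorized as STATE_TYPE_RUN,
 * 1 as STATE_TYPE_RETN and 2 as STATE_TYPE_OFF, matching the three rules
 * listed above.
 */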
/*******************************************************************************
* PSCI helper function to get the parent nodes corresponding to a cpu_index.
******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
unsigned int end_lvl,
unsigned int *node_index)
{
unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
unsigned int i;
unsigned int *node = node_index;
for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
*node = parent_node;
node++;
parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
}
}
/******************************************************************************
* This function initializes the psci_req_local_pwr_states.
*****************************************************************************/
void psci_init_req_local_pwr_states(void)
{
/* Initialize the requested state of all non CPU power domains as OFF */
unsigned int pwrlvl;
unsigned int core;
for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
for (core = 0; core < psci_plat_core_count; core++) {
psci_req_local_pwr_states[pwrlvl][core] =
PLAT_MAX_OFF_STATE;
}
csi_dcache_clean_invalid_range(
(uintptr_t) psci_req_local_pwr_states[pwrlvl],
CACHE_LINE_SIZE);
}
}
void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
plat_local_state_t state)
{
psci_non_cpu_pd_nodes[parent_idx].local_state = state;
csi_dcache_clean_invalid_range(
(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
sizeof(psci_non_cpu_pd_nodes[parent_idx]));
}
/******************************************************************************
* Helper function to update the requested local power state array. This array
* does not store the requested state for the CPU power level. Hence an
* assertion is added to prevent us from accessing the CPU power level.
*****************************************************************************/
void psci_set_req_local_pwr_state(unsigned int pwrlvl,
unsigned int cpu_idx,
plat_local_state_t req_pwr_state)
{
if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
(cpu_idx < psci_plat_core_count)) {
psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
csi_dcache_clean_invalid_range(
(uintptr_t) psci_req_local_pwr_states[pwrlvl - 1U],
CACHE_LINE_SIZE);
}
}
/******************************************************************************
* Helper function to set the target local power state that each power domain
* from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
* enter. This function will be called after coordination of requested power
* states has been done for each power level.
*****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
const psci_power_state_t *target_state)
{
unsigned int parent_idx, lvl;
psci_cpu_data_t *svc_cpu_data;
const plat_local_state_t *pd_state = target_state->pwr_domain_state;
unsigned int hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
/*
* Need to flush as local_state might be accessed with Data Cache
* disabled during power on
*/
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->local_state, sizeof(plat_local_state_t));
parent_idx = psci_cpu_pd_nodes[plat_core_pos_by_mpidr(hartid)].parent_node;
/* Copy the local_state from state_info */
for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
}
/******************************************************************************
* Helper function to return a reference to an array containing the local power
* states requested by each cpu for a power domain at 'pwrlvl'. The size of the
* array will be the number of cpu power domains of which this power domain is
* an ancestor. These requested states will be used to determine a suitable
* target state for this power domain during psci state coordination. An
* assertion is added to prevent us from accessing the CPU power level.
*****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
unsigned int cpu_idx)
{
if (pwrlvl <= PSCI_CPU_PWR_LVL) {
sbi_printf("%s:%d, err\n", __func__, __LINE__);
sbi_hart_hang();
}
if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
(cpu_idx < psci_plat_core_count)) {
return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
} else
return NULL;
}
/*
* Helper functions to get/set the fields of PSCI per-cpu data.
*/
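/*
 * Note (added for clarity): each helper below locates the PSCI per-cpu
 * data by adding the offset reserved at psci_delta_off to the hart's
 * sbi_scratch region, so the per-cpu state lives in scratch memory looked
 * up via sbi_hartid_to_scratch() rather than in a static array indexed by
 * hart id.
 */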
void psci_set_aff_info_state(aff_info_state_t aff_state)
{
psci_cpu_data_t *svc_cpu_data;
unsigned int hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
svc_cpu_data->aff_info_state = aff_state;
}
aff_info_state_t psci_get_aff_info_state(void)
{
psci_cpu_data_t *svc_cpu_data;
unsigned int hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
return svc_cpu_data->aff_info_state;
}
aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx)
{
psci_cpu_data_t *svc_cpu_data;
const struct sbi_platform *sbi = sbi_platform_thishart_ptr();
unsigned int hartid = sbi->hart_index2id[idx];
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
return svc_cpu_data->aff_info_state;
}
void psci_set_aff_info_state_by_idx(unsigned int idx,
aff_info_state_t aff_state)
{
psci_cpu_data_t *svc_cpu_data;
const struct sbi_platform *sbi = sbi_platform_thishart_ptr();
unsigned int hartid = sbi->hart_index2id[idx];
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
svc_cpu_data->aff_info_state = aff_state;
}
void psci_set_cpu_local_state(plat_local_state_t state)
{
psci_cpu_data_t *svc_cpu_data;
unsigned int hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
svc_cpu_data->local_state = state;
}
void psci_set_suspend_pwrlvl(unsigned int target_lvl)
{
psci_cpu_data_t *svc_cpu_data;
unsigned int hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
svc_cpu_data->target_pwrlvl = target_lvl;
}
static inline plat_local_state_t psci_get_cpu_local_state_by_idx(
unsigned int idx)
{
psci_cpu_data_t *svc_cpu_data;
const struct sbi_platform *sbi = sbi_platform_thishart_ptr();
unsigned int hartid = sbi->hart_index2id[idx];
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
return svc_cpu_data->local_state;
}
static inline plat_local_state_t psci_get_cpu_local_state(void)
{
psci_cpu_data_t *svc_cpu_data;
unsigned int hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
return svc_cpu_data->local_state;
}
/******************************************************************************
* This function is invoked post CPU power up and initialization. It sets the
* affinity info state, target power state and requested power state for the
* current CPU and all its ancestor power domains to RUN.
*****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
unsigned int parent_idx, lvl;
unsigned int cpu_idx;
psci_cpu_data_t *svc_cpu_data;
unsigned int hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
cpu_idx = plat_core_pos_by_mpidr(hartid);
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* Reset the local_state to RUN for the non cpu power domains. */
for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
set_non_cpu_pd_node_local_state(parent_idx,
PSCI_LOCAL_STATE_RUN);
psci_set_req_local_pwr_state(lvl,
cpu_idx,
PSCI_LOCAL_STATE_RUN);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
/* Set the affinity info state to ON */
psci_set_aff_info_state(AFF_STATE_ON);
psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
csi_dcache_clean_invalid_range((uintptr_t)svc_cpu_data, sizeof(psci_cpu_data_t));
}
/*******************************************************************************
* This function prints the state of all power domains present in the
* system
******************************************************************************/
void psci_print_power_domain_map(void)
{
unsigned int idx;
plat_local_state_t state;
plat_local_state_type_t state_type;
/* This array maps to the PSCI_STATE_X definitions in psci.h */
static const char * const psci_state_type_str[] = {
"ON",
"RETENTION",
"OFF",
};
sbi_printf("PSCI Power Domain Map:\n");
for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
idx++) {
state_type = find_local_state_type(
psci_non_cpu_pd_nodes[idx].local_state);
sbi_printf(" Domain Node : Level %u, parent_node %u,"
" State %s (0x%x)\n",
psci_non_cpu_pd_nodes[idx].level,
psci_non_cpu_pd_nodes[idx].parent_node,
psci_state_type_str[state_type],
psci_non_cpu_pd_nodes[idx].local_state);
}
for (idx = 0; idx < psci_plat_core_count; idx++) {
state = psci_get_cpu_local_state_by_idx(idx);
state_type = find_local_state_type(state);
sbi_printf(" CPU Node : MPID 0x%llx, parent_node %u,"
" State %s (0x%x)\n",
(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
psci_cpu_pd_nodes[idx].parent_node,
psci_state_type_str[state_type],
psci_get_cpu_local_state_by_idx(idx));
}
}
/*******************************************************************************
* Simple routine to determine whether a mpidr is valid or not.
******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
if (plat_core_pos_by_mpidr(mpidr) < 0)
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
static unsigned int psci_get_suspend_pwrlvl(void)
{
psci_cpu_data_t *svc_cpu_data;
unsigned int hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
return svc_cpu_data->target_pwrlvl;
}
/*******************************************************************************
* Routine to return the maximum power level to traverse to after a cpu has
* been physically powered up. It is expected to be called immediately after
* reset from assembler code.
******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
unsigned int pwrlvl;
/*
* Assume that this cpu was suspended and retrieve its target power
* level. If it is invalid then it could only have been turned off
* earlier. PLAT_MAX_PWR_LVL will be the highest power level a
* cpu can be turned off to.
*/
pwrlvl = psci_get_suspend_pwrlvl();
if (pwrlvl == PSCI_INVALID_PWR_LVL)
pwrlvl = PLAT_MAX_PWR_LVL;
if (pwrlvl >= PSCI_INVALID_PWR_LVL) {
sbi_printf("%s:%d,\n", __func__, __LINE__);
sbi_hart_hang();
}
return pwrlvl;
}
/*******************************************************************************
* This function is passed the highest level in the topology tree that the
* operation should be applied to and a list of node indexes. It picks up locks
* from the node index list in order of increasing power domain level in the
* range specified.
******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
const unsigned int *parent_nodes)
{
unsigned int parent_idx;
unsigned int level;
/* No locking required for level 0. Hence start locking from level 1 */
for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
parent_idx = parent_nodes[level - 1U];
psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
}
}
/*******************************************************************************
* This function is passed the highest level in the topology tree that the
* operation should be applied to and a list of node indexes. It releases the
* locks in order of decreasing power domain level in the range specified.
******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
const unsigned int *parent_nodes)
{
unsigned int parent_idx;
unsigned int level;
/* Unlock top down. No unlocking required for level 0. */
for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
parent_idx = parent_nodes[level - 1U];
psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
}
}
/******************************************************************************
* This function finds the highest power level which will be powered down
* amongst all the power levels specified in the 'state_info' structure
*****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
int i;
for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
return (unsigned int) i;
}
return PSCI_INVALID_PWR_LVL;
}
/*
* The PSCI generic code uses this API to let the platform participate in state
* coordination during a power management operation. It compares the platform
* specific local power states requested by each cpu for a given power domain
* and returns the coordinated target power state that the domain should
* enter. A platform assigns a number to a local power state. This default
* implementation assumes that the platform assigns these numbers in order of
* increasing depth of the power state i.e. for two power states X & Y, if X < Y
* then X represents a shallower power state than Y. As a result, the
* coordinated target local power state for a power domain will be the minimum
* of the requested local power states.
*/
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
const plat_local_state_t *states,
unsigned int ncpu)
{
plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
const plat_local_state_t *st = states;
unsigned int n = ncpu;
if (ncpu == 0U) {
sbi_printf("%s:%d, err\n", __func__, __LINE__);
sbi_hart_hang();
}
do {
temp = *st;
st++;
if (temp < target)
target = temp;
n--;
} while (n > 0U);
return target;
}
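/*
 * Worked illustration (not part of the original sources): with the default
 * numbering where RUN == 0 and deeper states have larger values, if the
 * CPUs sharing a cluster request { OFF, RUN, OFF } for the cluster domain,
 * the minimum is RUN, so the cluster stays on; it is only powered down
 * once every CPU in it has requested OFF.
 */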
/*
* psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
* memory.
*
* With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
* it's accessed by both cached and non-cached participants. To serve the common
* minimum, perform a cache flush before read and after write so that non-cached
* participants operate on latest data in main memory.
*
* When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
* memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
* In both cases, no cache operations are required.
*/
/*
* Retrieve local state of non-CPU power domain node from a non-cached CPU,
* after any required cache maintenance operation.
*/
static plat_local_state_t get_non_cpu_pd_node_local_state(
unsigned int parent_idx)
{
return psci_non_cpu_pd_nodes[parent_idx].local_state;
}
/******************************************************************************
* Helper function to return the current local power state of each power domain
* from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
* function will be called after a cpu is powered on to find the local state
* each power domain has emerged from.
*****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
psci_power_state_t *target_state)
{
unsigned int parent_idx, lvl, cpu_idx;
plat_local_state_t *pd_state = target_state->pwr_domain_state;
unsigned int hartid = current_hartid();
cpu_idx = plat_core_pos_by_mpidr(hartid);
pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* Copy the local power state from node to state_info */
for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
/* Set the higher levels to RUN */
for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}
/*******************************************************************************
* Generic handler which is called when a cpu is physically powered on. It
* traverses the node information and finds the highest power level powered
* off and performs generic, architectural, platform setup and state management
* to power on that power level and power levels below it.
* e.g. For a cpu that's been powered on, it will call the platform specific
* code to enable the gic cpu interface and for a cluster it will enable
* coherency at the interconnect level in addition to gic cpu interface.
******************************************************************************/
void psci_warmboot_entrypoint(void)
{
unsigned int end_pwrlvl;
unsigned int cpu_idx;
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
unsigned int hartid = current_hartid();
cpu_idx = plat_core_pos_by_mpidr(hartid);
/*
 * Return early if the CPU resumed directly (its local state is still RUN)
 * because a wakeup source fired while the suspend sequence was in progress.
 */
if (psci_get_cpu_local_state() == PSCI_LOCAL_STATE_RUN) {
/* sbi_printf("%s:%d\n", __func__, __LINE__); */
return;
}
/*
* Verify that we have been explicitly turned ON or resumed from
* suspend.
*/
if (psci_get_aff_info_state() == AFF_STATE_OFF) {
sbi_printf("Unexpected affinity info state.\n");
sbi_hart_hang();
}
/*
* Get the maximum power domain level to traverse to after this cpu
* has been physically powered up.
*/
end_pwrlvl = get_power_on_target_pwrlvl();
/* Get the parent nodes */
psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
/*
* This function acquires the lock corresponding to each power level so
* that by the time all locks are taken, the system topology is snapshot
* and state management can be done safely.
*/
psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
#if ENABLE_PSCI_STAT
plat_psci_stat_accounting_stop(&state_info);
#endif
/*
* This CPU could be resuming from suspend or it could have just been
* turned on. To distinguish between these 2 cases, we examine the
* affinity state of the CPU:
* - If the affinity state is ON_PENDING then it has just been
* turned on.
* - Else it is resuming from suspend.
*
* Depending on the type of warm reset identified, choose the right set
* of power management handler and perform the generic, architecture
* and platform specific handling.
*/
if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
psci_cpu_on_finish(cpu_idx, &state_info);
else
psci_cpu_suspend_finish(cpu_idx, &state_info);
/*
* Set the requested and target state of this CPU and all the higher
* power domains which are ancestors of this CPU to run.
*/
psci_set_pwr_domains_to_run(end_pwrlvl);
#if ENABLE_PSCI_STAT
/*
* Update PSCI stats.
* Caches are off when writing stats data on the power down path.
* Since caches are now enabled, it's necessary to do cache
* maintenance before reading that same data.
*/
psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif
/*
* This loop releases the lock corresponding to each power level
* in the reverse order to which they were acquired.
*/
psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}
/******************************************************************************
* This function is used in platform-coordinated mode.
*
* This function is passed the local power states requested for each power
* domain (state_info) between the current CPU domain and its ancestors until
* the target power level (end_pwrlvl). It updates the array of requested power
* states with this information.
*
* Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
* retrieves the states requested by all the cpus of which the power domain at
* that level is an ancestor. It passes this information to the platform to
* coordinate and return the target power state. If the target state for a level
* is RUN then subsequent levels are not considered. At the CPU level, state
* coordination is not required. Hence, the requested and the target states are
* the same.
*
* The 'state_info' is updated with the target state for each level between the
* CPU and the 'end_pwrlvl' and returned to the caller.
*
* This function will only be invoked with data cache enabled and while
* powering down a core.
*****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info)
{
unsigned int lvl, parent_idx;
unsigned int start_idx;
unsigned int ncpus;
plat_local_state_t target_state, *req_states;
unsigned int hartid = current_hartid();
unsigned int cpu_idx = plat_core_pos_by_mpidr(hartid);
if (end_pwrlvl > PLAT_MAX_PWR_LVL) {
sbi_printf("%s:%d, err\n", __func__, __LINE__);
sbi_hart_hang();
}
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* For level 0, the requested state will be equivalent to the target state */
for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
/* First update the requested power state */
psci_set_req_local_pwr_state(lvl, cpu_idx,
state_info->pwr_domain_state[lvl]);
/* Get the requested power states for this power level */
start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
req_states = psci_get_req_local_pwr_states(lvl, start_idx);
/*
* Let the platform coordinate amongst the requested states at
* this power level and return the target local power state.
*/
ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
target_state = plat_get_target_pwr_state(lvl,
req_states,
ncpus);
state_info->pwr_domain_state[lvl] = target_state;
/* Break early if the negotiated target power state is RUN */
if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
break;
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
}
/*
* This is for cases when we break out of the above loop early because
* the target power state is RUN at a power level < end_pwlvl.
* We update the requested power state from state_info and then
* set the target state as RUN.
*/
for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
psci_set_req_local_pwr_state(lvl, cpu_idx,
state_info->pwr_domain_state[lvl]);
state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}
/* Update the target state in the power domain nodes */
psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}
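/*
 * Illustrative sketch (not part of this patch): the per-level coordination
 * above defers to plat_get_target_pwr_state(). A typical implementation,
 * assuming numerically larger plat_local_state_t values mean deeper states,
 * simply picks the shallowest state requested by any CPU, since every CPU
 * sharing the domain must agree before a deeper state is entered. The
 * function name below is hypothetical.
 */
static plat_local_state_t example_coordinate_states(const plat_local_state_t *states,
                                                    unsigned int ncpus)
{
        plat_local_state_t target = states[0];
        unsigned int i;

        for (i = 1; i < ncpus; i++) {
                if (states[i] < target)
                        target = states[i];
        }
        return target;
}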
/******************************************************************************
* This function ensures that the power state parameter in a CPU_SUSPEND request
* is valid. If so, it returns the requested states for each power level.
*****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
psci_power_state_t *state_info)
{
/* Check SBZ bits in power state are zero */
if (psci_check_power_state(power_state) != 0U)
return PSCI_E_INVALID_PARAMS;
if (psci_plat_pm_ops->validate_power_state == NULL) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/* Validate the power_state using platform pm_ops */
return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}
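/*
 * Illustrative sketch of the platform hook invoked above. It assumes the
 * generic psci_get_pstate_pwrlvl()/psci_get_pstate_type() accessors and a
 * PLAT_MAX_RET_STATE retention value (both assumptions, not defined in this
 * patch); a real validate_power_state() would additionally reject state IDs
 * the platform does not implement.
 */
static int example_validate_power_state(unsigned int power_state,
                                        psci_power_state_t *req_state)
{
        unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
        unsigned int type = psci_get_pstate_type(power_state);
        unsigned int i;

        if (pwr_lvl > PLAT_MAX_PWR_LVL)
                return PSCI_E_INVALID_PARAMS;

        /* Request the same (retention or off) state at every affected level */
        for (i = PSCI_CPU_PWR_LVL; i <= pwr_lvl; i++)
                req_state->pwr_domain_state[i] =
                        (type == PSTATE_TYPE_STANDBY) ? PLAT_MAX_RET_STATE :
                                                        PLAT_MAX_OFF_STATE;

        return PSCI_E_SUCCESS;
}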
/******************************************************************************
* This function finds the level of the highest power domain which will be
* placed in a low power state during a suspend operation.
*****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
int i;
for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
return (unsigned int) i;
}
return PSCI_INVALID_PWR_LVL;
}
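/*
 * Illustrative only: assuming PLAT_MAX_PWR_LVL == 2, a request that powers
 * down the CPU and its cluster but keeps the system level running would be
 * expressed as below, and psci_find_target_suspend_lvl() would return 1.
 */
static const psci_power_state_t example_cluster_off_state = {
        .pwr_domain_state = {
                PLAT_MAX_OFF_STATE,     /* level 0: CPU off */
                PLAT_MAX_OFF_STATE,     /* level 1: cluster off */
                PSCI_LOCAL_STATE_RUN,   /* level 2: system keeps running */
        },
};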
/******************************************************************************
* This function validates a suspend request by making sure that if a standby
* state is requested then no power level is turned off and the highest power
* level is placed in a standby/retention state.
*
* It also ensures that the state level X will enter is not shallower than the
* state level X + 1 will enter.
*
* This validation will be enabled only for DEBUG builds as the platform is
* expected to perform these validations as well.
*****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
unsigned int is_power_down_state)
{
unsigned int max_off_lvl, target_lvl, max_retn_lvl;
plat_local_state_t state;
plat_local_state_type_t req_state_type, deepest_state_type;
int i;
/* Find the target suspend power level */
target_lvl = psci_find_target_suspend_lvl(state_info);
if (target_lvl == PSCI_INVALID_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* All power domain levels are in a RUN state to begin with */
deepest_state_type = STATE_TYPE_RUN;
for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
state = state_info->pwr_domain_state[i];
req_state_type = find_local_state_type(state);
/*
* While traversing from the highest power level to the lowest,
* the state requested for lower levels has to be the same or
* deeper i.e. equal to or greater than the state at the higher
* levels. If this condition is true, then the requested state
* becomes the deepest state encountered so far.
*/
if (req_state_type < deepest_state_type)
return PSCI_E_INVALID_PARAMS;
deepest_state_type = req_state_type;
}
/* Find the highest off power level */
max_off_lvl = psci_find_max_off_lvl(state_info);
/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
max_retn_lvl = PSCI_INVALID_PWR_LVL;
if (target_lvl != max_off_lvl)
max_retn_lvl = target_lvl;
/*
* If this is not a request for a power down state then max off level
* has to be invalid and max retention level has to be a valid power
* level.
*/
if ((is_power_down_state == 0U) &&
((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
(max_retn_lvl == PSCI_INVALID_PWR_LVL)))
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
void riscv_pwr_state_to_psci(unsigned int rstate, unsigned int *pstate)
{
*pstate = 0;
/* suspend ? */
if (rstate & (1 << RSTATE_TYPE_SHIFT))
*pstate |= (1 << PSTATE_TYPE_SHIFT);
/* cluster ? */
if (rstate & (PSTATE_PWR_LVL_MASK << RSTATE_PWR_LVL_SHIFT))
*pstate |= (rstate & (PSTATE_PWR_LVL_MASK << RSTATE_PWR_LVL_SHIFT));
}

188
lib/utils/psci/psci_main.c Normal file
View file

@@ -0,0 +1,188 @@
#include <sbi_utils/psci/psci.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_types.h>
#include <sbi/sbi_scratch.h>
#include "psci_private.h"
/*******************************************************************************
* PSCI frontend api for servicing SMCs. Described in the PSCI spec.
******************************************************************************/
int psci_cpu_on(u_register_t target_cpu,
uintptr_t entrypoint)
{
int rc;
/* Determine if the cpu exists or not */
rc = psci_validate_mpidr(target_cpu);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
/*
* To turn this cpu on, specify which power
* levels need to be turned on
*/
return psci_cpu_on_start(target_cpu, entrypoint);
}
int psci_affinity_info(u_register_t target_affinity,
unsigned int lowest_affinity_level)
{
int ret;
unsigned int target_idx;
psci_cpu_data_t *svc_cpu_data;
struct sbi_scratch *scratch = sbi_hartid_to_scratch(target_affinity);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
/* We don't support levels higher than PSCI_CPU_PWR_LVL */
if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* Calculate the cpu index of the target */
ret = plat_core_pos_by_mpidr(target_affinity);
if (ret == -1) {
return PSCI_E_INVALID_PARAMS;
}
target_idx = (unsigned int)ret;
/*
* Generic management:
* Perform cache maintenance ahead of reading the target CPU state to
* ensure that the data is not stale.
* There is a theoretical edge case where the cache may contain stale
* data for the target CPU data - this can occur under the following
* conditions:
* - the target CPU is in another cluster from the current
* - the target CPU was the last CPU to shutdown on its cluster
* - the cluster was removed from coherency as part of the CPU shutdown
*
* In this case the cache maintenance that was performed as part of the
* target CPU's shutdown was not seen by the current CPU's cluster. And
* so the cache may contain stale data for the target CPU.
*/
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t));
return psci_get_aff_info_state_by_idx(target_idx);
}
int psci_cpu_off(void)
{
int rc;
unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
/*
* Do what is needed to power off this CPU and possibly higher power
* levels if it is able to do so. Upon success, enter the final wfi
* which will power down this CPU.
*/
rc = psci_do_cpu_off(target_pwrlvl);
/*
* The only error cpu_off can return is E_DENIED. So check if that's
* indeed the case.
*/
if (rc != PSCI_E_DENIED) {
sbi_printf("%s:%d, err\n", __func__, __LINE__);
sbi_hart_hang();
}
return rc;
}
int psci_cpu_suspend(unsigned int power_state,
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
unsigned int target_pwrlvl, is_power_down_state, pwr_state;
/* entry_point_info_t ep; */
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
plat_local_state_t cpu_pd_state;
riscv_pwr_state_to_psci(power_state, &pwr_state);
/* Validate the power_state parameter */
rc = psci_validate_power_state(pwr_state, &state_info);
if (rc != PSCI_E_SUCCESS) {
if (rc != PSCI_E_INVALID_PARAMS) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
return rc;
}
/*
* Get the value of the state type bit from the power state parameter.
*/
is_power_down_state = psci_get_pstate_type(pwr_state);
/* Sanity check the requested suspend levels */
if (psci_validate_suspend_req(&state_info, is_power_down_state)
!= PSCI_E_SUCCESS) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
if (target_pwrlvl == PSCI_INVALID_PWR_LVL) {
sbi_printf("Invalid target power level for suspend operation\n");
sbi_hart_hang();
}
/* Fast path for CPU standby.*/
if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
if (psci_plat_pm_ops->cpu_standby == NULL)
return PSCI_E_INVALID_PARAMS;
/*
* Set the state of the CPU power domain to the platform
* specific retention state and enter the standby state.
*/
cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
psci_set_cpu_local_state(cpu_pd_state);
#if ENABLE_PSCI_STAT
plat_psci_stat_accounting_start(&state_info);
#endif
psci_plat_pm_ops->cpu_standby(cpu_pd_state);
/* Upon exit from standby, set the state back to RUN. */
psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
#if ENABLE_PSCI_STAT
plat_psci_stat_accounting_stop(&state_info);
/* Update PSCI stats */
psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info);
#endif
return PSCI_E_SUCCESS;
}
/*
* If a power down state has been requested, we need to verify entry
* point and program entry information.
*/
if (is_power_down_state != 0U) {
/* rc = psci_validate_entry_point(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc; */;
}
/*
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this CPU. This function
* might return if the power down was abandoned for any reason, e.g.
* arrival of an interrupt
*/
rc = psci_cpu_suspend_start(/* &ep */entrypoint,
target_pwrlvl,
&state_info,
is_power_down_state);
return rc;
}

173
lib/utils/psci/psci_off.c Normal file
View file

@@ -0,0 +1,173 @@
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/plat/common/platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include "psci_private.h"
/******************************************************************************
* Construct the psci_power_state to request power OFF at all power levels.
******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
unsigned int lvl;
for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}
/******************************************************************************
* Top level handler which is called when a cpu wants to power itself down.
* It's assumed that along with turning the cpu power domain off, power
* domains at higher levels will be turned off as far as possible. It finds
* the highest level where a domain has to be powered off by traversing the
* node information and then performs generic, architectural, platform setup
* and state management required to turn OFF that power domain and domains
* below it. e.g. For a cpu that's to be powered OFF, it could mean programming
* the power controller whereas for a cluster that's to be powered off, it will
* call the platform specific code which will disable coherency at the
* interconnect level if the cpu is the last in the cluster and also
* program the power controller.
******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
int rc = PSCI_E_SUCCESS;
unsigned int hartid = current_hartid();
psci_cpu_data_t *svc_cpu_data;
unsigned int idx = plat_core_pos_by_mpidr(hartid);
psci_power_state_t state_info;
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
/*
* This function must only be called on platforms where the
* CPU_OFF platform hooks have been implemented.
*/
if (psci_plat_pm_ops->pwr_domain_off == NULL) {
sbi_printf("%s:%d, err\n", __func__, __LINE__);
sbi_hart_hang();
}
/* Construct the psci_power_state for CPU_OFF */
psci_set_power_off_state(&state_info);
/*
* Call the platform provided early CPU_OFF handler to allow
* platforms to perform any housekeeping activities before
* actually powering the CPU off. PSCI_E_DENIED indicates that
* the CPU off sequence should be aborted at this time.
*/
if (psci_plat_pm_ops->pwr_domain_off_early) {
rc = psci_plat_pm_ops->pwr_domain_off_early(&state_info);
if (rc == PSCI_E_DENIED) {
return rc;
}
}
/*
* Get the parent nodes here; this is important to do before we
* initiate the power down sequence, as after that point the core may
* have exited coherency and its cache may be disabled. Any access to
* shared memory after that (such as the parent node lookup in
* psci_cpu_pd_nodes) can cause coherency issues on some platforms.
*/
psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);
/*
* This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
#if 0
/*
* Call the cpu off handler registered by the Secure Payload Dispatcher
* to let it do any bookkeeping. Assume that the SPD always reports an
* E_DENIED error if the SP refuses to power down
*/
if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_off != NULL)) {
rc = psci_spd_pm->svc_off(0);
if (rc != 0)
goto exit;
}
#endif
/*
* This function is passed the requested state info and
* it returns the negotiated state info for each power level up to
* the end level specified.
*/
psci_do_state_coordination(end_pwrlvl, &state_info);
#if ENABLE_PSCI_STAT
/* Update the last cpu for each level till end_pwrlvl */
psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif
/*
* Without hardware-assisted coherency, the CPU drivers disable data
* caches, then perform cache-maintenance operations in software.
*
* This also calls prepare_cpu_pwr_dwn() to initiate power down
* sequence, but that function will return with data caches disabled.
* We must ensure that the stack memory is flushed out to memory before
* we start popping from it again.
*/
psci_do_pwrdown_cache_maintenance(hartid, (uintptr_t)scratch, psci_find_max_off_lvl(&state_info));
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
psci_plat_pm_ops->pwr_domain_off(&state_info);
#if ENABLE_PSCI_STAT
plat_psci_stat_accounting_start(&state_info);
#endif
#if 0
exit:
#endif
/*
* Release the locks corresponding to each power level in the
* reverse order to which they were acquired.
*/
psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
/*
* Check if all actions needed to safely power down this cpu have
* successfully completed.
*/
if (rc == PSCI_E_SUCCESS) {
/*
* Set the affinity info state to OFF. When caches are disabled,
* this writes directly to main memory, so cache maintenance is
* required to ensure that later cached reads of aff_info_state
* return AFF_STATE_OFF. A dsbish() ensures ordering of the
* update to the affinity info state prior to cache line
* invalidation.
*/
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t));
psci_set_aff_info_state(AFF_STATE_OFF);
/* psci_dsbish(); */
asm volatile ("fence rw, rw");
csi_dcache_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t));
if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL) {
/* This function must not return */
psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
} else {
/*
* Enter a wfi loop which will allow the power
* controller to physically power down this cpu.
*/
//psci_power_down_wfi();
}
}
return rc;
}

246
lib/utils/psci/psci_on.c Normal file
View file

@@ -0,0 +1,246 @@
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/plat/common/platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include "psci_private.h"
/*
* Helper functions for the CPU level spinlocks
*/
static inline void psci_spin_lock_cpu(unsigned int idx)
{
spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock);
}
static inline void psci_spin_unlock_cpu(unsigned int idx)
{
spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock);
}
/*******************************************************************************
* This function checks whether a cpu which has been requested to be turned on
* is OFF to begin with.
******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
if (aff_state == AFF_STATE_ON)
return PSCI_E_ALREADY_ON;
if (aff_state == AFF_STATE_ON_PENDING)
return PSCI_E_ON_PENDING;
if (aff_state != AFF_STATE_OFF) {
sbi_printf("wrong aff state.\n");
sbi_hart_hang();
}
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Generic handler which is called to physically power on a cpu identified by
* its mpidr. It performs the generic, architectural, platform setup and state
* management to power on the target cpu e.g. it will ensure that
* enough information is stashed for it to resume execution in the non-secure
* security state.
*
* The state of all the relevant power domains is changed after calling the
* platform handler as it can return an error.
******************************************************************************/
int psci_cpu_on_start(u_register_t target, uintptr_t entrypoint)
{
int rc;
aff_info_state_t target_aff_state;
int ret = 0;
unsigned int target_idx;
psci_cpu_data_t *svc_cpu_data;
struct sbi_scratch *scratch = sbi_hartid_to_scratch(target);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
ret = plat_core_pos_by_mpidr(target);
if ((ret < 0) || (ret >= (int)PLATFORM_CORE_COUNT)) {
sbi_printf("Unexpected core index.\n");
sbi_hart_hang();
}
target_idx = (unsigned int)ret;
/*
* This function must only be called on platforms where the
* CPU_ON platform hooks have been implemented.
*/
if (psci_plat_pm_ops->pwr_domain_on == NULL ||
psci_plat_pm_ops->pwr_domain_on_finish == NULL) {
sbi_printf("%s:%d, invalid psci ops\n", __func__, __LINE__);
sbi_hart_hang();
}
/* Protect against multiple CPUs trying to turn ON the same target CPU */
psci_spin_lock_cpu(target_idx);
/*
* Generic management: Ensure that the cpu is off to be
* turned on.
* Perform cache maintenance ahead of reading the target CPU state to
* ensure that the data is not stale.
* There is a theoretical edge case where the cache may contain stale
* data for the target CPU data - this can occur under the following
* conditions:
* - the target CPU is in another cluster from the current
* - the target CPU was the last CPU to shutdown on its cluster
* - the cluster was removed from coherency as part of the CPU shutdown
*
* In this case the cache maintenance that was performed as part of the
* target CPU's shutdown was not seen by the current CPU's cluster. And
* so the cache may contain stale data for the target CPU.
*/
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t));
rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
if (rc != PSCI_E_SUCCESS) {
goto exit;
}
#if 0
/*
* Call the cpu on handler registered by the Secure Payload Dispatcher
* to let it do any bookkeeping. If the handler encounters an error, it's
* expected to assert within
*/
if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on != NULL))
psci_spd_pm->svc_on(target_cpu);
#endif
/*
* Set the Affinity info state of the target cpu to ON_PENDING.
* Flush aff_info_state as it will be accessed with caches
* turned OFF.
*/
psci_set_aff_info_state_by_idx((uintptr_t)target_idx, AFF_STATE_ON_PENDING);
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t));
/*
* The cache line invalidation by the target CPU after setting the
* state to OFF (see psci_do_cpu_off()), could cause the update to
* aff_info_state to be invalidated. Retry the update if the target
* CPU aff_info_state is not ON_PENDING.
*/
target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
if (target_aff_state != AFF_STATE_ON_PENDING) {
if (target_aff_state != AFF_STATE_OFF) {
sbi_printf("%s:%d, invalid psci state\n", __func__, __LINE__);
sbi_hart_hang();
}
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t));
if (psci_get_aff_info_state_by_idx(target_idx) !=
AFF_STATE_ON_PENDING) {
sbi_printf("%s:%d, invalid psci state\n", __func__, __LINE__);
sbi_hart_hang();
}
}
/*
* Perform generic, architecture and platform specific handling.
*/
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
rc = psci_plat_pm_ops->pwr_domain_on(target);
if ((rc != PSCI_E_SUCCESS) && (rc != PSCI_E_INTERN_FAIL)) {
sbi_printf("%s:%d, power-on domain err\n", __func__, __LINE__);
sbi_hart_hang();
}
if (rc == PSCI_E_SUCCESS) {
/* Store the re-entry information for the non-secure world. */
/**/;
} else {
/* Restore the state on error. */
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->aff_info_state, sizeof(aff_info_state_t));
}
exit:
psci_spin_unlock_cpu(target_idx);
return rc;
}
/*******************************************************************************
* The following function finishes an earlier power on request. It is
* called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up.
******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
const struct sbi_platform *sbi = sbi_platform_thishart_ptr();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(sbi->hart_index2id[cpu_idx]);
/*
* Plat. management: Perform the platform specific actions
* for this cpu e.g. enabling the gic or zeroing the mailbox
* register. The actual state of this cpu has already been
* changed.
*/
psci_plat_pm_ops->pwr_domain_on_finish(state_info);
/*
* Arch. management: Enable data cache and manage stack memory
*/
psci_do_pwrup_cache_maintenance((uintptr_t)scratch);
/*
* Plat. management: Perform any platform specific actions which
* can only be done with the cpu and the cluster guaranteed to
* be coherent.
*/
if (psci_plat_pm_ops->pwr_domain_on_finish_late != NULL)
psci_plat_pm_ops->pwr_domain_on_finish_late(state_info);
#if 0
/*
* All the platform specific actions for turning this cpu
* on have completed. Perform enough arch.initialization
* to run in the non-secure address space.
*/
psci_arch_setup();
#endif
/*
* Lock the CPU spin lock to make sure that the context initialization
* is done. Since the lock is only used in this function to create
* a synchronization point with cpu_on_start(), it can be released
* immediately.
*/
psci_spin_lock_cpu(cpu_idx);
psci_spin_unlock_cpu(cpu_idx);
/* Ensure we have been explicitly woken up by another cpu */
if (psci_get_aff_info_state() != AFF_STATE_ON_PENDING) {
sbi_printf("%s:%d, err\n", __func__, __LINE__);
sbi_hart_hang();
}
#if 0
/*
* Call the cpu on finish handler registered by the Secure Payload
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_on_finish != NULL))
psci_spd_pm->svc_on_finish(0);
PUBLISH_EVENT(psci_cpu_on_finish);
#endif
/* Populate the mpidr field within the cpu node array */
/* This needs to be done only once */
psci_cpu_pd_nodes[cpu_idx].mpidr = current_hartid();
}

View file

@@ -0,0 +1,198 @@
#ifndef __PSCI_PRIVATE_H__
#define __PSCI_PRIVATE_H__
#include <sbi/riscv_locks.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/cache/cacheflush.h>
/*******************************************************************************
* The following two data structures implement the power domain tree. The tree
* is used to track the state of all the nodes i.e. power domain instances
* described by the platform. The tree consists of nodes that describe CPU power
* domains i.e. leaf nodes and all other power domains which are parents of a
* CPU power domain i.e. non-leaf nodes.
******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
/*
* Index of the first CPU power domain node level 0 which has this node
* as its parent.
*/
unsigned int cpu_start_idx;
/*
* Number of CPU power domains which are siblings of the domain indexed
* by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
* -> cpu_start_idx + ncpus' have this node as their parent.
*/
unsigned int ncpus;
/*
* Index of the parent power domain node.
* TODO: Figure out whether using a pointer is more efficient.
*/
unsigned int parent_node;
plat_local_state_t local_state;
unsigned char level;
/* For indexing the psci_lock array*/
unsigned short lock_index;
} __aligned(CACHE_LINE_SIZE) non_cpu_pd_node_t;
typedef struct cpu_pwr_domain_node {
u_register_t mpidr;
/*
* Index of the parent power domain node.
* TODO: Figure out whether using a pointer is more efficient.
*/
unsigned int parent_node;
/*
* A CPU power domain does not require state coordination like its
* parent power domains. Hence this node does not include a bakery
* lock. A spinlock is required by the CPU_ON handler to prevent a race
* when multiple CPUs try to turn ON the same target CPU.
*/
spinlock_t cpu_lock;
} cpu_pd_node_t;
/*
* On systems where participant CPUs are cache-coherent, we can use spinlocks
* instead of bakery locks.
*/
typedef struct psci_spinlock_t {
spinlock_t lock;
} __aligned(CACHE_LINE_SIZE) _psci_spinlock_t;
#define DEFINE_PSCI_LOCK(_name) _psci_spinlock_t _name
#define DECLARE_PSCI_LOCK(_name) extern DEFINE_PSCI_LOCK(_name)
/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node, unsigned short idx)
{
non_cpu_pd_node[idx].lock_index = idx;
}
static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
spin_lock(&psci_locks[non_cpu_pd_node->lock_index].lock);
}
static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
spin_unlock(&psci_locks[non_cpu_pd_node->lock_index].lock);
}
/* common */
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern unsigned int psci_plat_core_count;
extern unsigned long psci_delta_off;
extern const plat_psci_ops_t *psci_plat_pm_ops;
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
const unsigned int *parent_nodes);
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
const unsigned int *parent_nodes);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
int psci_validate_mpidr(u_register_t mpidr);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
unsigned int end_lvl,
unsigned int *node_index);
void psci_init_req_local_pwr_states(void);
void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
plat_local_state_t state);
void psci_set_req_local_pwr_state(unsigned int pwrlvl,
unsigned int cpu_idx,
plat_local_state_t req_pwr_state);
void psci_set_aff_info_state(aff_info_state_t aff_state);
aff_info_state_t psci_get_aff_info_state(void);
aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx);
void psci_set_aff_info_state_by_idx(unsigned int idx, aff_info_state_t aff_state);
void psci_set_cpu_local_state(plat_local_state_t state);
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
psci_power_state_t *target_state);
void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info);
int plat_core_pos_by_mpidr(u_register_t mpidr);
int psci_validate_power_state(unsigned int power_state,
psci_power_state_t *state_info);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
unsigned int is_power_down_state);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_suspend_pwrlvl(unsigned int target_lvl);
/* Private exported functions from psci_suspend.c */
int psci_cpu_suspend_start(/* const entry_point_info_t *ep */ uintptr_t entrypoint,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state);
void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);
void riscv_pwr_state_to_psci(unsigned int rstate, unsigned int *pstate);
/* Helper function to identify a CPU standby request in PSCI Suspend call */
static inline bool is_cpu_standby_req(unsigned int is_power_down_state,
unsigned int retn_lvl)
{
return (is_power_down_state == 0U) && (retn_lvl == 0U);
}
static inline void psci_do_pwrup_cache_maintenance(uintptr_t scratch)
{
/* invalidate local cache */
csi_invalidate_dcache_all();
/* enable dcache */
csi_enable_dcache();
}
static inline void psci_disable_core_snoop(void)
{
unsigned int hartid = current_hartid();
csr_clear(CSR_ML2SETUP, 1 << (hartid % PLATFORM_MAX_CPUS_PER_CLUSTER));
}
static inline void psci_do_pwrdown_cache_maintenance(int hartid, uintptr_t scratch, int power_level)
{
/* disable the data prefetch */
csi_disable_data_preftch();
/* flush dcache all */
csi_flush_dcache_all();
if (power_level >= PSCI_CPU_PWR_LVL + 1) {
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
/* disable the tcm */
csr_write(CSR_TCMCFG, 0);
#endif
csi_flush_l2_cache();
}
/* disable dcache */
csi_disable_dcache();
/* disable core snoop */
psci_disable_core_snoop();
asm volatile ("fence iorw, iorw");
}
/* psci cpu */
int psci_cpu_on_start(u_register_t target, uintptr_t entrypoint);
void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);
int psci_do_cpu_off(unsigned int end_pwrlvl);
#endif

242
lib/utils/psci/psci_setup.c Normal file
View file

@@ -0,0 +1,242 @@
/*
* Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <sbi/sbi_console.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/plat/common/platform.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/psci/psci_lib.h>
#include "psci_private.h"
/*******************************************************************************
* Function which initializes the 'psci_non_cpu_pd_nodes' or the
* 'psci_cpu_pd_nodes' corresponding to the power level.
******************************************************************************/
static void psci_init_pwr_domain_node(uint16_t node_idx,
unsigned int parent_idx,
unsigned char level)
{
if (level > PSCI_CPU_PWR_LVL) {
if (node_idx >= PSCI_NUM_NON_CPU_PWR_DOMAINS) {
sbi_printf("%s:%d, node_idx beyond the boundary\n",
__func__, __LINE__);
sbi_hart_hang();
}
psci_non_cpu_pd_nodes[node_idx].level = level;
psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
psci_non_cpu_pd_nodes[node_idx].local_state =
PLAT_MAX_OFF_STATE;
} else {
psci_cpu_data_t *svc_cpu_data;
const struct sbi_platform *sbi = sbi_platform_thishart_ptr();
if (node_idx >= PLATFORM_CORE_COUNT) {
sbi_printf("%s:%d, node_idx beyond the boundary\n",
__func__, __LINE__);
sbi_hart_hang();
}
unsigned int hartid = sbi->hart_index2id[node_idx];
psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
/* Initialize with an invalid mpidr */
psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
/* Set the Affinity Info for the cores as OFF */
svc_cpu_data->aff_info_state = AFF_STATE_OFF;
/* Invalidate the suspend level for the cpu */
svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
/* Set the power state to OFF state */
svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
csi_dcache_clean_invalid_range((uintptr_t)svc_cpu_data, sizeof(psci_cpu_data_t));
}
}
/*******************************************************************************
* This function updates the cpu_start_idx and ncpus fields for each node in
* psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
* the CPUs and checking whether they match the parent of the previous
* CPU. The basic assumption for this to work is that children of the same parent
* are allocated adjacent indices. The platform should ensure this through proper
* mapping of the CPUs to indices via plat_core_pos_by_mpidr() and
* plat_my_core_pos() APIs.
*******************************************************************************/
static void psci_update_pwrlvl_limits(void)
{
unsigned int cpu_idx;
int j;
unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
unsigned int temp_index[PLAT_MAX_PWR_LVL];
for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
psci_get_parent_pwr_domain_nodes(cpu_idx,
PLAT_MAX_PWR_LVL,
temp_index);
for (j = (int)PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
if (temp_index[j] != nodes_idx[j]) {
nodes_idx[j] = temp_index[j];
psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
= cpu_idx;
}
psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
}
}
}
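/*
 * Worked example (illustrative): for the two-cluster, two-CPUs-per-cluster
 * layout pictured in the psci_setup() comment further down, this pass leaves
 * the non-CPU nodes with:
 *   cluster 0: cpu_start_idx == 0, ncpus == 2
 *   cluster 1: cpu_start_idx == 2, ncpus == 2
 *   system:    cpu_start_idx == 0, ncpus == 4
 */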
/*******************************************************************************
* Core routine to populate the power domain tree. The tree descriptor passed by
* the platform is populated breadth-first and the first entry in the map
* informs the number of root power domains. The parent nodes of the root nodes
* will point to an invalid entry(-1).
******************************************************************************/
static unsigned int populate_power_domain_tree(const unsigned char
*topology)
{
unsigned int i, j = 0U, num_nodes_at_lvl = 1U, num_nodes_at_next_lvl;
unsigned int node_index = 0U, num_children;
unsigned int parent_node_index = 0U;
int level = (int)PLAT_MAX_PWR_LVL;
/*
* For each level the inputs are:
* - number of nodes at this level in plat_array i.e. num_nodes_at_level
* This is the sum of values of nodes at the parent level.
* - Index of first entry at this level in the plat_array i.e.
* parent_node_index.
* - Index of first free entry in psci_non_cpu_pd_nodes[] or
* psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
*/
while (level >= (int) PSCI_CPU_PWR_LVL) {
num_nodes_at_next_lvl = 0U;
/*
* For each entry (parent node) at this level in the plat_array:
* - Find the number of children
* - Allocate a node in a power domain array for each child
* - Set the parent of the child to the parent_node_index - 1
* - Increment parent_node_index to point to the next parent
* - Accumulate the number of children at next level.
*/
for (i = 0U; i < num_nodes_at_lvl; i++) {
if (parent_node_index > PSCI_NUM_NON_CPU_PWR_DOMAINS) {
sbi_printf("%s:%d, node_idx beyond the boundary\n",
__func__, __LINE__);
sbi_hart_hang();
}
num_children = topology[parent_node_index];
for (j = node_index;
j < (node_index + num_children); j++)
psci_init_pwr_domain_node((uint16_t)j,
parent_node_index - 1U,
(unsigned char)level);
node_index = j;
num_nodes_at_next_lvl += num_children;
parent_node_index++;
}
num_nodes_at_lvl = num_nodes_at_next_lvl;
level--;
/* Reset the index for the cpu power domain array */
if (level == (int) PSCI_CPU_PWR_LVL)
node_index = 0;
}
/* Validate the sanity of array exported by the platform */
if (j > PLATFORM_CORE_COUNT) {
sbi_printf("%s:%d, invalidate core count\n",
__func__, __LINE__);
sbi_hart_hang();
}
return j;
}
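/*
 * Illustrative sketch: a breadth-first topology descriptor matching the
 * two-cluster example pictured below. The first entry gives the number of
 * root power domains and each subsequent entry gives the child count of the
 * next node at the current level. The array name is hypothetical; the real
 * descriptor comes from plat_get_power_domain_tree_desc().
 */
static const unsigned char example_power_domain_tree_desc[] = {
        1,      /* one root (system) power domain */
        2,      /* the system node has two cluster children */
        2,      /* cluster 0 contains two CPUs */
        2,      /* cluster 1 contains two CPUs */
};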
/*******************************************************************************
* This function does the architectural setup and takes the warm boot
* entry-point `mailbox_ep` as an argument. The function also initializes the
* power domain topology tree by querying the platform. The power domain nodes
* higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
* the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
* exports its static topology map through the
* populate_power_domain_topology_tree() API. The algorithm populates the
* psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
* topology map. On a platform that implements two clusters of 2 cpus each,
* and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would
* look like this:
*
* ---------------------------------------------------
* | system node | cluster 0 node | cluster 1 node |
* ---------------------------------------------------
*
* And populated psci_cpu_pd_nodes would look like this :
* <- cpus cluster0 -><- cpus cluster1 ->
* ------------------------------------------------
* | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
* ------------------------------------------------
******************************************************************************/
int psci_setup(void)
{
unsigned int cpu_idx;
const unsigned char *topology_tree;
unsigned int hartid = current_hartid();
cpu_idx = plat_core_pos_by_mpidr(hartid);
psci_delta_off = sbi_scratch_alloc_offset(sizeof(psci_cpu_data_t));
if (!psci_delta_off)
return SBI_ENOMEM;
/* Query the topology map from the platform */
topology_tree = plat_get_power_domain_tree_desc();
/* Populate the power domain arrays using the platform topology map */
psci_plat_core_count = populate_power_domain_tree(topology_tree);
/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
psci_update_pwrlvl_limits();
/* Populate the mpidr field of cpu node for this CPU */
psci_cpu_pd_nodes[cpu_idx].mpidr = hartid;
psci_init_req_local_pwr_states();
/*
* Set the requested and target state of this CPU and all the higher
* power domain levels for this CPU to run.
*/
psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
psci_print_power_domain_map();
(void) plat_setup_psci_ops(0, &psci_plat_pm_ops);
if (psci_plat_pm_ops == NULL) {
sbi_printf("%s:%d, invalid psci ops\n", __func__, __LINE__);
sbi_hart_hang();
}
/*
* Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
* during warm boot, possibly before data cache is enabled.
*/
csi_dcache_clean_invalid_range((uintptr_t)&psci_plat_pm_ops, sizeof(*psci_plat_pm_ops));
return 0;
}

View file

@@ -0,0 +1,298 @@
#include <sbi_utils/psci/psci.h>
#include <sbi_utils/cache/cacheflush.h>
#include <sbi_utils/psci/plat/common/platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include "psci_private.h"
/*******************************************************************************
* This function does generic and platform specific operations after a wake-up
* from standby/retention states at multiple power levels.
******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
unsigned int end_pwrlvl)
{
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
psci_power_state_t state_info;
/* Get the parent nodes */
psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
/*
* Find out which retention states this CPU has exited from until the
* 'end_pwrlvl'. The exit retention state could be deeper than the entry
* state as a result of state coordination amongst other CPUs post wfi.
*/
psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
#if ENABLE_PSCI_STAT
plat_psci_stat_accounting_stop(&state_info);
psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif
/*
* Plat. management: Allow the platform to do operations
* on waking up from retention.
*/
psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);
/*
* Set the requested and target state of this CPU and all the higher
* power domain levels for this CPU to run.
*/
psci_set_pwr_domains_to_run(end_pwrlvl);
psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}
/*******************************************************************************
* This function does generic and platform specific suspend to power down
* operations.
******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
/* const entry_point_info_t *ep */ uintptr_t ep,
const psci_power_state_t *state_info)
{
unsigned int hartid = current_hartid();
psci_cpu_data_t *svc_cpu_data;
/* unsigned int max_off_lvl = psci_find_max_off_lvl(state_info); */
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
/* save something ???? */
/* PUBLISH_EVENT(psci_suspend_pwrdown_start); */
/* Save PSCI target power level for the suspend finisher handler */
psci_set_suspend_pwrlvl(end_pwrlvl);
/*
* Flush the target power level as it might be accessed on power up with
* Data cache disabled.
*/
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->target_pwrlvl, sizeof(unsigned int));
#if 0
/*
* Call the cpu suspend handler registered by the Secure Payload
* Dispatcher to let it do any book-keeping. If the handler encounters an
* error, it's expected to assert within
*/
if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
psci_spd_pm->svc_suspend(max_off_lvl);
#endif
/*
* Plat. management: Allow the platform to perform any early
* actions required to power down the CPU. This might be useful for
* HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
* actions with data caches enabled.
*/
if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
/*
* Store the re-entry information for the non-secure world.
*/
/* cm_init_my_context(ep); */
/*
* Arch. management. Initiate power down sequence.
* TODO : Introduce a mechanism to query the cache level to flush
* and the cpu-ops power down to perform from the platform.
*/
/* psci_pwrdown_cpu(max_off_lvl); */
psci_do_pwrdown_cache_maintenance(hartid, (uintptr_t)scratch, psci_find_max_off_lvl(state_info));
}
/*******************************************************************************
* Top level handler which is called when a cpu wants to suspend its execution.
* It is assumed that along with suspending the cpu power domain, power domains
* at higher levels until the target power level will be suspended as well. It
* coordinates with the platform to negotiate the target state for each of
* the power domain level till the target power domain level. It then performs
* generic, architectural, platform setup and state management required to
* suspend that power domain level and power domain levels below it.
* e.g. For a cpu that's to be suspended, it could mean programming the
* power controller whereas for a cluster that's to be suspended, it will call
* the platform specific code which will disable coherency at the interconnect
* level if the cpu is the last in the cluster and also program the power
* controller.
*
* All the required parameter checks are performed at the beginning and after
* the state transition has been done, no further error is expected and it is
* not possible to undo any of the actions taken beyond that point.
******************************************************************************/
int psci_cpu_suspend_start(/* const entry_point_info_t *ep */uintptr_t ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state)
{
int rc = PSCI_E_SUCCESS;
bool skip_wfi = false;
unsigned int hartid = current_hartid();
unsigned int idx = plat_core_pos_by_mpidr(hartid);
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
/*
* This function must only be called on platforms where the
* CPU_SUSPEND platform hooks have been implemented.
*/
if ((psci_plat_pm_ops->pwr_domain_suspend == NULL) ||
(psci_plat_pm_ops->pwr_domain_suspend_finish == NULL)) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/* Get the parent nodes */
psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);
/*
* This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
/*
* We check if there are any pending interrupts after the delay
* introduced by lock contention to increase the chances of early
* detection that a wake-up interrupt has fired.
*/
if (__get_Supervisor_isr() != 0U) {
skip_wfi = true;
goto exit;
}
/*
* This function is passed the requested state info and
* it returns the negotiated state info for each power level up to
* the end level specified.
*/
psci_do_state_coordination(end_pwrlvl, state_info);
#if ENABLE_PSCI_STAT
/* Update the last cpu for each level till end_pwrlvl */
psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif
if (is_power_down_state != 0U)
psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
/*
* Plat. management: Allow the platform to perform the
* necessary actions to turn off this cpu e.g. set the
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
*/
psci_plat_pm_ops->pwr_domain_suspend(state_info);
#if ENABLE_PSCI_STAT
plat_psci_stat_accounting_start(state_info);
#endif
exit:
/*
* Release the locks corresponding to each power level in the
* reverse order to which they were acquired.
*/
psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
if (skip_wfi) {
return rc;
}
if (is_power_down_state != 0U) {
/* The function calls below must not return */
if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi != NULL)
psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
else
/* psci_power_down_wfi() */;
}
/*
* We will reach here if only retention/standby states have been
* requested at multiple power levels. This means that the cpu
* context will be preserved.
*/
/* wfi(); */
asm volatile ("wfi");
/*
* After we wake up from context retaining suspend, call the
* context retaining suspend finisher.
*/
psci_suspend_to_standby_finisher(idx, end_pwrlvl);
return rc;
}
/*******************************************************************************
* The following function finishes an earlier suspend request. It is
* called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up.
******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
/* unsigned int counter_freq; */
/* unsigned int max_off_lvl; */
unsigned int hartid = current_hartid();
psci_cpu_data_t *svc_cpu_data;
struct sbi_scratch *scratch = sbi_hartid_to_scratch(hartid);
svc_cpu_data = sbi_scratch_offset_ptr(scratch, psci_delta_off);
/* Ensure we have been woken up from a suspended state */
if ((psci_get_aff_info_state() != AFF_STATE_ON) ||
(is_local_state_off(
state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) == 0)) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/*
* Plat. management: Perform the platform specific actions
* before we change the state of the cpu e.g. enabling the
* gic or zeroing the mailbox register. If anything goes
* wrong then assert as there is no way to recover from this
* situation.
*/
psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
/* Arch. management: Enable the data cache, stack memory maintenance. */
psci_do_pwrup_cache_maintenance((uintptr_t)scratch);
#if 0
/* Re-init the cntfrq_el0 register */
counter_freq = plat_get_syscnt_freq2();
write_cntfrq_el0(counter_freq);
#endif
/*
* Call the cpu suspend finish handler registered by the Secure Payload
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
#if 0
if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
max_off_lvl = psci_find_max_off_lvl(state_info);
assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
psci_spd_pm->svc_suspend_finish(max_off_lvl);
}
#endif
/* Invalidate the suspend level for the cpu */
psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
csi_dcache_clean_invalid_range((uintptr_t)&svc_cpu_data->target_pwrlvl, sizeof(unsigned int));
/* PUBLISH_EVENT(psci_suspend_pwrdown_finish); */
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the suspend
* call to set this cpu on its way.
*/
/* cm_prepare_el3_exit_ns(); */
}

View file

@@ -0,0 +1,345 @@
#include <sbi/riscv_io.h>
#include <sbi/sbi_types.h>
#include <sbi/riscv_asm.h>
#include <sbi_utils/psci/psci.h>
#include <sbi/sbi_console.h>
#include <spacemit/spacemit_config.h>
#define C1_CPU_RESET_BASE_ADDR (0xD4282B24)
#define PMU_CAP_CORE0_IDLE_CFG (0xd4282924)
#define PMU_CAP_CORE1_IDLE_CFG (0xd4282928)
#define PMU_CAP_CORE2_IDLE_CFG (0xd4282960)
#define PMU_CAP_CORE3_IDLE_CFG (0xd4282964)
#define PMU_CAP_CORE4_IDLE_CFG (0xd4282b04)
#define PMU_CAP_CORE5_IDLE_CFG (0xd4282b08)
#define PMU_CAP_CORE6_IDLE_CFG (0xd4282b0c)
#define PMU_CAP_CORE7_IDLE_CFG (0xd4282b10)
#define PMU_C0_CAPMP_IDLE_CFG0 (0xd4282920)
#define PMU_C0_CAPMP_IDLE_CFG1 (0xd42828e4)
#define PMU_C0_CAPMP_IDLE_CFG2 (0xd4282950)
#define PMU_C0_CAPMP_IDLE_CFG3 (0xd4282954)
#define PMU_C1_CAPMP_IDLE_CFG0 (0xd4282b14)
#define PMU_C1_CAPMP_IDLE_CFG1 (0xd4282b18)
#define PMU_C1_CAPMP_IDLE_CFG2 (0xd4282b1c)
#define PMU_C1_CAPMP_IDLE_CFG3 (0xd4282b20)
#define PMU_ACPR_CLUSTER0_REG (0xd4051090)
#define PMU_ACPR_CLUSTER1_REG (0xd4051094)
#define CPU_PWR_DOWN_VALUE (0x3)
#define CLUSTER_PWR_DOWN_VALUE (0x3)
#define CLUSTER_AXISDO_OFFSET (31)
struct pmu_cap_wakeup {
unsigned int pmu_cap_core0_wakeup;
unsigned int pmu_cap_core1_wakeup;
unsigned int pmu_cap_core2_wakeup;
unsigned int pmu_cap_core3_wakeup;
};
/* D1P */
void spacemit_top_on(u_register_t mpidr)
{
unsigned int *cluster0_acpr = NULL;
unsigned int *cluster1_acpr = NULL;
cluster0_acpr = (unsigned int *)PMU_ACPR_CLUSTER0_REG;
cluster1_acpr = (unsigned int *)PMU_ACPR_CLUSTER1_REG;
unsigned int value = readl(cluster0_acpr);
value &= ~(1 << CLUSTER_AXISDO_OFFSET);
writel(value, cluster0_acpr);
value = readl(cluster1_acpr);
value &= ~(1 << CLUSTER_AXISDO_OFFSET);
writel(value, cluster1_acpr);
}
/* D1P */
void spacemit_top_off(u_register_t mpidr)
{
unsigned int *cluster0_acpr = NULL;
unsigned int *cluster1_acpr = NULL;
cluster0_acpr = (unsigned int *)PMU_ACPR_CLUSTER0_REG;
cluster1_acpr = (unsigned int *)PMU_ACPR_CLUSTER1_REG;
unsigned int value = readl(cluster0_acpr);
value |= (1 << CLUSTER_AXISDO_OFFSET);
writel(value, cluster0_acpr);
value = readl(cluster1_acpr);
value |= (1 << CLUSTER_AXISDO_OFFSET);
writel(value, cluster1_acpr);
}
/* M2 */
void spacemit_cluster_on(u_register_t mpidr)
{
unsigned int target_cpu_idx, value;
unsigned int *cluster_assert_base0 = NULL;
unsigned int *cluster_assert_base1 = NULL;
unsigned int *cluster_assert_base2 = NULL;
unsigned int *cluster_assert_base3 = NULL;
unsigned int *cluster_assert_base4 = NULL;
unsigned int *cluster_assert_base5 = NULL;
unsigned int *cluster_assert_base6 = NULL;
unsigned int *cluster_assert_base7 = NULL;
target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER
+ MPIDR_AFFLVL0_VAL(mpidr);
switch (target_cpu_idx) {
case 0:
case 1:
case 2:
case 3:
cluster_assert_base0 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG0;
cluster_assert_base1 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG1;
cluster_assert_base2 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG2;
cluster_assert_base3 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG3;
/* cluster vote */
/* M2 */
value = readl(cluster_assert_base0);
value &= ~CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base0);
value = readl(cluster_assert_base1);
value &= ~CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base1);
value = readl(cluster_assert_base2);
value &= ~CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base2);
value = readl(cluster_assert_base3);
value &= ~CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base3);
break;
case 4:
case 5:
case 6:
case 7:
cluster_assert_base4 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG0;
cluster_assert_base5 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG1;
cluster_assert_base6 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG2;
cluster_assert_base7 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG3;
/* cluster vote */
/* M2 */
value = readl(cluster_assert_base4);
value &= ~CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base4);
value = readl(cluster_assert_base5);
value &= ~CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base5);
value = readl(cluster_assert_base6);
value &= ~CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base6);
value = readl(cluster_assert_base7);
value &= ~CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base7);
break;
}
}
/* M2 */
void spacemit_cluster_off(u_register_t mpidr)
{
unsigned int target_cpu_idx, value;
unsigned int *cluster_assert_base0 = NULL;
unsigned int *cluster_assert_base1 = NULL;
unsigned int *cluster_assert_base2 = NULL;
unsigned int *cluster_assert_base3 = NULL;
unsigned int *cluster_assert_base4 = NULL;
unsigned int *cluster_assert_base5 = NULL;
unsigned int *cluster_assert_base6 = NULL;
unsigned int *cluster_assert_base7 = NULL;
target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER
+ MPIDR_AFFLVL0_VAL(mpidr);
switch (target_cpu_idx) {
case 0:
case 1:
case 2:
case 3:
cluster_assert_base0 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG0;
cluster_assert_base1 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG1;
cluster_assert_base2 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG2;
cluster_assert_base3 = (unsigned int *)PMU_C0_CAPMP_IDLE_CFG3;
/* cluster vote */
/* M2 */
value = readl(cluster_assert_base0);
value |= CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base0);
value = readl(cluster_assert_base1);
value |= CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base1);
value = readl(cluster_assert_base2);
value |= CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base2);
value = readl(cluster_assert_base3);
value |= CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base3);
break;
case 4:
case 5:
case 6:
case 7:
cluster_assert_base4 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG0;
cluster_assert_base5 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG1;
cluster_assert_base6 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG2;
cluster_assert_base7 = (unsigned int *)PMU_C1_CAPMP_IDLE_CFG3;
/* cluster vote */
/* M2 */
value = readl(cluster_assert_base4);
value |= CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base4);
value = readl(cluster_assert_base5);
value |= CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base5);
value = readl(cluster_assert_base6);
value |= CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base6);
value = readl(cluster_assert_base7);
value |= CLUSTER_PWR_DOWN_VALUE;
writel(value, cluster_assert_base7);
break;
}
}
void spacemit_wakeup_cpu(u_register_t mpidr)
{
unsigned int *cpu_reset_base;
struct pmu_cap_wakeup *pmu_cap_wakeup;
unsigned int cur_cluster, cur_cpu;
unsigned int target_cpu_idx;
unsigned int cur_hartid = current_hartid();
cur_cluster = MPIDR_AFFLVL1_VAL(cur_hartid);
cur_cpu = MPIDR_AFFLVL0_VAL(cur_hartid);
pmu_cap_wakeup = (struct pmu_cap_wakeup *)((cur_cluster == 0) ? (unsigned int *)CPU_RESET_BASE_ADDR :
(unsigned int *)C1_CPU_RESET_BASE_ADDR);
switch (cur_cpu) {
case 0:
cpu_reset_base = &pmu_cap_wakeup->pmu_cap_core0_wakeup;
break;
case 1:
cpu_reset_base = &pmu_cap_wakeup->pmu_cap_core1_wakeup;
break;
case 2:
cpu_reset_base = &pmu_cap_wakeup->pmu_cap_core2_wakeup;
break;
case 3:
cpu_reset_base = &pmu_cap_wakeup->pmu_cap_core3_wakeup;
break;
}
target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER
+ MPIDR_AFFLVL0_VAL(mpidr);
writel(1 << target_cpu_idx, cpu_reset_base);
}
void spacemit_assert_cpu(u_register_t mpidr)
{
unsigned int target_cpu_idx;
unsigned int *cpu_assert_base = NULL;
target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER
+ MPIDR_AFFLVL0_VAL(mpidr);
switch (target_cpu_idx) {
case 0:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE0_IDLE_CFG;
break;
case 1:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE1_IDLE_CFG;
break;
case 2:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE2_IDLE_CFG;
break;
case 3:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE3_IDLE_CFG;
break;
case 4:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE4_IDLE_CFG;
break;
case 5:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE5_IDLE_CFG;
break;
case 6:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE6_IDLE_CFG;
break;
case 7:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE7_IDLE_CFG;
break;
}
/* cpu vote */
/* C2 */
unsigned int value = readl(cpu_assert_base);
value |= CPU_PWR_DOWN_VALUE;
writel(value, cpu_assert_base);
}
void spacemit_deassert_cpu(void)
{
unsigned int mpidr = current_hartid();
/* clear the idle bit */
unsigned int target_cpu_idx;
unsigned int *cpu_assert_base = NULL;
target_cpu_idx = MPIDR_AFFLVL1_VAL(mpidr) * PLATFORM_MAX_CPUS_PER_CLUSTER
+ MPIDR_AFFLVL0_VAL(mpidr);
switch (target_cpu_idx) {
case 0:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE0_IDLE_CFG;
break;
case 1:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE1_IDLE_CFG;
break;
case 2:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE2_IDLE_CFG;
break;
case 3:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE3_IDLE_CFG;
break;
case 4:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE4_IDLE_CFG;
break;
case 5:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE5_IDLE_CFG;
break;
case 6:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE6_IDLE_CFG;
break;
case 7:
cpu_assert_base = (unsigned int *)PMU_CAP_CORE7_IDLE_CFG;
break;
}
/* de-vote cpu */
unsigned int value = readl(cpu_assert_base);
value &= ~CPU_PWR_DOWN_VALUE;
writel(value, cpu_assert_base);
}
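/*
 * The voting sequences above are all the same read-modify-write on a PMU
 * idle-configuration register. A minimal sketch of that pattern as one
 * helper (hypothetical, not part of the vendor sources; the register
 * addresses and the CPU_PWR_DOWN_VALUE / CLUSTER_PWR_DOWN_VALUE bit masks
 * come from the platform headers):
 */
static inline void spacemit_set_pwr_down_vote(unsigned int *reg,
					      unsigned int bits, bool assert)
{
	unsigned int value = readl(reg);

	if (assert)
		value |= bits;		/* vote for power down */
	else
		value &= ~bits;		/* withdraw the vote */
	writel(value, reg);
}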

View file

@@ -0,0 +1,258 @@
#include <sbi/sbi_types.h>
#include <sbi/riscv_asm.h>
#include <sbi_utils/cci/cci.h>
#include <sbi_utils/psci/psci.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_console.h>
#include <sbi_utils/psci/plat/arm/common/arm_def.h>
#include <sbi_utils/irqchip/fdt_irqchip_plic.h>
#include "underly_implement.h"
#define CORE_PWR_STATE(state) \
((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define CLUSTER_PWR_STATE(state) \
((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define SYSTEM_PWR_STATE(state) \
((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
static int spacemit_pwr_domain_on(u_register_t mpidr)
{
/* wake up the cpu */
spacemit_wakeup_cpu(mpidr);
return 0;
}
static void spacemit_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
unsigned int hartid = current_hartid();
if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
/* D1P */
spacemit_top_on(hartid);
}
/*
* Enable CCI coherency for this cluster.
* No need for locks as no other cpu is active at the moment.
*/
if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
spacemit_cluster_on(hartid);
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
/* disable the tcm */
csr_write(CSR_TCMCFG, 0);
#endif
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(hartid));
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
/* enable the tcm */
csr_write(CSR_TCMCFG, 1);
#endif
}
}
static int spacemit_pwr_domain_off_early(const psci_power_state_t *target_state)
{
/* the pending IPI has already been cleared before this point */
/* disable the plic irq */
fdt_plic_context_exit();
/* clear the external irq pending */
csr_clear(CSR_MIP, MIP_MEIP);
csr_clear(CSR_MIP, MIP_SEIP);
/* clear any pending S-mode timer interrupt if this hart has the Sstc extension */
if (sbi_hart_has_extension(sbi_scratch_thishart_ptr(), SBI_HART_EXT_SSTC)) {
csr_write(CSR_STIMECMP, 0xffffffffffffffff);
}
return 0;
}
static void spacemit_pwr_domain_off(const psci_power_state_t *target_state)
{
unsigned int hartid = current_hartid();
if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
/* disable the tcm */
csr_write(CSR_TCMCFG, 0);
#endif
cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(hartid));
spacemit_cluster_off(hartid);
}
if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
spacemit_top_off(hartid);
}
spacemit_assert_cpu(hartid);
}
static void spacemit_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
{
while (1) {
asm volatile ("wfi");
}
}
static void spacemit_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
spacemit_deassert_cpu();
}
static int _spacemit_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
unsigned int pstate = psci_get_pstate_type(power_state);
unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
unsigned int i;
if (req_state == NULL) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
if (pwr_lvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* Sanity check the requested state */
if (pstate == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on power level 0
* Ignore any other power level.
*/
if (pwr_lvl != ARM_PWR_LVL0)
return PSCI_E_INVALID_PARAMS;
req_state->pwr_domain_state[ARM_PWR_LVL0] =
ARM_LOCAL_STATE_RET;
} else {
for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++)
req_state->pwr_domain_state[i] =
ARM_LOCAL_STATE_OFF;
}
/*
* We expect the 'state id' to be zero.
*/
if (psci_get_pstate_id(power_state) != 0U)
return PSCI_E_INVALID_PARAMS;
return PSCI_E_SUCCESS;
}
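/*
 * Worked example of the decoding above (a sketch; PLAT_MAX_PWR_LVL is 2 per
 * the platform headers):
 *
 *   power-down request, pwr_lvl == 2, state id 0:
 *       req_state->pwr_domain_state[0..2] = ARM_LOCAL_STATE_OFF
 *   standby request, pwr_lvl == 0, state id 0:
 *       req_state->pwr_domain_state[ARM_PWR_LVL0] = ARM_LOCAL_STATE_RET
 *   standby request above level 0, or any non-zero state id:
 *       PSCI_E_INVALID_PARAMS
 */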
static int spacemit_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
int rc;
rc = _spacemit_validate_power_state(power_state, req_state);
return rc;
}
static void spacemit_pwr_domain_suspend(const psci_power_state_t *target_state)
{
unsigned int clusterid;
unsigned int hartid = current_hartid();
/*
* This platform currently supports retention only at the cpu level. Just return,
* as nothing needs to be done for retention.
*/
if (CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
return;
if (CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/* Cluster is to be turned off, so disable coherency */
if (CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
clusterid = MPIDR_AFFLVL1_VAL(hartid);
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
/* disable the tcm */
csr_write(CSR_TCMCFG, 0);
#endif
cci_disable_snoop_dvm_reqs(clusterid);
spacemit_cluster_off(hartid);
}
if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
/* D1P */
spacemit_top_off(hartid);
}
spacemit_assert_cpu(hartid);
}
static void spacemit_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
unsigned int clusterid;
unsigned int hartid = current_hartid();
/* Return as nothing is to be done on waking up from retention. */
if (CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
return;
if (CORE_PWR_STATE(target_state) != ARM_LOCAL_STATE_OFF) {
sbi_printf("%s:%d\n", __func__, __LINE__);
sbi_hart_hang();
}
/*
* Perform the common cluster specific operations i.e enable coherency
* if this cluster was off.
*/
if (CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
clusterid = MPIDR_AFFLVL1_VAL(hartid);
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
/* disable the tcm */
csr_write(CSR_TCMCFG, 0);
#endif
cci_enable_snoop_dvm_reqs(clusterid);
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
/* enable the tcm */
csr_write(CSR_TCMCFG, 1);
#endif
spacemit_cluster_on(hartid);
}
if (SYSTEM_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
/* D1P */
spacemit_top_on(hartid);
}
/* withdraw this cpu's power-down vote */
spacemit_deassert_cpu();
}
static void spacemit_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
csr_clear(CSR_MIE, MIP_SSIP | MIP_MSIP | MIP_STIP | MIP_MTIP | MIP_SEIP | MIP_MEIP);
}
static const plat_psci_ops_t spacemit_psci_ops = {
.cpu_standby = NULL,
.pwr_domain_on = spacemit_pwr_domain_on,
.pwr_domain_on_finish = spacemit_pwr_domain_on_finish,
.pwr_domain_off_early = spacemit_pwr_domain_off_early,
.pwr_domain_off = spacemit_pwr_domain_off,
.pwr_domain_pwr_down_wfi = spacemit_pwr_domain_pwr_down_wfi,
.pwr_domain_on_finish_late = spacemit_pwr_domain_on_finish_late,
.validate_power_state = spacemit_validate_power_state,
.pwr_domain_suspend = spacemit_pwr_domain_suspend,
.pwr_domain_suspend_pwrdown_early = spacemit_pwr_domain_suspend_pwrdown_early,
.pwr_domain_suspend_finish = spacemit_pwr_domain_suspend_finish,
};
int plat_setup_psci_ops(uintptr_t sec_entrypoint, const plat_psci_ops_t **psci_ops)
{
*psci_ops = &spacemit_psci_ops;
return 0;
}

View file

@@ -0,0 +1,14 @@
#ifndef __UNDERLY_IMPLEMENT__H__
#define __UNDERLY_IMPLEMENT__H__
#include <sbi/sbi_types.h>
void spacemit_top_on(u_register_t mpidr);
void spacemit_top_off(u_register_t mpidr);
void spacemit_cluster_on(u_register_t mpidr);
void spacemit_cluster_off(u_register_t mpidr);
void spacemit_wakeup_cpu(u_register_t mpidr);
void spacemit_assert_cpu(u_register_t mpidr);
void spacemit_deassert_cpu(void);
#endif

View file

@@ -0,0 +1,26 @@
#include <sbi_utils/psci/psci.h>
static unsigned char plat_power_domain_tree_desc[] = {
/* No of root nodes */
1,
/* Num of children for the root node */
0,
/* Num of children for the first cluster node */
0,
/* Num of children for the second cluster node */
0,
};
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
unsigned int cluster = MPIDR_AFFLVL1_VAL(mpidr);
unsigned int core = MPIDR_AFFLVL0_VAL(mpidr);
return (cluster == 0) ? core :
(plat_power_domain_tree_desc[2] + core);
}
unsigned char *plat_get_power_domain_tree_desc(void)
{
return plat_power_domain_tree_desc;
}
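/*
 * A worked example (a sketch, assuming both clusters come up with four cores
 * each, so the zero-initialized child counts above are filled in as
 * { 1, 2, 4, 4 } by the cold-boot topology scan): for an mpidr with
 * cluster 1 and core 2, plat_core_pos_by_mpidr() returns
 * plat_power_domain_tree_desc[2] + 2 = 6, i.e. cluster-1 cores are numbered
 * after the four cores of cluster 0.
 */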

View file

@@ -131,6 +131,11 @@ int uart8250_init(unsigned long base, u32 in_freq, u32 baudrate, u32 reg_shift,
/* Set scratchpad */
set_reg(UART_SCR_OFFSET, 0x00);
#ifdef CONFIG_PLATFORM_SPACEMIT_K1X
/* enable the UART unit (vendor-specific IER bit 6) */
set_reg(UART_IER_OFFSET, 0x40);
#endif
sbi_console_set_device(&uart8250_console);
return 0;

View file

@@ -15,6 +15,7 @@
#include <sbi/sbi_error.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_timer.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/timer/aclint_mtimer.h>
static unsigned long mtimer_ptr_offset;
@@ -183,6 +184,7 @@ int aclint_mtimer_cold_init(struct aclint_mtimer_data *mt,
u32 i;
int rc;
struct sbi_scratch *scratch;
const struct sbi_platform *sbi = sbi_platform_thishart_ptr();
/* Sanity checks */
if (!mt ||
@@ -218,7 +220,7 @@ int aclint_mtimer_cold_init(struct aclint_mtimer_data *mt,
/* Update MTIMER pointer in scratch space */
for (i = 0; i < mt->hart_count; i++) {
scratch = sbi_hartid_to_scratch(mt->first_hartid + i);
scratch = sbi_hartid_to_scratch(sbi->hart_index2id[i]);
if (!scratch)
return SBI_ENOENT;
mtimer_set_hart_data_ptr(scratch, mt);
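/*
 * A minimal sketch of why the lookup above switches to hart_index2id
 * (hypothetical hart numbering, not taken from any vendor manual): on a
 * platform that enumerates its harts as hart_index2id[] = { 0, 4 }, MTIMER
 * index 1 must resolve to hartid 4; the old first_hartid + i computation
 * would have resolved it to hartid 1 and fetched the wrong scratch area.
 */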

View file

@@ -52,6 +52,42 @@ config PLATFORM_STARFIVE_JH7110
bool "StarFive JH7110 support"
default n
config PLATFORM_SPACEMIT_K1PRO
bool "Spacemit K1pro support"
default n
if PLATFORM_SPACEMIT_K1PRO
config PLATFORM_SPACEMIT_K1PRO_FPGA
bool "Spacemit K1pro board fpga"
default n
config PLATFORM_SPACEMIT_K1PRO_QEMU
bool "Spacemit K1pro board qemu"
default n
config PLATFORM_SPACEMIT_K1PRO_SIM
bool "Spacemit K1pro board sim"
default n
config PLATFORM_SPACEMIT_K1PRO_VERIFY
bool "Spacemit K1pro board verify"
default n
endif
config PLATFORM_SPACEMIT_K1X
bool "Spacemit K1x support"
default n
if PLATFORM_SPACEMIT_K1X
config PLATFORM_SPACEMIT_K1X_FPGA
bool "Spacemit K1x board fpag"
default n
config PLATFORM_SPACEMIT_K1X_EVB
bool "Spacemit K1x board evb"
default n
endif
source "$(OPENSBI_SRC_DIR)/platform/generic/andes/Kconfig"
endif

View file

@@ -4,6 +4,7 @@ CONFIG_PLATFORM_RENESAS_RZFIVE=y
CONFIG_PLATFORM_SIFIVE_FU540=y
CONFIG_PLATFORM_SIFIVE_FU740=y
CONFIG_PLATFORM_STARFIVE_JH7110=y
CONFIG_PLATFORM_SPACEMIT_K1PRO=y
CONFIG_FDT_GPIO=y
CONFIG_FDT_GPIO_SIFIVE=y
CONFIG_FDT_GPIO_STARFIVE=y
@@ -37,4 +38,4 @@ CONFIG_FDT_SERIAL_XILINX_UARTLITE=y
CONFIG_FDT_TIMER=y
CONFIG_FDT_TIMER_MTIMER=y
CONFIG_FDT_TIMER_PLMT=y
CONFIG_SERIAL_SEMIHOSTING=y
CONFIG_SERIAL_SEMIHOSTING=n

View file

@@ -0,0 +1,16 @@
CONFIG_PLATFORM_SPACEMIT_K1X=y
CONFIG_PLATFORM_SPACEMIT_K1X_FPGA=y
# CONFIG_SBI_ECALL_TIME is not set
CONFIG_FDT_IPI=y
CONFIG_FDT_IPI_MSWI=y
CONFIG_FDT_IRQCHIP=y
CONFIG_FDT_IRQCHIP_PLIC=y
CONFIG_FDT_RESET=y
CONFIG_FDT_RESET_HTIF=y
CONFIG_FDT_RESET_SIFIVE_TEST=y
CONFIG_FDT_RESET_SUNXI_WDT=y
CONFIG_FDT_RESET_THEAD=y
CONFIG_FDT_SERIAL=y
CONFIG_FDT_SERIAL_UART8250=y
CONFIG_ARM_PSCI_SUPPORT=y
CONFIG_ARM_NON_SCMI_SUPPORT=y

View file

@@ -0,0 +1,16 @@
CONFIG_PLATFORM_SPACEMIT_K1X=y
CONFIG_PLATFORM_SPACEMIT_K1X_FPGA=y
# CONFIG_SBI_ECALL_TIME is not set
CONFIG_FDT_IPI=y
CONFIG_FDT_IPI_MSWI=y
CONFIG_FDT_IRQCHIP=y
CONFIG_FDT_IRQCHIP_PLIC=y
CONFIG_FDT_RESET=y
CONFIG_FDT_RESET_HTIF=y
CONFIG_FDT_RESET_SIFIVE_TEST=y
CONFIG_FDT_RESET_SUNXI_WDT=y
CONFIG_FDT_RESET_THEAD=y
CONFIG_FDT_SERIAL=y
CONFIG_FDT_SERIAL_UART8250=y
CONFIG_ARM_PSCI_SUPPORT=y
CONFIG_ARM_NON_SCMI_SUPPORT=y

View file

@@ -0,0 +1,16 @@
CONFIG_PLATFORM_SPACEMIT_K1X=y
CONFIG_PLATFORM_SPACEMIT_K1X_FPGA=y
# CONFIG_SBI_ECALL_TIME is not set
CONFIG_FDT_IPI=y
CONFIG_FDT_IPI_MSWI=y
CONFIG_FDT_IRQCHIP=y
CONFIG_FDT_IRQCHIP_PLIC=y
CONFIG_FDT_RESET=y
CONFIG_FDT_RESET_HTIF=y
CONFIG_FDT_RESET_SIFIVE_TEST=y
CONFIG_FDT_RESET_SUNXI_WDT=y
CONFIG_FDT_RESET_THEAD=y
CONFIG_FDT_SERIAL=y
CONFIG_FDT_SERIAL_UART8250=y
CONFIG_ARM_PSCI_SUPPORT=y
CONFIG_ARM_NON_SCMI_SUPPORT=y

View file

@@ -0,0 +1,16 @@
CONFIG_PLATFORM_SPACEMIT_K1X=y
CONFIG_PLATFORM_SPACEMIT_K1X_EVB=y
# CONFIG_SBI_ECALL_TIME is not set
CONFIG_FDT_IPI=y
CONFIG_FDT_IPI_MSWI=y
CONFIG_FDT_IRQCHIP=y
CONFIG_FDT_IRQCHIP_PLIC=y
CONFIG_FDT_RESET=y
CONFIG_FDT_RESET_HTIF=y
CONFIG_FDT_RESET_SIFIVE_TEST=y
CONFIG_FDT_RESET_SUNXI_WDT=y
CONFIG_FDT_RESET_THEAD=y
CONFIG_FDT_SERIAL=y
CONFIG_FDT_SERIAL_UART8250=y
CONFIG_ARM_PSCI_SUPPORT=y
CONFIG_ARM_NON_SCMI_SUPPORT=y

View file

@@ -0,0 +1,13 @@
#ifndef __K1X_CORE_COMMON_H__
#define __K1X_CORE_COMMON_H__
#define CSR_MHCR 0x7c1
#define CSR_MSETUP 0x7c0
#define CSR_MHINT 0x7c5
#define CSR_ML2SETUP 0x7F0
#define CACHE_LINE_SIZE (64)
#define CACHE_INV_ADDR_Msk (0xffffffffffffffff << 6)
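/*
 * A minimal illustration (not part of the vendor header) of how these two
 * constants combine: CACHE_INV_ADDR_Msk clears the low 6 address bits, i.e.
 * it aligns an address down to the 64-byte CACHE_LINE_SIZE boundary before
 * it is handed to a cache-maintenance operation.
 *
 *   aligned = addr & CACHE_INV_ADDR_Msk;   (e.g. 0x1003f becomes 0x10000)
 */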
#endif /* __K1X_CORE_COMMON_H__ */

View file

@@ -0,0 +1,72 @@
#ifndef __K1X_EVB_CONFIG_H__
#define __K1X_EVB_CONFIG_H__
/***************************cci******************************/
#define PLATFORM_CCI_ADDR (0xD8500000)
#define PLAT_CCI_CLUSTER0_IFACE_IX 0
#define PLAT_CCI_CLUSTER1_IFACE_IX 1
#define PLAT_CCI_CLUSTER2_IFACE_IX 2
#define PLAT_CCI_CLUSTER3_IFACE_IX 3
#define PLAT_CCI_MAP static const int cci_map[] = { \
PLAT_CCI_CLUSTER0_IFACE_IX, \
PLAT_CCI_CLUSTER1_IFACE_IX, \
PLAT_CCI_CLUSTER2_IFACE_IX, \
PLAT_CCI_CLUSTER3_IFACE_IX, \
};
/***************************cpu******************************/
#define CPU_RESET_BASE_ADDR (0xD428292C)
#define C0_RVBADDR_LO_ADDR (0xD4282DB0)
#define C0_RVBADDR_HI_ADDR (0xD4282DB4)
#define C1_RVBADDR_LO_ADDR (0xD4282C00 + 0x2B0)
#define C1_RVBADDR_HI_ADDR (0xD4282C00 + 0x2B4)
/***************************mailbox***************************/
#define SCMI_MAILBOX_SHARE_MEM (0x2f902080)
#define PLAT_MAILBOX_REG_BASE (0x2f824000)
/****************************scmi*****************************/
#define PLAT_SCMI_DOMAIN_MAP {0, 1, 2, 3}
/*************************cpu topology************************/
#define ARM_SYSTEM_COUNT (1U)
/* this is the max cluster count of this platform */
#define PLATFORM_CLUSTER_COUNT (2U)
/* this is the max core count of this platform */
#define PLATFORM_CORE_COUNT (8U)
/* this is the max number of non-CPU power domains */
#define PSCI_NUM_NON_CPU_PWR_DOMAINS (3U)
/* this is the max cpu cores per cluster */
#define PLATFORM_MAX_CPUS_PER_CLUSTER (4U)
#define CLUSTER_INDEX_IN_CPU_TOPOLOGY (1U)
#define CLUSTER0_INDEX_IN_CPU_TOPOLOGY (2U)
#define CLUSTER1_INDEX_IN_CPU_TOPOLOGY (3U)
#define PSCI_NUM_PWR_DOMAINS \
(ARM_SYSTEM_COUNT + plat_get_power_domain_tree_desc()[CLUSTER_INDEX_IN_CPU_TOPOLOGY] \
+ plat_get_power_domain_tree_desc()[CLUSTER0_INDEX_IN_CPU_TOPOLOGY] + \
plat_get_power_domain_tree_desc()[CLUSTER1_INDEX_IN_CPU_TOPOLOGY])
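/*
 * A worked example (a sketch, assuming the runtime topology scan reports two
 * clusters with four cores each): PSCI_NUM_PWR_DOMAINS then evaluates to
 * 1 + 2 + 4 + 4 = 11, i.e. one system domain, two cluster domains and eight
 * cpu domains.
 */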
/***************************psci pwr level********************/
/* This is the power level corresponding to a CPU */
#define PSCI_CPU_PWR_LVL 0U
#define PLAT_MAX_PWR_LVL 2U
/***************************cpu affin*************************/
#define MPIDR_AFFINITY0_MASK 0x3U
#define MPIDR_AFFINITY1_MASK 0xfU
#define MPIDR_AFF0_SHIFT 0U
#define MPIDR_AFF1_SHIFT 2U
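/*
 * A minimal sketch of how the masks and shifts above are intended to be used
 * (the K1X_AFF*_VAL helper names are illustrative, not part of the vendor
 * header): affinity level 0 (core within cluster) sits in bits [1:0] and
 * affinity level 1 (cluster) in bits [5:2], so hartid 6 decodes to
 * cluster 1, core 2.
 */
#define K1X_AFF0_VAL(mpidr)	(((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFINITY0_MASK)
#define K1X_AFF1_VAL(mpidr)	(((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFINITY1_MASK)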
/**************************cluster power domain***************/
#define CLUSTER0_L2_CACHE_FLUSH_REG_BASE (0xD84401B0)
#define CLUSTER1_L2_CACHE_FLUSH_REG_BASE (0xD84401B4)
#define L2_CACHE_FLUSH_REQUEST_BIT_OFFSET (0x1)
#define L2_CACHE_FLUSH_DONE_BIT_OFFSET (0x3)
#endif /* __K1X_EVB_CONFIG_H__ */

View file

@@ -0,0 +1,73 @@
#ifndef __K1X_FPGA_CONFIG_H__
#define __K1X_FPGA_CONFIG_H__
/***************************cci******************************/
#define PLATFORM_CCI_ADDR (0xD8500000)
#define PLAT_CCI_CLUSTER0_IFACE_IX 0
#define PLAT_CCI_CLUSTER1_IFACE_IX 1
#define PLAT_CCI_CLUSTER2_IFACE_IX 2
#define PLAT_CCI_CLUSTER3_IFACE_IX 3
#define PLAT_CCI_MAP static const int cci_map[] = { \
PLAT_CCI_CLUSTER0_IFACE_IX, \
PLAT_CCI_CLUSTER1_IFACE_IX, \
PLAT_CCI_CLUSTER2_IFACE_IX, \
PLAT_CCI_CLUSTER3_IFACE_IX, \
};
/***************************cpu******************************/
#define CPU_RESET_BASE_ADDR (0xD428292C)
#define C0_RVBADDR_LO_ADDR (0xD4282DB0)
#define C0_RVBADDR_HI_ADDR (0xD4282DB4)
#define C1_RVBADDR_LO_ADDR (0xD4282C00 + 0x2B0)
#define C1_RVBADDR_HI_ADDR (0xD4282C00 + 0x2B4)
/***************************mailbox***************************/
#define SCMI_MAILBOX_SHARE_MEM (0x2f902080)
#define PLAT_MAILBOX_REG_BASE (0x2f824000)
/****************************scmi*****************************/
#define PLAT_SCMI_SINGLE_CLUSTER_DOMAIN_MAP {0, 1, 2, 3}
#define PLAT_SCMI_DOUBLE_CLUSTER_DOMAIN_MAP {0, 1, 4, 5}
/*************************cpu topology************************/
#define ARM_SYSTEM_COUNT (1U)
/* this is the max cluster count of this platform */
#define PLATFORM_CLUSTER_COUNT (2U)
/* this is the max core count of this platform */
#define PLATFORM_CORE_COUNT (8U)
/* this is the max number of non-CPU power domains */
#define PSCI_NUM_NON_CPU_PWR_DOMAINS (3U)
/* this is the max cpu cores per cluster */
#define PLATFORM_MAX_CPUS_PER_CLUSTER (4U)
#define CLUSTER_INDEX_IN_CPU_TOPOLOGY (1U)
#define CLUSTER0_INDEX_IN_CPU_TOPOLOGY (2U)
#define CLUSTER1_INDEX_IN_CPU_TOPOLOGY (3U)
#define PSCI_NUM_PWR_DOMAINS \
(ARM_SYSTEM_COUNT + plat_get_power_domain_tree_desc()[CLUSTER_INDEX_IN_CPU_TOPOLOGY] \
+ plat_get_power_domain_tree_desc()[CLUSTER0_INDEX_IN_CPU_TOPOLOGY] + \
plat_get_power_domain_tree_desc()[CLUSTER1_INDEX_IN_CPU_TOPOLOGY])
/***************************psci pwr level********************/
/* This is the power level corresponding to a CPU */
#define PSCI_CPU_PWR_LVL 0U
#define PLAT_MAX_PWR_LVL 2U
/***************************cpu affin*************************/
#define MPIDR_AFFINITY0_MASK 0x3U
#define MPIDR_AFFINITY1_MASK 0xfU
#define MPIDR_AFF0_SHIFT 0U
#define MPIDR_AFF1_SHIFT 2U
/**************************cluster power domain***************/
#define CLUSTER0_L2_CACHE_FLUSH_REG_BASE (0xD84401B0)
#define CLUSTER1_L2_CACHE_FLUSH_REG_BASE (0xD84401B4)
#define L2_CACHE_FLUSH_REQUEST_BIT_OFFSET (0x1)
#define L2_CACHE_FLUSH_DONE_BIT_OFFSET (0x3)
#endif /* __K1X_FPGA_CONFIG_H__ */

View file

@@ -0,0 +1,30 @@
#ifndef __SPACEMIT_CONFIG_H__
#define __SPACEMIT_CONFIG_H__
#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO)
#include "./k1pro/core_common.h"
#if defined(CONFIG_PLATFORM_SPACEMIT_K1PRO_FPGA)
#include "./k1pro/k1pro_fpga.h"
#elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO_QEMU)
#include "./k1pro/k1pro_qemu.h"
#elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO_SIM)
#include "./k1pro/k1pro_sim.h"
#elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO_VERIFY)
#include "./k1pro/k1pro_verify.h"
#endif
#endif
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
#include "./k1x/core_common.h"
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X_FPGA)
#include "./k1x/k1x_fpga.h"
#elif defined(CONFIG_PLATFORM_SPACEMIT_K1X_EVB)
#include "./k1x/k1x_evb.h"
#endif
#endif
#endif /* __SPACEMIT_CONFIG_H__ */

View file

@@ -22,7 +22,7 @@ platform-objs-y += platform.o
platform-objs-y += platform_override_modules.o
# Blobs to build
FW_TEXT_START=0x80000000
FW_TEXT_START?=0x80000000
FW_DYNAMIC=y
FW_JUMP=y
ifeq ($(PLATFORM_RISCV_XLEN), 32)

View file

@@ -0,0 +1,31 @@
/dts-v1/;
/ {
description = "Configuration to load OpenSBI before U-Boot";
#address-cells = <2>;
fit,fdt-list = "of-list";
images {
opensbi {
description = "OpenSBI fw_dynamic Firmware";
type = "firmware";
os = "opensbi";
arch = "riscv";
compression = "none";
load = <0x0 0x0>;
entry = <0x0 0x0>;
data = /incbin/("./fw_dynamic.bin");
hash-1 {
algo = "crc32";
};
};
};
configurations {
default = "config_1";
config_1 {
description = "opensbi FIT config";
firmware = "opensbi";
};
};
};

View file

@@ -0,0 +1,7 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
carray-platform_override_modules-$(CONFIG_PLATFORM_SPACEMIT_K1PRO)$(CONFIG_PLATFORM_SPACEMIT_K1X) += spacemit_k1
platform-objs-$(CONFIG_PLATFORM_SPACEMIT_K1PRO)$(CONFIG_PLATFORM_SPACEMIT_K1X) += spacemit/spacemit_k1.o
firmware-its-$(CONFIG_PLATFORM_SPACEMIT_K1PRO)$(CONFIG_PLATFORM_SPACEMIT_K1X) += spacemit/fw_dynamic.its

View file

@@ -0,0 +1,194 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2022 Spacemit.
*/
#include <libfdt.h>
#include <platform_override.h>
#include <sbi/riscv_asm.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_io.h>
#include <sbi/sbi_const.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/riscv_atomic.h>
#include <sbi/sbi_platform.h>
#include <sbi_utils/fdt/fdt_helper.h>
#include <sbi_utils/psci/psci_lib.h>
#include <sbi_utils/cci/cci.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi_utils/psci/psci.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/cache/cacheflush.h>
#include <../../../lib/utils/psci/psci_private.h>
#include <sbi_utils/psci/plat/arm/common/plat_arm.h>
#include <sbi_utils/psci/plat/common/platform.h>
#include <spacemit/spacemit_config.h>
extern struct sbi_platform platform;
PLAT_CCI_MAP
static void wakeup_other_core(void)
{
int i;
u32 hartid, clusterid, cluster_enabled = 0;
unsigned int cur_hartid = current_hartid();
struct sbi_scratch *scratch = sbi_hartid_to_scratch(cur_hartid);
#if defined(CONFIG_PLATFORM_SPACEMIT_K1X)
/* set the other cpus' boot entry */
writel(scratch->warmboot_addr & 0xffffffff, (u32 *)C0_RVBADDR_LO_ADDR);
writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)C0_RVBADDR_HI_ADDR);
writel(scratch->warmboot_addr & 0xffffffff, (u32 *)C1_RVBADDR_LO_ADDR);
writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)C1_RVBADDR_HI_ADDR);
#elif defined(CONFIG_PLATFORM_SPACEMIT_K1PRO)
for (i = 0; i < platform.hart_count; i++) {
hartid = platform.hart_index2id[i];
unsigned long core_index = MPIDR_AFFLVL1_VAL(hartid) * PLATFORM_MAX_CPUS_PER_CLUSTER
+ MPIDR_AFFLVL0_VAL(hartid);
writel(scratch->warmboot_addr & 0xffffffff, (u32 *)(CORE0_RVBADDR_LO_ADDR + core_index * CORE_RVBADDR_STEP));
writel((scratch->warmboot_addr >> 32) & 0xffffffff, (u32 *)(CORE0_RVBADDR_HI_ADDR + core_index * CORE_RVBADDR_STEP));
}
#endif
#ifdef CONFIG_ARM_PSCI_SUPPORT
unsigned char *cpu_topology = plat_get_power_domain_tree_desc();
#endif
// hart0 has already booted up
for (i = 0; i < platform.hart_count; i++) {
hartid = platform.hart_index2id[i];
clusterid = MPIDR_AFFLVL1_VAL(hartid);
/* we only enable snooping for cluster0 here */
if (0 == (cluster_enabled & (1 << clusterid))) {
cluster_enabled |= 1 << clusterid;
if (0 == clusterid) {
cci_enable_snoop_dvm_reqs(clusterid);
}
#ifdef CONFIG_ARM_PSCI_SUPPORT
cpu_topology[CLUSTER_INDEX_IN_CPU_TOPOLOGY]++;
#endif
}
#ifdef CONFIG_ARM_PSCI_SUPPORT
/* we only support 2 clusters for now */
if (clusterid == PLATFORM_CLUSTER_COUNT - 1)
cpu_topology[CLUSTER1_INDEX_IN_CPU_TOPOLOGY]++;
else
cpu_topology[CLUSTER0_INDEX_IN_CPU_TOPOLOGY]++;
#endif
}
}
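/*
 * A worked example of the topology bookkeeping above (a sketch, assuming two
 * clusters with four harts each are listed in hart_index2id): the loop bumps
 * the cluster count once per newly seen cluster and one per-cluster core
 * count per hart, so plat_get_power_domain_tree_desc() ends up describing
 * { 1, 2, 4, 4 }: one root node, two clusters, and four cores under each,
 * which is the shape psci_setup() consumes during cold boot.
 */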
/*
* Platform early initialization.
*/
static int spacemit_k1_early_init(bool cold_boot, const struct fdt_match *match)
{
if (cold_boot) {
/* initialize the cci */
cci_init(PLATFORM_CCI_ADDR, cci_map, array_size(cci_map));
/* enable dcache */
csi_enable_dcache();
/* wake up the other cores */
wakeup_other_core();
/* initialize the power controller */
#ifdef CONFIG_ARM_SCMI_PROTOCOL_SUPPORT
plat_arm_pwrc_setup();
#endif
} else {
#ifdef CONFIG_ARM_PSCI_SUPPORT
psci_warmboot_entrypoint();
#endif
}
return 0;
}
#ifdef CONFIG_ARM_PSCI_SUPPORT
/** Start (or power-up) the given hart */
static int spacemit_hart_start(unsigned int hartid, unsigned long saddr)
{
return psci_cpu_on_start(hartid, saddr);
}
/**
* Stop (or power-down) the current hart from running. This call
* doesn't expect to return if success.
*/
static int spacemit_hart_stop(void)
{
psci_cpu_off();
return 0;
}
static int spacemit_hart_suspend(unsigned int suspend_type)
{
psci_cpu_suspend(suspend_type, 0, 0);
return 0;
}
static void spacemit_hart_resume(void)
{
psci_warmboot_entrypoint();
}
static const struct sbi_hsm_device spacemit_hsm_ops = {
.name = "spacemit-hsm",
.hart_start = spacemit_hart_start,
.hart_stop = spacemit_hart_stop,
.hart_suspend = spacemit_hart_suspend,
.hart_resume = spacemit_hart_resume,
};
#endif
/*
* Platform final initialization.
*/
static int spacemit_k1_final_init(bool cold_boot, const struct fdt_match *match)
{
#ifdef CONFIG_ARM_PSCI_SUPPORT
/* for cold boot, we build the cpu topology structure */
if (cold_boot) {
sbi_hsm_set_device(&spacemit_hsm_ops);
return psci_setup();
}
#endif
return 0;
}
static bool spacemit_cold_boot_allowed(u32 hartid, const struct fdt_match *match)
{
/* enable core snoop */
csr_set(CSR_ML2SETUP, 1 << (hartid % PLATFORM_MAX_CPUS_PER_CLUSTER));
/* handle the resume-from-suspend path */
if ((__sbi_hsm_hart_get_state(hartid) == SBI_HSM_STATE_SUSPENDED) && (hartid == 0))
return false;
return hartid == 0;
}
static const struct fdt_match spacemit_k1_match[] = {
{ .compatible = "spacemit,k1-pro" },
{ .compatible = "spacemit,k1x" },
{ },
};
const struct platform_override spacemit_k1 = {
.match_table = spacemit_k1_match,
.early_init = spacemit_k1_early_init,
.final_init = spacemit_k1_final_init,
.cold_boot_allowed = spacemit_cold_boot_allowed,
};

BIN
tools/mkimage Executable file

Binary file not shown.