Merge pull request #667 from soby-mathew/sm/PSCI_lib

Introduce PSCI library
This commit is contained in:
danh-arm 2016-07-25 12:29:52 +01:00 committed by GitHub
commit 3dd9835f8a
72 changed files with 761 additions and 675 deletions

View File

@ -226,20 +226,24 @@ BL_COMMON_SOURCES += common/bl_common.c \
plat/common/aarch64/platform_helpers.S \
${STDLIB_SRCS}
INCLUDES += -Iinclude/bl1 \
-Iinclude/bl31 \
-Iinclude/bl31/services \
-Iinclude/common \
-Iinclude/drivers \
-Iinclude/drivers/arm \
-Iinclude/drivers/auth \
-Iinclude/drivers/io \
-Iinclude/drivers/ti/uart \
-Iinclude/lib \
-Iinclude/lib/aarch64 \
-Iinclude/lib/cpus/aarch64 \
-Iinclude/plat/common \
${PLAT_INCLUDES} \
INCLUDES += -Iinclude/bl1 \
-Iinclude/bl31 \
-Iinclude/common \
-Iinclude/common/aarch64 \
-Iinclude/drivers \
-Iinclude/drivers/arm \
-Iinclude/drivers/auth \
-Iinclude/drivers/io \
-Iinclude/drivers/ti/uart \
-Iinclude/lib \
-Iinclude/lib/aarch64 \
-Iinclude/lib/cpus/aarch64 \
-Iinclude/lib/el3_runtime \
-Iinclude/lib/el3_runtime/aarch64 \
-Iinclude/lib/psci \
-Iinclude/plat/common \
-Iinclude/services \
${PLAT_INCLUDES} \
${SPD_INCLUDES}

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@ -33,9 +33,9 @@ BL1_SOURCES += bl1/bl1_main.c \
bl1/aarch64/bl1_entrypoint.S \
bl1/aarch64/bl1_exceptions.S \
bl1/bl1_context_mgmt.c \
common/aarch64/context.S \
common/context_mgmt.c \
lib/cpus/aarch64/cpu_helpers.S \
lib/el3_runtime/aarch64/context.S \
lib/el3_runtime/aarch64/context_mgmt.c \
plat/common/plat_bl1_common.c
ifeq (${TRUSTED_BOARD_BOOT},1)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -31,9 +31,10 @@
#include <arch.h>
#include <bl_common.h>
#include <el3_common_macros.S>
#include <xlat_tables.h>
.globl bl31_entrypoint
.globl bl31_warm_entrypoint
/* -----------------------------------------------------
* bl31_entrypoint() is the cold boot entrypoint,
@ -132,3 +133,60 @@ func bl31_entrypoint
b el3_exit
endfunc bl31_entrypoint
/* --------------------------------------------------------------------
* This CPU has been physically powered up. It is either resuming from
* suspend or has simply been turned on. In both cases, call the BL31
* warmboot entrypoint
* --------------------------------------------------------------------
*/
func bl31_warm_entrypoint
/*
* On the warm boot path, most of the EL3 initialisations performed by
* 'el3_entrypoint_common' must be skipped:
*
* - Only when the platform bypasses the BL1/BL31 entrypoint by
* programming the reset address do we need to set the CPU endianness.
* In other cases, we assume this has been taken care of by the
* entrypoint code.
*
* - No need to determine the type of boot, we know it is a warm boot.
*
* - Do not try to distinguish between primary and secondary CPUs, this
* notion only exists for a cold boot.
*
* - No need to initialise the memory or the C runtime environment,
* it has been done once and for all on the cold boot path.
*/
el3_entrypoint_common \
_set_endian=PROGRAMMABLE_RESET_ADDRESS \
_warm_boot_mailbox=0 \
_secondary_cold_boot=0 \
_init_memory=0 \
_init_c_runtime=0 \
_exception_vectors=runtime_exceptions
/* --------------------------------------------
* Enable the MMU with the DCache disabled. It
* is safe to use stacks allocated in normal
* memory as a result. All memory accesses are
* marked nGnRnE when the MMU is disabled. So
* all the stack writes will make it to memory.
* All memory accesses are marked Non-cacheable
* when the MMU is enabled but D$ is disabled.
* So used stack memory is guaranteed to be
* visible immediately after the MMU is enabled.
* Enabling the DCache at the same time as the
* MMU can lead to speculatively fetched and
* possibly stale stack memory being read from
* other caches. This can lead to coherency
* issues.
* --------------------------------------------
*/
mov x0, #DISABLE_DCACHE
bl bl31_plat_enable_mmu
bl psci_warmboot_entrypoint
b el3_exit
endfunc bl31_warm_entrypoint

View File

@ -28,45 +28,22 @@
# POSSIBILITY OF SUCH DAMAGE.
#
include lib/psci/psci_lib.mk
BL31_SOURCES += bl31/bl31_main.c \
bl31/cpu_data_array.c \
bl31/runtime_svc.c \
bl31/interrupt_mgmt.c \
bl31/aarch64/bl31_arch_setup.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/cpu_data.S \
bl31/aarch64/runtime_exceptions.S \
bl31/aarch64/crash_reporting.S \
bl31/bl31_context_mgmt.c \
common/aarch64/context.S \
common/context_mgmt.c \
lib/cpus/aarch64/cpu_helpers.S \
lib/locks/exclusive/spinlock.S \
common/runtime_svc.c \
services/std_svc/std_svc_setup.c \
services/std_svc/psci/psci_off.c \
services/std_svc/psci/psci_on.c \
services/std_svc/psci/psci_suspend.c \
services/std_svc/psci/psci_common.c \
services/std_svc/psci/psci_entry.S \
services/std_svc/psci/psci_helpers.S \
services/std_svc/psci/psci_main.c \
services/std_svc/psci/psci_setup.c \
services/std_svc/psci/psci_system_off.c
ifeq (${USE_COHERENT_MEM}, 1)
BL31_SOURCES += lib/locks/bakery/bakery_lock_coherent.c
else
BL31_SOURCES += lib/locks/bakery/bakery_lock_normal.c
endif
${PSCI_LIB_SOURCES}
ifeq (${ENABLE_PMF}, 1)
BL31_SOURCES += lib/pmf/pmf_main.c
endif
ifeq (${ENABLE_PSCI_STAT}, 1)
BL31_SOURCES += services/std_svc/psci/psci_stat.c
endif
BL31_LINKERFILE := bl31/bl31.ld.S
# Flag used to indicate if Crash reporting via console should be included

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -30,6 +30,7 @@
#include <assert.h>
#include <bl31.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
@ -130,4 +131,4 @@ void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
else
cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
}
#endif
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -59,6 +59,12 @@ static uint32_t next_image_type = NON_SECURE;
void bl31_lib_init(void)
{
cm_init();
/*
* Initialize the PSCI library here. This also does EL3 architectural
* setup.
*/
psci_setup((uintptr_t)bl31_warm_entrypoint);
}
/*******************************************************************************
@ -74,9 +80,6 @@ void bl31_main(void)
NOTICE("BL31: %s\n", version_string);
NOTICE("BL31: %s\n", build_message);
/* Perform remaining generic architectural setup from EL3 */
bl31_arch_setup();
/* Perform platform setup in BL31 */
bl31_platform_setup();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -40,24 +40,20 @@
#include <string.h>
#include <xlat_tables.h>
unsigned long page_align(unsigned long value, unsigned dir)
uintptr_t page_align(uintptr_t value, unsigned dir)
{
unsigned long page_size = 1 << FOUR_KB_SHIFT;
/* Round up the limit to the next page boundary */
if (value & (page_size - 1)) {
value &= ~(page_size - 1);
if (value & (PAGE_SIZE - 1)) {
value &= ~(PAGE_SIZE - 1);
if (dir == UP)
value += page_size;
value += PAGE_SIZE;
}
return value;
}
static inline unsigned int is_page_aligned (unsigned long addr) {
const unsigned long page_size = 1 << FOUR_KB_SHIFT;
return (addr & (page_size - 1)) == 0;
static inline unsigned int is_page_aligned (uintptr_t addr) {
return (addr & (PAGE_SIZE - 1)) == 0;
}
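As a rough standalone sketch of the rounding behaviour these helpers implement (the 4 KB PAGE_SIZE is taken from the firmware headers; the UP/DOWN values and the main harness are assumptions for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE	0x1000u		/* 4 KB, as in the firmware headers */
    #define UP		1		/* assumed values, illustration only */
    #define DOWN		0

    static uintptr_t page_align(uintptr_t value, unsigned dir)
    {
    	/* Round to the next (UP) or previous (DOWN) page boundary. */
    	if (value & (PAGE_SIZE - 1)) {
    		value &= ~(uintptr_t)(PAGE_SIZE - 1);
    		if (dir == UP)
    			value += PAGE_SIZE;
    	}
    	return value;
    }

    int main(void)
    {
    	/* 0x1234 rounds down to 0x1000 and up to 0x2000; aligned input is unchanged. */
    	printf("0x%lx 0x%lx 0x%lx\n",
    	       (unsigned long)page_align(0x1234, DOWN),
    	       (unsigned long)page_align(0x1234, UP),
    	       (unsigned long)page_align(0x2000, UP));
    	return 0;
    }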
/******************************************************************************
@ -65,8 +61,8 @@ static inline unsigned int is_page_aligned (unsigned long addr) {
* given the extents of free memory.
* Return 1 if it is free, 0 otherwise.
*****************************************************************************/
static int is_mem_free(uint64_t free_base, size_t free_size,
uint64_t addr, size_t size)
static int is_mem_free(uintptr_t free_base, size_t free_size,
uintptr_t addr, size_t size)
{
return (addr >= free_base) && (addr + size <= free_base + free_size);
}
@ -77,9 +73,9 @@ static int is_mem_free(uint64_t free_base, size_t free_size,
* size of the smallest chunk of free memory surrounding the sub-region in
* 'small_chunk_size'.
*****************************************************************************/
static unsigned int choose_mem_pos(uint64_t mem_start, uint64_t mem_end,
uint64_t submem_start, uint64_t submem_end,
size_t *small_chunk_size)
static unsigned int choose_mem_pos(uintptr_t mem_start, uintptr_t mem_end,
uintptr_t submem_start, uintptr_t submem_end,
size_t *small_chunk_size)
{
size_t top_chunk_size, bottom_chunk_size;
@ -106,8 +102,8 @@ static unsigned int choose_mem_pos(uint64_t mem_start, uint64_t mem_end,
* reflect the memory usage.
* The caller must ensure the memory to reserve is free.
*****************************************************************************/
void reserve_mem(uint64_t *free_base, size_t *free_size,
uint64_t addr, size_t size)
void reserve_mem(uintptr_t *free_base, size_t *free_size,
uintptr_t addr, size_t size)
{
size_t discard_size;
size_t reserved_size;
@ -127,26 +123,26 @@ void reserve_mem(uint64_t *free_base, size_t *free_size,
if (pos == BOTTOM)
*free_base = addr + size;
VERBOSE("Reserved 0x%lx bytes (discarded 0x%lx bytes %s)\n",
VERBOSE("Reserved 0x%zx bytes (discarded 0x%zx bytes %s)\n",
reserved_size, discard_size,
pos == TOP ? "above" : "below");
}
static void dump_load_info(unsigned long image_load_addr,
unsigned long image_size,
static void dump_load_info(uintptr_t image_load_addr,
size_t image_size,
const meminfo_t *mem_layout)
{
INFO("Trying to load image at address 0x%lx, size = 0x%lx\n",
image_load_addr, image_size);
INFO("Trying to load image at address %p, size = 0x%zx\n",
(void *)image_load_addr, image_size);
INFO("Current memory layout:\n");
INFO(" total region = [0x%lx, 0x%lx]\n", mem_layout->total_base,
mem_layout->total_base + mem_layout->total_size);
INFO(" free region = [0x%lx, 0x%lx]\n", mem_layout->free_base,
mem_layout->free_base + mem_layout->free_size);
INFO(" total region = [%p, %p]\n", (void *)mem_layout->total_base,
(void *)(mem_layout->total_base + mem_layout->total_size));
INFO(" free region = [%p, %p]\n", (void *)mem_layout->free_base,
(void *)(mem_layout->free_base + mem_layout->free_size));
}
/* Generic function to return the size of an image */
unsigned long image_size(unsigned int image_id)
size_t image_size(unsigned int image_id)
{
uintptr_t dev_handle;
uintptr_t image_handle;
@ -367,9 +363,8 @@ int load_auth_image(meminfo_t *mem_layout,
******************************************************************************/
void print_entry_point_info(const entry_point_info_t *ep_info)
{
INFO("Entry point address = 0x%llx\n",
(unsigned long long) ep_info->pc);
INFO("SPSR = 0x%lx\n", (unsigned long) ep_info->spsr);
INFO("Entry point address = %p\n", (void *)ep_info->pc);
INFO("SPSR = 0x%x\n", ep_info->spsr);
#define PRINT_IMAGE_ARG(n) \
VERBOSE("Argument #" #n " = 0x%llx\n", \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -28,6 +28,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <runtime_svc.h>
@ -42,11 +43,14 @@
* 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
* 'rt_svc_descs' array which contains the SMC handler.
******************************************************************************/
#define RT_SVC_DESCS_START ((uint64_t) (&__RT_SVC_DESCS_START__))
#define RT_SVC_DESCS_END ((uint64_t) (&__RT_SVC_DESCS_END__))
#define RT_SVC_DESCS_START ((uintptr_t) (&__RT_SVC_DESCS_START__))
#define RT_SVC_DESCS_END ((uintptr_t) (&__RT_SVC_DESCS_END__))
uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
static rt_svc_desc_t *rt_svc_descs;
#define RT_SVC_DECS_NUM ((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
/ sizeof(rt_svc_desc_t))
/*******************************************************************************
* Simple routine to sanity check a runtime service descriptor before using it
******************************************************************************/
@ -80,21 +84,20 @@ static int32_t validate_rt_svc_desc(rt_svc_desc_t *desc)
******************************************************************************/
void runtime_svc_init(void)
{
int32_t rc = 0;
uint32_t index, start_idx, end_idx;
uint64_t rt_svc_descs_num;
int rc = 0, index, start_idx, end_idx;
/* Assert that the number of descriptors detected is less than the maximum indices */
assert((RT_SVC_DECS_NUM >= 0) && (RT_SVC_DECS_NUM < MAX_RT_SVCS));
/* If no runtime services are implemented then simply bail out */
rt_svc_descs_num = RT_SVC_DESCS_END - RT_SVC_DESCS_START;
rt_svc_descs_num /= sizeof(rt_svc_desc_t);
if (rt_svc_descs_num == 0)
if (RT_SVC_DECS_NUM == 0)
return;
/* Initialise internal variables to invalid state */
memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));
rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
for (index = 0; index < rt_svc_descs_num; index++) {
for (index = 0; index < RT_SVC_DECS_NUM; index++) {
/*
* An invalid descriptor is an error condition since it is

View File

@ -1779,10 +1779,11 @@ following categories (present as directories in the source code):
the platform.
* **Common code.** This is platform and architecture agnostic code.
* **Library code.** This code comprises functionality commonly used by all
other code.
other code. The PSCI implementation and other EL3 runtime frameworks reside
here as library components.
* **Stage specific.** Code specific to a boot stage.
* **Drivers.**
* **Services.** EL3 runtime services, e.g. PSCI or SPD. Specific SPD services
* **Services.** EL3 runtime services (e.g. SPD). Specific SPD services
reside in the `services/spd` directory (e.g. `services/spd/tspd`).
Each boot loader stage uses code from one or more of the above mentioned

View File

@ -545,7 +545,7 @@ reset vector code to perform the above tasks.
### Function : plat_get_my_entrypoint() [mandatory when PROGRAMMABLE_RESET_ADDRESS == 0]
Argument : void
Return : unsigned long
Return : uintptr_t
This function is called with the MMU and caches disabled
(`SCTLR_EL3.M` = 0 and `SCTLR_EL3.C` = 0). The function is responsible for
@ -748,7 +748,7 @@ provided in [plat/common/aarch64/platform_up_stack.S] and
### Function : plat_get_my_stack()
Argument : void
Return : unsigned long
Return : uintptr_t
This function returns the base address of the normal memory stack that
has been allocated for the current CPU. For BL images that only require a
@ -966,7 +966,7 @@ This function helps fulfill requirements 4 and 5 above.
### Function : bl1_init_bl2_mem_layout() [optional]
Argument : meminfo *, meminfo *, unsigned int, unsigned long
Argument : meminfo *, meminfo *
Return : void
BL1 needs to tell the next stage the amount of secure RAM available

View File

@ -203,7 +203,7 @@ typedef struct non_cpu_pwr_domain_node {
} non_cpu_pd_node_t;
typedef struct cpu_pwr_domain_node {
unsigned long mpidr;
u_register_t mpidr;
/* Index of the parent power domain node */
unsigned int parent_node;

View File

@ -95,8 +95,7 @@ handler will be responsible for all SMC Functions within a given service type.
ARM Trusted Firmware has a [`services`] directory in the source tree under which
each owning entity can place the implementation of its runtime service. The
[PSCI] implementation is located here in the [`services/std_svc/psci`]
directory.
[PSCI] implementation is located in the [`lib/psci`] directory.
Runtime service sources will need to include the [`runtime_svc.h`] header file.
@ -114,7 +113,7 @@ initialization and call handler functions.
is also used for diagnostic purposes
* `_start` and `_end` values must be based on the `OEN_*` values defined in
[`smcc_helpers.h`]
[`smcc.h`]
* `_type` must be one of `SMC_TYPE_FAST` or `SMC_TYPE_STD`
@ -124,12 +123,12 @@ initialization and call handler functions.
* `_smch` is the SMC handler function with the `rt_svc_handle` signature:
typedef uint64_t (*rt_svc_handle)(uint32_t smc_fid,
uint64_t x1, uint64_t x2,
uint64_t x3, uint64_t x4,
typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
u_register_t x1, u_register_t x2,
u_register_t x3, u_register_t x4,
void *cookie,
void *handle,
uint64_t flags);
u_register_t flags);
Details of the requirements and behavior of the two callbacks are provided in
the following sections.
@ -189,12 +188,12 @@ SMC calls for a service are forwarded by the framework to the service's SMC
handler function (`_smch` in the service declaration). This function must have
the following signature:
typedef uint64_t (*rt_svc_handle)(uint32_t smc_fid,
uint64_t x1, uint64_t x2,
uint64_t x3, uint64_t x4,
void *reserved,
void *handle,
uint64_t flags);
typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
u_register_t x1, u_register_t x2,
u_register_t x3, u_register_t x4,
void *cookie,
void *handle,
u_register_t flags);
The handler is responsible for:
@ -253,10 +252,9 @@ The handler is responsible for:
SMC_RET3(handle, x0, x1, x2);
SMC_RET4(handle, x0, x1, x2, x3);
The `reserved` parameter to the handler is reserved for future use and can be
ignored. The value returned by a SMC handler is also reserved for future use -
completion of the handler function must always be via one of the `SMC_RETn()`
macros.
The `cookie` parameter to the handler is reserved for future use and can be
ignored. The `handle` is returned by the SMC handler - completion of the
handler function must always be via one of the `SMC_RETn()` macros.
NOTE: The PSCI and Test Secure-EL1 Payload Dispatcher services do not follow
all of the above requirements yet.
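To make the updated prototype concrete, here is a minimal, hypothetical handler written against the new `rt_svc_handle_t` signature and completed via the `SMC_RETn()` macros. The service name and function ID are made up for this sketch, and the `SMC_UNK` error return is assumed to be the usual "unknown SMC" value:

    #include <runtime_svc.h>
    #include <smcc_helpers.h>

    /* Hypothetical function ID for this sketch, not a real service. */
    #define EXAMPLE_SVC_ADD		0x83000001

    static uintptr_t example_svc_smc_handler(uint32_t smc_fid,
    					 u_register_t x1, u_register_t x2,
    					 u_register_t x3, u_register_t x4,
    					 void *cookie, void *handle,
    					 u_register_t flags)
    {
    	switch (smc_fid) {
    	case EXAMPLE_SVC_ADD:
    		/* Place x1 + x2 in x0 of the calling context and return. */
    		SMC_RET1(handle, x1 + x2);
    	default:
    		SMC_RET1(handle, SMC_UNK);
    	}
    }

Such a handler would then be registered through the service descriptor fields (`_start`, `_end`, `_type`, `_smch`) described above.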
@ -299,12 +297,11 @@ provide this information....
_Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved._
[Firmware Design]: ./firmware-design.md
[Firmware Design]: ./firmware-design.md
[`services`]: ../services
[`services/std_svc/psci`]: ../services/std_svc/psci
[`lib/psci`]: ../lib/psci
[`std_svc_setup.c`]: ../services/std_svc/std_svc_setup.c
[`runtime_svc.h`]: ../include/bl31/runtime_svc.h
[`smcc_helpers.h`]: ../include/common/smcc_helpers.h
[`runtime_svc.h`]: ../include/common/runtime_svc.h
[`smcc.h`]: ../include/lib/smcc.h
[PSCI]: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf "Power State Coordination Interface PDD (ARM DEN 0022C)"
[SMCCC]: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -104,7 +104,7 @@ typedef enum rn_types {
#define WAIT_FOR_DOMAIN_CTRL_OP_COMPLETION(region_id, stat_reg_offset, \
op_reg_offset, rn_id_map) \
{ \
uint64_t status_reg; \
unsigned long long status_reg; \
do { \
status_reg = ccn_reg_read((ccn_plat_desc->periphbase), \
(region_id), \
@ -208,7 +208,7 @@ typedef enum rn_types {
/*
* Helper function to return number of set bits in bitmap
*/
static inline unsigned int count_set_bits(uint64_t bitmap)
static inline unsigned int count_set_bits(unsigned long long bitmap)
{
unsigned int count = 0;

View File

@ -250,7 +250,7 @@ void gicv3_rdistif_base_addrs_probe(uintptr_t *rdistif_base_addrs,
uintptr_t gicr_base,
mpidr_hash_fn mpidr_to_core_pos)
{
unsigned long mpidr;
u_register_t mpidr;
unsigned int proc_num;
unsigned long long typer_val;
uintptr_t rdistif_base = gicr_base;
@ -320,7 +320,7 @@ void gicv3_secure_spis_configure(uintptr_t gicd_base,
unsigned int int_grp)
{
unsigned int index, irq_num;
uint64_t gic_affinity_val;
unsigned long long gic_affinity_val;
assert((int_grp == INTR_GROUP1S) || (int_grp == INTR_GROUP0));
/* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -36,11 +36,11 @@
/*******************************************************************************
* Function prototypes
******************************************************************************/
void bl31_arch_setup(void);
void bl31_next_el_arch_setup(uint32_t security_state);
void bl31_set_next_image_type(uint32_t type);
uint32_t bl31_get_next_image_type(void);
void bl31_prepare_next_image_entry(void);
void bl31_register_bl32_init(int32_t (*)(void));
void bl31_warm_entrypoint(void);
#endif /* __BL31_H__ */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -31,6 +31,7 @@
#define __ASM_MACROS_S__
#include <arch.h>
#include <asm_macros_common.S>
.macro func_prologue
@ -104,81 +105,6 @@
.endif
.endm
/*
* This macro is used to create a function label and place the
* code into a separate text section based on the function name
* to enable elimination of unused code during linking
*/
.macro func _name
.section .text.\_name, "ax"
.type \_name, %function
.func \_name
\_name:
.endm
/*
* This macro is used to mark the end of a function.
*/
.macro endfunc _name
.endfunc
.size \_name, . - \_name
.endm
/*
* These macros are used to create function labels for deprecated
* APIs. If ERROR_DEPRECATED is non zero, the callers of these APIs
* will fail to link and cause build failure.
*/
#if ERROR_DEPRECATED
.macro func_deprecated _name
func deprecated\_name
.endm
.macro endfunc_deprecated _name
endfunc deprecated\_name
.endm
#else
.macro func_deprecated _name
func \_name
.endm
.macro endfunc_deprecated _name
endfunc \_name
.endm
#endif
/*
* Helper assembler macro to count trailing zeros. The output is
* populated in the `TZ_COUNT` symbol.
*/
.macro count_tz _value, _tz_count
.if \_value
count_tz "(\_value >> 1)", "(\_tz_count + 1)"
.else
.equ TZ_COUNT, (\_tz_count - 1)
.endif
.endm
/*
* This macro declares an array of 1 or more stacks, properly
* aligned and in the requested section
*/
#define DEFAULT_STACK_ALIGN (1 << 6) /* In case the caller doesn't provide alignment */
.macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN
count_tz \_align, 0
.if (\_align - (1 << TZ_COUNT))
.error "Incorrect stack alignment specified (Must be a power of 2)."
.endif
.if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0)
.error "Stack size not correctly aligned"
.endif
.section \_section, "aw", %nobits
.align TZ_COUNT
\_name:
.space ((\_count) * (\_size)), 0
.endm
#if ENABLE_PLAT_COMPAT
/*
* This macro calculates the base address of an MP stack using the

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:

View File

@ -0,0 +1,111 @@
/*
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ASM_MACROS_COMMON_S__
#define __ASM_MACROS_COMMON_S__
#include <arch.h>
/*
* This macro is used to create a function label and place the
* code into a separate text section based on the function name
* to enable elimination of unused code during linking
*/
.macro func _name
.section .text.\_name, "ax"
.type \_name, %function
.func \_name
\_name:
.endm
/*
* This macro is used to mark the end of a function.
*/
.macro endfunc _name
.endfunc
.size \_name, . - \_name
.endm
/*
* These macros are used to create function labels for deprecated
* APIs. If ERROR_DEPRECATED is non zero, the callers of these APIs
* will fail to link and cause build failure.
*/
#if ERROR_DEPRECATED
.macro func_deprecated _name
func deprecated\_name
.endm
.macro endfunc_deprecated _name
endfunc deprecated\_name
.endm
#else
.macro func_deprecated _name
func \_name
.endm
.macro endfunc_deprecated _name
endfunc \_name
.endm
#endif
/*
* Helper assembler macro to count trailing zeros. The output is
* populated in the `TZ_COUNT` symbol.
*/
.macro count_tz _value, _tz_count
.if \_value
count_tz "(\_value >> 1)", "(\_tz_count + 1)"
.else
.equ TZ_COUNT, (\_tz_count - 1)
.endif
.endm
/*
* This macro declares an array of 1 or more stacks, properly
* aligned and in the requested section
*/
#define DEFAULT_STACK_ALIGN (1 << 6) /* In case the caller doesn't provide alignment */
.macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN
count_tz \_align, 0
.if (\_align - (1 << TZ_COUNT))
.error "Incorrect stack alignment specified (Must be a power of 2)."
.endif
.if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0)
.error "Stack size not correctly aligned"
.endif
.section \_section, "aw", %nobits
.align TZ_COUNT
\_name:
.space ((\_count) * (\_size)), 0
.endm
#endif /* __ASM_MACROS_COMMON_S__ */

View File

@ -137,6 +137,7 @@
#include <cassert.h>
#include <stdint.h>
#include <stddef.h>
#include <types.h>
#include <utils.h> /* To retain compatibility */
/*
@ -144,28 +145,28 @@
* BL images
*/
#if SEPARATE_CODE_AND_RODATA
extern unsigned long __TEXT_START__;
extern unsigned long __TEXT_END__;
extern unsigned long __RODATA_START__;
extern unsigned long __RODATA_END__;
extern uintptr_t __TEXT_START__;
extern uintptr_t __TEXT_END__;
extern uintptr_t __RODATA_START__;
extern uintptr_t __RODATA_END__;
#else
extern unsigned long __RO_START__;
extern unsigned long __RO_END__;
extern uintptr_t __RO_START__;
extern uintptr_t __RO_END__;
#endif
#if IMAGE_BL2
extern unsigned long __BL2_END__;
extern uintptr_t __BL2_END__;
#elif IMAGE_BL2U
extern unsigned long __BL2U_END__;
extern uintptr_t __BL2U_END__;
#elif IMAGE_BL31
extern unsigned long __BL31_END__;
extern uintptr_t __BL31_END__;
#elif IMAGE_BL32
extern unsigned long __BL32_END__;
extern uintptr_t __BL32_END__;
#endif /* IMAGE_BLX */
#if USE_COHERENT_MEM
extern unsigned long __COHERENT_RAM_START__;
extern unsigned long __COHERENT_RAM_END__;
extern uintptr_t __COHERENT_RAM_START__;
extern uintptr_t __COHERENT_RAM_END__;
#endif
@ -174,21 +175,21 @@ extern unsigned long __COHERENT_RAM_END__;
* memory is available for its use and how much is already used.
******************************************************************************/
typedef struct meminfo {
uint64_t total_base;
uintptr_t total_base;
size_t total_size;
uint64_t free_base;
uintptr_t free_base;
size_t free_size;
} meminfo_t;
typedef struct aapcs64_params {
unsigned long arg0;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
unsigned long arg4;
unsigned long arg5;
unsigned long arg6;
unsigned long arg7;
u_register_t arg0;
u_register_t arg1;
u_register_t arg2;
u_register_t arg3;
u_register_t arg4;
u_register_t arg5;
u_register_t arg6;
u_register_t arg7;
} aapcs64_params_t;
/***************************************************************************
@ -284,7 +285,7 @@ CASSERT(ENTRY_POINT_INFO_ARGS_OFFSET == \
__builtin_offsetof(entry_point_info_t, args), \
assert_BL31_args_offset_mismatch);
CASSERT(sizeof(unsigned long) ==
CASSERT(sizeof(uintptr_t) ==
__builtin_offsetof(entry_point_info_t, spsr) - \
__builtin_offsetof(entry_point_info_t, pc), \
assert_entrypoint_and_spsr_should_be_adjacent);
@ -292,8 +293,8 @@ CASSERT(sizeof(unsigned long) ==
/*******************************************************************************
* Function & variable prototypes
******************************************************************************/
unsigned long page_align(unsigned long, unsigned);
unsigned long image_size(unsigned int image_id);
uintptr_t page_align(uintptr_t, unsigned);
size_t image_size(unsigned int image_id);
int load_image(meminfo_t *mem_layout,
unsigned int image_id,
uintptr_t image_base,
@ -307,8 +308,8 @@ int load_auth_image(meminfo_t *mem_layout,
extern const char build_message[];
extern const char version_string[];
void reserve_mem(uint64_t *free_base, size_t *free_size,
uint64_t addr, size_t size);
void reserve_mem(uintptr_t *free_base, size_t *free_size,
uintptr_t addr, size_t size);
void print_entry_point_info(const entry_point_info_t *ep_info);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -66,14 +66,14 @@ typedef int32_t (*rt_svc_init_t)(void);
* can be accessed using the handle pointer. The cookie parameter is reserved
* for future use
*/
typedef uint64_t (*rt_svc_handle_t)(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
uint64_t x3,
uint64_t x4,
typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
uint64_t flags);
u_register_t flags);
typedef struct rt_svc_desc {
uint8_t start_oen;
uint8_t end_oen;
@ -127,8 +127,8 @@ CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \
* Function & variable prototypes
******************************************************************************/
void runtime_svc_init(void);
extern uint64_t __RT_SVC_DESCS_START__;
extern uint64_t __RT_SVC_DESCS_END__;
extern uintptr_t __RT_SVC_DESCS_START__;
extern uintptr_t __RT_SVC_DESCS_END__;
void init_crash_reporting(void);
#endif /*__ASSEMBLY__*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -41,6 +41,7 @@
#include <mmio.h>
#include <stdint.h>
#include <types.h>
/* GICv3 Re-distributor interface registers & shifts */
@ -74,7 +75,7 @@
/*******************************************************************************
* Function prototypes
******************************************************************************/
uintptr_t gicv3_get_rdist(uintptr_t gicr_base, uint64_t mpidr);
uintptr_t gicv3_get_rdist(uintptr_t gicr_base, u_register_t mpidr);
/*******************************************************************************
* GIC Redistributor interface accessors

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -170,6 +170,7 @@
#ifndef __ASSEMBLY__
#include <stdint.h>
#include <types.h>
#define gicv3_is_intr_id_special_identifier(id) \
(((id) >= PENDING_G1S_INTID) && ((id) <= GIC_SPURIOUS_INTERRUPT))
@ -234,7 +235,7 @@
* a hash function. Otherwise, the "Processor Number" field will be used to
* access the array elements.
******************************************************************************/
typedef unsigned int (*mpidr_hash_fn)(unsigned long mpidr);
typedef unsigned int (*mpidr_hash_fn)(u_register_t mpidr);
typedef struct gicv3_driver_data {
uintptr_t gicd_base;

View File

@ -0,0 +1,86 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SMCC_HELPERS_H__
#define __SMCC_HELPERS_H__
#include <smcc.h>
#ifndef __ASSEMBLY__
#include <context.h>
/* Convenience macros to return from SMC handler */
#define SMC_RET0(_h) { \
return (uint64_t) (_h); \
}
#define SMC_RET1(_h, _x0) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
SMC_RET0(_h); \
}
#define SMC_RET2(_h, _x0, _x1) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
SMC_RET1(_h, (_x0)); \
}
#define SMC_RET3(_h, _x0, _x1, _x2) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
SMC_RET2(_h, (_x0), (_x1)); \
}
#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
SMC_RET3(_h, (_x0), (_x1), (_x2)); \
}
/*
* Convenience macros to access general purpose registers using handle provided
* to SMC handler. These take the offset values defined in context.h
*/
#define SMC_GET_GP(_h, _g) \
read_ctx_reg(get_gpregs_ctx(_h), (_g))
#define SMC_SET_GP(_h, _g, _v) \
write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v))
/*
* Convenience macros to access EL3 context registers using handle provided to
* SMC handler. These take the offset values defined in context.h
*/
#define SMC_GET_EL3(_h, _e) \
read_ctx_reg(get_el3state_ctx(_h), (_e))
#define SMC_SET_EL3(_h, _e, _v) \
write_ctx_reg(get_el3state_ctx(_h), (_e), (_v))
/* Return a UUID in the SMC return registers */
#define SMC_UUID_RET(_h, _uuid) \
SMC_RET4((_h), ((const uint32_t *) &(_uuid))[0], \
((const uint32_t *) &(_uuid))[1], \
((const uint32_t *) &(_uuid))[2], \
((const uint32_t *) &(_uuid))[3])
#endif /*__ASSEMBLY__*/
#endif /* __SMCC_HELPERS_H__ */
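A brief usage sketch of the accessor macros above, assuming a dispatcher that inspects a saved lower-EL context; the function name and log message are illustrative, not part of this change:

    #include <context.h>
    #include <debug.h>
    #include <smcc_helpers.h>
    #include <types.h>

    /* Illustrative only: read x1 out of a saved context and place the original
     * SMC function ID back into x0 of that same context. */
    static void example_inspect_ctx(void *handle, uint32_t smc_fid)
    {
    	u_register_t arg = SMC_GET_GP(handle, CTX_GPREG_X1);

    	INFO("SMC 0x%x called with x1 = %p\n", smc_fid, (void *)arg);

    	SMC_SET_GP(handle, CTX_GPREG_X0, smc_fid);
    }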

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -298,35 +298,35 @@ CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
*/
#define set_aapcs_args0(ctx, x0) do { \
write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0); \
} while (0);
} while (0)
#define set_aapcs_args1(ctx, x0, x1) do { \
write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1); \
set_aapcs_args0(ctx, x0); \
} while (0);
} while (0)
#define set_aapcs_args2(ctx, x0, x1, x2) do { \
write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2); \
set_aapcs_args1(ctx, x0, x1); \
} while (0);
} while (0)
#define set_aapcs_args3(ctx, x0, x1, x2, x3) do { \
write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3); \
set_aapcs_args2(ctx, x0, x1, x2); \
} while (0);
} while (0)
#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4) do { \
write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4); \
set_aapcs_args3(ctx, x0, x1, x2, x3); \
} while (0);
} while (0)
#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5) do { \
write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5); \
set_aapcs_args4(ctx, x0, x1, x2, x3, x4); \
} while (0);
} while (0)
#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6) do { \
write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6); \
set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5); \
} while (0);
} while (0)
#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7) do { \
write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7); \
set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6); \
} while (0);
} while (0)
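The trailing semicolons are dropped because `do { ... } while (0);` inside a macro emits an extra empty statement at each call site, which detaches a following `else`. A small standalone illustration (not firmware code; the macro names are made up):

    #include <stdio.h>

    /* Old style: trailing semicolon baked into the macro body. */
    #define SET_ARG_OLD(v)	do { printf("%d\n", (v)); } while (0);
    /* New style: no trailing semicolon, the caller supplies it. */
    #define SET_ARG_NEW(v)	do { printf("%d\n", (v)); } while (0)

    void demo(int cond)
    {
    	if (cond)
    		SET_ARG_NEW(1);		/* expands to exactly one statement */
    	else
    		SET_ARG_NEW(2);

    	/*
    	 * Swapping in SET_ARG_OLD above fails to compile: the macro's own
    	 * ';' already closes the if-branch, the caller's ';' becomes an
    	 * empty statement, and the following 'else' is left dangling.
    	 */
    }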
/*******************************************************************************
* Function prototypes

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -32,7 +32,6 @@
#define __CM_H__
#include <arch.h>
#include <bl_common.h>
/*******************************************************************************
* Forward declarations
@ -63,9 +62,9 @@ void cm_init_context_by_index(unsigned int cpu_idx,
void cm_prepare_el3_exit(uint32_t security_state);
void cm_el1_sysregs_context_save(uint32_t security_state);
void cm_el1_sysregs_context_restore(uint32_t security_state);
void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint);
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
void cm_set_elr_spsr_el3(uint32_t security_state,
uint64_t entrypoint, uint32_t spsr);
uintptr_t entrypoint, uint32_t spsr);
void cm_write_scr_el3_bit(uint32_t security_state,
uint32_t bit_pos,
uint32_t value);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -78,9 +78,9 @@
******************************************************************************/
typedef struct cpu_data {
void *cpu_context[2];
uint64_t cpu_ops_ptr;
uintptr_t cpu_ops_ptr;
#if CRASH_REPORTING
uint64_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
struct psci_cpu_data psci_svc_cpu_data;
#if PLAT_PCPU_DATA_SIZE
@ -123,14 +123,14 @@ void init_cpu_ops(void);
#define get_cpu_data_by_index(_ix, _m) _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
#define flush_cpu_data(_m) flush_dcache_range((uint64_t) \
#define flush_cpu_data(_m) flush_dcache_range((uintptr_t) \
&(_cpu_data()->_m), \
sizeof(_cpu_data()->_m))
#define inv_cpu_data(_m) inv_dcache_range((uint64_t) \
#define inv_cpu_data(_m) inv_dcache_range((uintptr_t) \
&(_cpu_data()->_m), \
sizeof(_cpu_data()->_m))
#define flush_cpu_data_by_index(_ix, _m) \
flush_dcache_range((uint64_t) \
flush_dcache_range((uintptr_t) \
&(_cpu_data_by_index(_ix)->_m), \
sizeof(_cpu_data_by_index(_ix)->_m))

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -97,6 +97,12 @@
#define PSCI_NUM_CALLS 18
#endif
/* The macros below are used to identify PSCI calls from the SMC function ID */
#define PSCI_FID_MASK 0xffe0u
#define PSCI_FID_VALUE 0u
#define is_psci_fid(_fid) \
(((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
/*******************************************************************************
* PSCI Migrate and friends
******************************************************************************/
@ -296,13 +302,13 @@ typedef struct plat_psci_ops {
* migrate capability etc.
******************************************************************************/
typedef struct spd_pm_ops {
void (*svc_on)(uint64_t target_cpu);
int32_t (*svc_off)(uint64_t __unused);
void (*svc_suspend)(uint64_t max_off_pwrlvl);
void (*svc_on_finish)(uint64_t __unused);
void (*svc_suspend_finish)(uint64_t max_off_pwrlvl);
int32_t (*svc_migrate)(uint64_t from_cpu, uint64_t to_cpu);
int32_t (*svc_migrate_info)(uint64_t *resident_cpu);
void (*svc_on)(u_register_t target_cpu);
int32_t (*svc_off)(u_register_t __unused);
void (*svc_suspend)(u_register_t max_off_pwrlvl);
void (*svc_on_finish)(u_register_t __unused);
void (*svc_suspend_finish)(u_register_t max_off_pwrlvl);
int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu);
int32_t (*svc_migrate_info)(u_register_t *resident_cpu);
void (*svc_system_off)(void);
void (*svc_system_reset)(void);
} spd_pm_ops_t;
@ -326,19 +332,33 @@ int psci_migrate_info_type(void);
long psci_migrate_info_up_cpu(void);
int psci_features(unsigned int psci_fid);
void __dead2 psci_power_down_wfi(void);
void psci_entrypoint(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *);
uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
uint64_t x3,
uint64_t x4,
void psci_arch_setup(void);
/*
* The below API is deprecated. This is now replaced by bl31_warm_entrypoint in
* AArch64.
*/
void psci_entrypoint(void) __deprecated;
/*******************************************************************************
* Forward declarations
******************************************************************************/
struct entry_point_info;
/******************************************************************************
* PSCI Library Interfaces
*****************************************************************************/
u_register_t psci_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
uint64_t flags);
/* PSCI setup function */
int psci_setup(void);
u_register_t flags);
int psci_setup(uintptr_t mailbox_ep);
void psci_warmboot_entrypoint(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
#endif /*__ASSEMBLY__*/
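As a hedged sketch of how these library interfaces are meant to be consumed, a Standard Service SMC handler can use `is_psci_fid()` to recognise PSCI calls and forward them to `psci_smc_handler()`. The body below is illustrative, not the actual `std_svc_setup.c` code, and assumes the usual `SMC_UNK` error value:

    #include <psci.h>
    #include <runtime_svc.h>
    #include <smcc_helpers.h>

    static uintptr_t std_svc_smc_handler(uint32_t smc_fid,
    				     u_register_t x1, u_register_t x2,
    				     u_register_t x3, u_register_t x4,
    				     void *cookie, void *handle,
    				     u_register_t flags)
    {
    	/* PSCI function IDs occupy a contiguous block of the Standard
    	 * Service range, so a mask compare is enough to dispatch them. */
    	if (is_psci_fid(smc_fid))
    		return psci_smc_handler(smc_fid, x1, x2, x3, x4,
    					cookie, handle, flags);

    	SMC_RET1(handle, SMC_UNK);
    }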

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -28,8 +28,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SMCC_HELPERS_H__
#define __SMCC_HELPERS_H__
#ifndef __SMCC_H__
#define __SMCC_H__
/*******************************************************************************
* Bit definitions inside the function id as per the SMC calling convention
@ -83,7 +83,6 @@
#ifndef __ASSEMBLY__
#include <cassert.h>
#include <context.h>
#include <stdint.h>
/* Various flags passed to SMC handlers */
@ -93,45 +92,6 @@
#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE))
#define is_caller_secure(_f) (!(is_caller_non_secure(_f)))
/* Convenience macros to return from SMC handler */
#define SMC_RET0(_h) { \
return (uint64_t) (_h); \
}
#define SMC_RET1(_h, _x0) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \
SMC_RET0(_h); \
}
#define SMC_RET2(_h, _x0, _x1) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \
SMC_RET1(_h, (_x0)); \
}
#define SMC_RET3(_h, _x0, _x1, _x2) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \
SMC_RET2(_h, (_x0), (_x1)); \
}
#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \
write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \
SMC_RET3(_h, (_x0), (_x1), (_x2)); \
}
/*
* Convenience macros to access general purpose registers using handle provided
* to SMC handler. These takes the offset values defined in context.h
*/
#define SMC_GET_GP(_h, _g) \
read_ctx_reg(get_gpregs_ctx(_h), (_g));
#define SMC_SET_GP(_h, _g, _v) \
write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v));
/*
* Convenience macros to access EL3 context registers using handle provided to
* SMC handler. These takes the offset values defined in context.h
*/
#define SMC_GET_EL3(_h, _e) \
read_ctx_reg(get_el3state_ctx(_h), (_e));
#define SMC_SET_EL3(_h, _e, _v) \
write_ctx_reg(get_el3state_ctx(_h), (_e), (_v));
/* The macro below is used to identify a Standard Service SMC call */
#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \
FUNCID_OEN_MASK) == OEN_STD_START)
@ -154,12 +114,5 @@
{ _n0, _n1, _n2, _n3, _n4, _n5 } \
}
/* Return a UUID in the SMC return registers */
#define SMC_UUID_RET(_h, _uuid) \
SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \
((const uint32_t *) &(_uuid))[1], \
((const uint32_t *) &(_uuid))[2], \
((const uint32_t *) &(_uuid))[3])
#endif /*__ASSEMBLY__*/
#endif /* __SMCC_HELPERS_H__ */
#endif /* __SMCC_H__ */

View File

@ -30,6 +30,11 @@
* $FreeBSD$
*/
/*
* Portions copyright (c) 2016, ARM Limited and Contributors.
* All rights reserved.
*/
#ifndef _MACHINE__STDINT_H_
#define _MACHINE__STDINT_H_
@ -38,12 +43,12 @@
#define INT8_C(c) (c)
#define INT16_C(c) (c)
#define INT32_C(c) (c)
#define INT64_C(c) (c ## L)
#define INT64_C(c) (c ## LL)
#define UINT8_C(c) (c)
#define UINT16_C(c) (c)
#define UINT32_C(c) (c ## U)
#define UINT64_C(c) (c ## UL)
#define UINT64_C(c) (c ## ULL)
#define INTMAX_C(c) INT64_C(c)
#define UINTMAX_C(c) UINT64_C(c)
@ -60,19 +65,19 @@
#define INT8_MIN (-0x7f-1)
#define INT16_MIN (-0x7fff-1)
#define INT32_MIN (-0x7fffffff-1)
#define INT64_MIN (-0x7fffffffffffffffL-1)
#define INT64_MIN (-0x7fffffffffffffffLL-1)
/* Maximum values of exact-width signed integer types. */
#define INT8_MAX 0x7f
#define INT16_MAX 0x7fff
#define INT32_MAX 0x7fffffff
#define INT64_MAX 0x7fffffffffffffffL
#define INT64_MAX 0x7fffffffffffffffLL
/* Maximum values of exact-width unsigned integer types. */
#define UINT8_MAX 0xff
#define UINT16_MAX 0xffff
#define UINT32_MAX 0xffffffffU
#define UINT64_MAX 0xffffffffffffffffUL
#define UINT64_MAX 0xffffffffffffffffULL
/*
* ISO/IEC 9899:1999

View File

@ -45,15 +45,15 @@
/*
* Utility functions common to ARM standard platforms
*/
void arm_setup_page_tables(unsigned long total_base,
unsigned long total_size,
unsigned long code_start,
unsigned long code_limit,
unsigned long rodata_start,
unsigned long rodata_limit
void arm_setup_page_tables(uintptr_t total_base,
size_t total_size,
uintptr_t code_start,
uintptr_t code_limit,
uintptr_t rodata_start,
uintptr_t rodata_limit
#if USE_COHERENT_MEM
, unsigned long coh_start,
unsigned long coh_limit
, uintptr_t coh_start,
uintptr_t coh_limit
#endif
);

View File

@ -83,7 +83,7 @@ uint32_t plat_interrupt_type_to_line(uint32_t type,
/*******************************************************************************
* Optional common functions (may be overridden)
******************************************************************************/
unsigned long plat_get_my_stack(void);
uintptr_t plat_get_my_stack(void);
void plat_report_exception(unsigned long);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -42,10 +42,4 @@
#define STD_SVC_VERSION_MAJOR 0x0
#define STD_SVC_VERSION_MINOR 0x1
/* The macros below are used to identify PSCI calls from the SMC function ID */
#define PSCI_FID_MASK 0xffe0u
#define PSCI_FID_VALUE 0u
#define is_psci_fid(_fid) \
(((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
#endif /* __STD_SVC_H__ */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -285,7 +285,7 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
* This function populates ELR_EL3 member of 'cpu_context' pertaining to the
* given security state with the given entrypoint
******************************************************************************/
void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
{
cpu_context_t *ctx;
el3_state_t *state;
@ -303,7 +303,7 @@ void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
* pertaining to the given security state
******************************************************************************/
void cm_set_elr_spsr_el3(uint32_t security_state,
uint64_t entrypoint, uint32_t spsr)
uintptr_t entrypoint, uint32_t spsr)
{
cpu_context_t *ctx;
el3_state_t *state;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:

View File

@ -82,13 +82,13 @@ extern void *__PERCPU_BAKERY_LOCK_SIZE__;
#define write_cache_op(addr, cached) \
do { \
(cached ? dccvac((uint64_t)addr) :\
dcivac((uint64_t)addr));\
(cached ? dccvac((uintptr_t)addr) :\
dcivac((uintptr_t)addr));\
dsbish();\
} while (0)
#define read_cache_op(addr, cached) if (cached) \
dccivac((uint64_t)addr)
dccivac((uintptr_t)addr)
static unsigned int bakery_get_ticket(bakery_lock_t *lock,
unsigned int me, int is_cached)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -35,6 +35,10 @@
.globl psci_do_pwrdown_cache_maintenance
.globl psci_do_pwrup_cache_maintenance
.globl psci_power_down_wfi
#if !ERROR_DEPRECATED
.globl psci_entrypoint
#endif
/* -----------------------------------------------------------------------
* void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
@ -152,3 +156,25 @@ func psci_do_pwrup_cache_maintenance
ldp x29, x30, [sp], #16
ret
endfunc psci_do_pwrup_cache_maintenance
/* -----------------------------------------------------------------------
* void psci_power_down_wfi(void);
* This function is called to indicate to the power controller that it
* is safe to power down this cpu. It should not exit the wfi and will
* be released from reset upon power up.
* -----------------------------------------------------------------------
*/
func psci_power_down_wfi
dsb sy // ensure write buffer empty
wfi
bl plat_panic_handler
endfunc psci_power_down_wfi
/* -----------------------------------------------------------------------
* void psci_entrypoint(void);
* The deprecated entry point for PSCI on warm boot for AArch64.
* -----------------------------------------------------------------------
*/
func_deprecated psci_entrypoint
b bl31_warm_entrypoint
endfunc_deprecated psci_entrypoint

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -596,10 +596,10 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
{
unsigned long ep_attr, sctlr;
u_register_t ep_attr, sctlr;
unsigned int daif, ee, mode;
unsigned long ns_scr_el3 = read_scr_el3();
unsigned long ns_sctlr_el1 = read_sctlr_el1();
u_register_t ns_scr_el3 = read_scr_el3();
u_register_t ns_sctlr_el1 = read_sctlr_el1();
sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
ee = 0;
@ -683,7 +683,7 @@ int psci_validate_entry_point(entry_point_info_t *ep,
* code to enable the gic cpu interface and for a cluster it will enable
* coherency at the interconnect level in addition to gic cpu interface.
******************************************************************************/
void psci_power_up_finish(void)
void psci_warmboot_entrypoint(void)
{
unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
@ -817,7 +817,7 @@ void psci_print_power_domain_map(void)
plat_local_state_type_t state_type;
/* This array maps to the PSCI_STATE_X definitions in psci.h */
static const char *psci_state_type_str[] = {
static const char * const psci_state_type_str[] = {
"ON",
"RETENTION",
"OFF",
@ -839,9 +839,9 @@ void psci_print_power_domain_map(void)
for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
state = psci_get_cpu_local_state_by_idx(idx);
state_type = find_local_state_type(state);
INFO(" CPU Node : MPID 0x%lx, parent_node %d,"
INFO(" CPU Node : MPID 0x%llx, parent_node %d,"
" State %s (0x%x)\n",
psci_cpu_pd_nodes[idx].mpidr,
(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
psci_cpu_pd_nodes[idx].parent_node,
psci_state_type_str[state_type],
psci_get_cpu_local_state_by_idx(idx));

lib/psci/psci_lib.mk Normal file (54 lines)
View File

@ -0,0 +1,54 @@
#
# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
lib/el3_runtime/aarch64/context.S \
lib/el3_runtime/aarch64/cpu_data.S \
lib/el3_runtime/aarch64/context_mgmt.c \
lib/cpus/aarch64/cpu_helpers.S \
lib/locks/exclusive/spinlock.S \
lib/psci/psci_off.c \
lib/psci/psci_on.c \
lib/psci/psci_suspend.c \
lib/psci/psci_common.c \
lib/psci/psci_main.c \
lib/psci/psci_setup.c \
lib/psci/psci_system_off.c \
lib/psci/aarch64/psci_helpers.S
ifeq (${USE_COHERENT_MEM}, 1)
PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c
else
PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_normal.c
endif
ifeq (${ENABLE_PSCI_STAT}, 1)
PSCI_LIB_SOURCES += lib/psci/psci_stat.c
endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -33,8 +33,7 @@
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <runtime_svc.h>
#include <std_svc.h>
#include <smcc.h>
#include <string.h>
#include "psci_private.h"
@ -94,7 +93,7 @@ int psci_cpu_suspend(unsigned int power_state,
is_power_down_state = psci_get_pstate_type(power_state);
/* Sanity check the requested suspend levels */
assert (psci_validate_suspend_req(&state_info, is_power_down_state)
assert(psci_validate_suspend_req(&state_info, is_power_down_state)
== PSCI_E_SUCCESS);
target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
@ -217,7 +216,7 @@ int psci_cpu_off(void)
* The only error cpu_off can return is E_DENIED. So check if that's
* indeed the case.
*/
assert (rc == PSCI_E_DENIED);
assert(rc == PSCI_E_DENIED);
return rc;
}
@ -327,21 +326,21 @@ int psci_features(unsigned int psci_fid)
/*******************************************************************************
* PSCI top level handler for servicing SMCs.
******************************************************************************/
uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
uint64_t x3,
uint64_t x4,
u_register_t psci_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
uint64_t flags)
u_register_t flags)
{
if (is_caller_secure(flags))
SMC_RET1(handle, SMC_UNK);
return SMC_UNK;
/* Check the fid against the capabilities */
if (!(psci_caps & define_psci_cap(smc_fid)))
SMC_RET1(handle, SMC_UNK);
return SMC_UNK;
if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
/* 32-bit PSCI function, clear top parameter bits */
@ -352,31 +351,31 @@ uint64_t psci_smc_handler(uint32_t smc_fid,
switch (smc_fid) {
case PSCI_VERSION:
SMC_RET1(handle, psci_version());
return psci_version();
case PSCI_CPU_OFF:
SMC_RET1(handle, psci_cpu_off());
return psci_cpu_off();
case PSCI_CPU_SUSPEND_AARCH32:
SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
return psci_cpu_suspend(x1, x2, x3);
case PSCI_CPU_ON_AARCH32:
SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
return psci_cpu_on(x1, x2, x3);
case PSCI_AFFINITY_INFO_AARCH32:
SMC_RET1(handle, psci_affinity_info(x1, x2));
return psci_affinity_info(x1, x2);
case PSCI_MIG_AARCH32:
SMC_RET1(handle, psci_migrate(x1));
return psci_migrate(x1);
case PSCI_MIG_INFO_TYPE:
SMC_RET1(handle, psci_migrate_info_type());
return psci_migrate_info_type();
case PSCI_MIG_INFO_UP_CPU_AARCH32:
SMC_RET1(handle, psci_migrate_info_up_cpu());
return psci_migrate_info_up_cpu();
case PSCI_SYSTEM_SUSPEND_AARCH32:
SMC_RET1(handle, psci_system_suspend(x1, x2));
return psci_system_suspend(x1, x2);
case PSCI_SYSTEM_OFF:
psci_system_off();
@ -387,14 +386,14 @@ uint64_t psci_smc_handler(uint32_t smc_fid,
/* We should never return from psci_system_reset() */
case PSCI_FEATURES:
SMC_RET1(handle, psci_features(x1));
return psci_features(x1);
#if ENABLE_PSCI_STAT
case PSCI_STAT_RESIDENCY_AARCH32:
SMC_RET1(handle, psci_stat_residency(x1, x2));
return psci_stat_residency(x1, x2);
case PSCI_STAT_COUNT_AARCH32:
SMC_RET1(handle, psci_stat_count(x1, x2));
return psci_stat_count(x1, x2);
#endif
default:
@ -405,29 +404,29 @@ uint64_t psci_smc_handler(uint32_t smc_fid,
switch (smc_fid) {
case PSCI_CPU_SUSPEND_AARCH64:
SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
return psci_cpu_suspend(x1, x2, x3);
case PSCI_CPU_ON_AARCH64:
SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
return psci_cpu_on(x1, x2, x3);
case PSCI_AFFINITY_INFO_AARCH64:
SMC_RET1(handle, psci_affinity_info(x1, x2));
return psci_affinity_info(x1, x2);
case PSCI_MIG_AARCH64:
SMC_RET1(handle, psci_migrate(x1));
return psci_migrate(x1);
case PSCI_MIG_INFO_UP_CPU_AARCH64:
SMC_RET1(handle, psci_migrate_info_up_cpu());
return psci_migrate_info_up_cpu();
case PSCI_SYSTEM_SUSPEND_AARCH64:
SMC_RET1(handle, psci_system_suspend(x1, x2));
return psci_system_suspend(x1, x2);
#if ENABLE_PSCI_STAT
case PSCI_STAT_RESIDENCY_AARCH64:
SMC_RET1(handle, psci_stat_residency(x1, x2));
return psci_stat_residency(x1, x2);
case PSCI_STAT_COUNT_AARCH64:
SMC_RET1(handle, psci_stat_count(x1, x2));
return psci_stat_count(x1, x2);
#endif
default:
@ -436,5 +435,5 @@ uint64_t psci_smc_handler(uint32_t smc_fid,
}
WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid);
SMC_RET1(handle, SMC_UNK);
return SMC_UNK;
}
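
To make the calling-convention change above concrete, here is a sketch (not taken from this patch) of how an EL3 runtime service consumes the new interface: psci_smc_handler() now hands its result back as a plain return value, and the dispatcher, such as the Standard Service handler changed later in this commit, writes it into the saved SMC context. The wrapper function name is hypothetical; psci_smc_handler, SMC_RET1 and the headers are the ones used elsewhere in this commit.

#include <psci.h>
#include <smcc_helpers.h>

/* Hypothetical dispatcher fragment: forward PSCI SMCs to the library and
 * place the return value in the caller's x0 via SMC_RET1. */
static uintptr_t dispatch_psci(uint32_t smc_fid, u_register_t x1,
			       u_register_t x2, u_register_t x3,
			       u_register_t x4, void *cookie, void *handle,
			       u_register_t flags)
{
	u_register_t ret;

	ret = psci_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
	SMC_RET1(handle, ret);
}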

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -32,11 +32,9 @@
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <debug.h>
#include <context_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
@ -177,7 +175,7 @@ void psci_cpu_on_finish(unsigned int cpu_idx,
* on have completed. Perform enough arch.initialization
* to run in the non-secure address space.
*/
bl31_arch_setup();
psci_arch_setup();
/*
* Lock the CPU spin lock to make sure that the context initialization

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -192,7 +192,6 @@ int psci_validate_power_state(unsigned int power_state,
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
void psci_power_up_finish(void);
int psci_validate_entry_point(entry_point_info_t *ep,
uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
@ -214,7 +213,7 @@ unsigned int psci_is_last_on_cpu(void);
int psci_spd_migrate_info(u_register_t *mpidr);
/* Private exported functions from psci_on.c */
int psci_cpu_on_start(unsigned long target_cpu,
int psci_cpu_on_start(u_register_t target_cpu,
entry_point_info_t *ep);
void psci_cpu_on_finish(unsigned int cpu_idx,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -184,15 +184,17 @@ static void populate_power_domain_tree(const unsigned char *topology)
}
/*******************************************************************************
* This function initializes the power domain topology tree by querying the
* platform. The power domain nodes higher than the CPU are populated in the
* array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
* psci_cpu_pd_nodes[]. The platform exports its static topology map through the
* This function does the architectural setup and takes the warm boot
* entry-point `mailbox_ep` as an argument. The function also initializes the
* power domain topology tree by querying the platform. The power domain nodes
* higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
* the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
* exports its static topology map through the
* populate_power_domain_topology_tree() API. The algorithm populates the
* psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
* topology map. On a platform that implements two clusters of 2 cpus each, and
* supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look
* like this:
* topology map. On a platform that implements two clusters of 2 cpus each,
* and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would
* look like this:
*
* ---------------------------------------------------
* | system node | cluster 0 node | cluster 1 node |
@ -204,10 +206,13 @@ static void populate_power_domain_tree(const unsigned char *topology)
* | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
* ------------------------------------------------
******************************************************************************/
int psci_setup(void)
int psci_setup(uintptr_t mailbox_ep)
{
const unsigned char *topology_tree;
/* Do the Architectural initialization */
psci_arch_setup();
/* Query the topology map from the platform */
topology_tree = plat_get_power_domain_tree_desc();
@ -229,8 +234,8 @@ int psci_setup(void)
*/
psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
plat_setup_psci_ops((uintptr_t)psci_entrypoint,
&psci_plat_pm_ops);
assert(mailbox_ep);
plat_setup_psci_ops(mailbox_ep, &psci_plat_pm_ops);
assert(psci_plat_pm_ops);
/* Initialize the psci capability */
@ -259,3 +264,17 @@ int psci_setup(void)
return 0;
}
/*******************************************************************************
* This duplicates what the primary cpu did after a cold boot in BL1. The same
* needs to be done when a cpu is hotplugged in. This function could also over-
* ride any EL3 setup done by BL1 as this code resides in rw memory.
******************************************************************************/
void psci_arch_setup(void)
{
/* Program the counter frequency */
write_cntfrq_el0(plat_get_syscnt_freq2());
/* Initialize the cpu_ops pointer. */
init_cpu_ops();
}
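
As an illustrative sketch (not part of this patch), a platform implementing the two-cluster, two-CPU topology described in the comment above could export its topology map and hand the library a warm boot entry point roughly as follows. The breadth-first "number of children per node" encoding of the descriptor and the psci_setup() call site are assumptions made for illustration; plat_get_power_domain_tree_desc(), psci_setup() and bl31_warm_entrypoint are the names that appear in this commit.

#include <psci.h>
#include <stdint.h>

/* Hypothetical descriptor: 1 system node, 2 clusters, 2 CPUs per cluster. */
static const unsigned char plat_pd_tree_desc[] = {1, 2, 2, 2};

const unsigned char *plat_get_power_domain_tree_desc(void)
{
	return plat_pd_tree_desc;
}

/* The EL3 runtime firmware passes its warm boot entry point when it
 * initialises the library (assumed call site, for illustration only). */
extern void bl31_warm_entrypoint(void);

void psci_library_init_sketch(void)
{
	psci_setup((uintptr_t)bl31_warm_entrypoint);
}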

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -37,7 +37,6 @@
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:

View File

@ -47,7 +47,6 @@
CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 39) &&
IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
#define UNSET_DESC ~0ul
#define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
static uint64_t l1_xlation_table[NUM_L1_ENTRIES]

View File

@ -35,6 +35,7 @@
#include <debug.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables.h>
@ -52,7 +53,7 @@
#define debug_print(...) ((void)0)
#endif
#define UNSET_DESC ~0ul
#define UNSET_DESC ~0ull
static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
@ -313,9 +314,9 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
XLAT_TABLE_ENTRIES_SHIFT;
unsigned level_size = 1 << level_size_shift;
unsigned long long level_index_mask =
((unsigned long long) XLAT_TABLE_ENTRIES_MASK)
<< level_size_shift;
u_register_t level_index_mask =
(u_register_t)(((u_register_t) XLAT_TABLE_ENTRIES_MASK)
<< level_size_shift);
assert(level > 0 && level <= 3);
@ -357,7 +358,7 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
/* Area not covered by a region so need finer table */
uint64_t *new_table = xlat_tables[next_xlat++];
assert(next_xlat <= MAX_XLAT_TABLES);
desc = TABLE_DESC | (uint64_t)new_table;
desc = TABLE_DESC | (uintptr_t)new_table;
/* Recurse to fill in new table */
mm = init_xlation_table_inner(mm, base_va,

View File

@ -127,7 +127,7 @@ poll_mailbox:
endfunc plat_secondary_cold_boot_setup
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
* uintptr_t plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between a cold and warm
* boot. On FVP, this information can be queried from the power

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -41,12 +41,12 @@
*/
ARM_INSTANTIATE_LOCK
unsigned int fvp_pwrc_get_cpu_wkr(unsigned long mpidr)
unsigned int fvp_pwrc_get_cpu_wkr(u_register_t mpidr)
{
return PSYSR_WK(fvp_pwrc_read_psysr(mpidr));
}
unsigned int fvp_pwrc_read_psysr(unsigned long mpidr)
unsigned int fvp_pwrc_read_psysr(u_register_t mpidr)
{
unsigned int rc;
arm_lock_get();
@ -56,21 +56,21 @@ unsigned int fvp_pwrc_read_psysr(unsigned long mpidr)
return rc;
}
void fvp_pwrc_write_pponr(unsigned long mpidr)
void fvp_pwrc_write_pponr(u_register_t mpidr)
{
arm_lock_get();
mmio_write_32(PWRC_BASE + PPONR_OFF, (unsigned int) mpidr);
arm_lock_release();
}
void fvp_pwrc_write_ppoffr(unsigned long mpidr)
void fvp_pwrc_write_ppoffr(u_register_t mpidr)
{
arm_lock_get();
mmio_write_32(PWRC_BASE + PPOFFR_OFF, (unsigned int) mpidr);
arm_lock_release();
}
void fvp_pwrc_set_wen(unsigned long mpidr)
void fvp_pwrc_set_wen(u_register_t mpidr)
{
arm_lock_get();
mmio_write_32(PWRC_BASE + PWKUPR_OFF,
@ -78,7 +78,7 @@ void fvp_pwrc_set_wen(unsigned long mpidr)
arm_lock_release();
}
void fvp_pwrc_clr_wen(unsigned long mpidr)
void fvp_pwrc_clr_wen(u_register_t mpidr)
{
arm_lock_get();
mmio_write_32(PWRC_BASE + PWKUPR_OFF,
@ -86,7 +86,7 @@ void fvp_pwrc_clr_wen(unsigned long mpidr)
arm_lock_release();
}
void fvp_pwrc_write_pcoffr(unsigned long mpidr)
void fvp_pwrc_write_pcoffr(u_register_t mpidr)
{
arm_lock_get();
mmio_write_32(PWRC_BASE + PCOFFR_OFF, (unsigned int) mpidr);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -64,13 +64,13 @@
/*******************************************************************************
* Function & variable prototypes
******************************************************************************/
void fvp_pwrc_write_pcoffr(unsigned long);
void fvp_pwrc_write_ppoffr(unsigned long);
void fvp_pwrc_write_pponr(unsigned long);
void fvp_pwrc_set_wen(unsigned long);
void fvp_pwrc_clr_wen(unsigned long);
unsigned int fvp_pwrc_read_psysr(unsigned long);
unsigned int fvp_pwrc_get_cpu_wkr(unsigned long);
void fvp_pwrc_write_pcoffr(u_register_t);
void fvp_pwrc_write_ppoffr(u_register_t);
void fvp_pwrc_write_pponr(u_register_t);
void fvp_pwrc_set_wen(u_register_t);
void fvp_pwrc_clr_wen(u_register_t);
unsigned int fvp_pwrc_read_psysr(u_register_t);
unsigned int fvp_pwrc_get_cpu_wkr(u_register_t);
#endif /*__ASSEMBLY__*/

View File

@ -206,7 +206,7 @@ func plat_reset_handler
endfunc plat_reset_handler
/* -----------------------------------------------------
* unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
* unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
* Helper function to calculate the core position.
* -----------------------------------------------------
*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -49,7 +49,7 @@ func plat_my_core_pos
endfunc plat_my_core_pos
/* -----------------------------------------------------
* unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
* unsigned int plat_arm_calc_core_pos(u_register_t mpidr)
* Helper function to calculate the core position.
* With this function: CorePos = (ClusterId * 4) +
* CoreId

View File

@ -38,7 +38,7 @@
#include <plat_arm.h>
#include <platform.h>
#define BL31_END (unsigned long)(&__BL31_END__)
#define BL31_END (uintptr_t)(&__BL31_END__)
#if USE_COHERENT_MEM
/*
@ -48,8 +48,8 @@
* __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
* refer to page-aligned addresses.
*/
#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
#define BL31_COHERENT_RAM_BASE (uintptr_t)(&__COHERENT_RAM_START__)
#define BL31_COHERENT_RAM_LIMIT (uintptr_t)(&__COHERENT_RAM_END__)
#endif
/*
@ -130,11 +130,8 @@ void arm_bl31_early_platform_setup(bl31_params_t *from_bl2,
* Tell BL31 where the non-trusted software image
* is located and the entry state information
*/
#ifdef PRELOADED_BL33_BASE
bl33_image_ep_info.pc = PRELOADED_BL33_BASE;
#else
bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
#endif /* PRELOADED_BL33_BASE */
bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

View File

@ -46,8 +46,6 @@ extern const mmap_region_t plat_arm_mmap[];
* conflicts with the definition in plat/common. */
#if ERROR_DEPRECATED
#pragma weak plat_get_syscnt_freq2
#else
#pragma weak plat_get_syscnt_freq
#endif
/*
@ -59,16 +57,16 @@ extern const mmap_region_t plat_arm_mmap[];
* - Read-only data section;
* - Coherent memory region, if applicable.
*/
void arm_setup_page_tables(unsigned long total_base,
unsigned long total_size,
unsigned long code_start,
unsigned long code_limit,
unsigned long rodata_start,
unsigned long rodata_limit
void arm_setup_page_tables(uintptr_t total_base,
size_t total_size,
uintptr_t code_start,
uintptr_t code_limit,
uintptr_t rodata_start,
uintptr_t rodata_limit
#if USE_COHERENT_MEM
,
unsigned long coh_start,
unsigned long coh_limit
uintptr_t coh_start,
uintptr_t coh_limit
#endif
)
{
@ -114,7 +112,11 @@ void arm_setup_page_tables(unsigned long total_base,
uintptr_t plat_get_ns_image_entrypoint(void)
{
#ifdef PRELOADED_BL33_BASE
return PRELOADED_BL33_BASE;
#else
return PLAT_ARM_NS_IMAGE_OFFSET;
#endif
}
/*******************************************************************************
@ -183,15 +185,9 @@ const mmap_region_t *plat_arm_get_mmap(void)
#ifdef ARM_SYS_CNTCTL_BASE
#if ERROR_DEPRECATED
unsigned int plat_get_syscnt_freq2(void)
{
unsigned int counter_base_frequency;
#else
unsigned long long plat_get_syscnt_freq(void)
{
unsigned long long counter_base_frequency;
#endif /* ERROR_DEPRECATED */
/* Read the frequency from Frequency modes table */
counter_base_frequency = mmio_read_32(ARM_SYS_CNTCTL_BASE + CNTFID_OFF);

View File

@ -97,8 +97,8 @@ PLAT_INCLUDES += -Iinclude/common/tbbr \
PLAT_BL_COMMON_SOURCES += lib/xlat_tables/xlat_tables_common.c \
lib/xlat_tables/aarch64/xlat_tables.c \
plat/arm/common/aarch64/arm_common.c \
plat/arm/common/aarch64/arm_helpers.S \
plat/arm/common/arm_common.c \
plat/common/aarch64/plat_common.c
BL1_SOURCES += drivers/arm/sp805/sp805.c \
@ -128,7 +128,7 @@ BL31_SOURCES += plat/arm/common/arm_bl31_setup.c \
plat/arm/common/arm_pm.c \
plat/arm/common/arm_topology.c \
plat/common/aarch64/platform_mp_stack.S \
plat/common/aarch64/plat_psci_common.c
plat/common/plat_psci_common.c
ifneq (${TRUSTED_BOARD_BOOT},0)

View File

@ -70,7 +70,7 @@ poll_mailbox:
endfunc plat_secondary_cold_boot_setup
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
* uintptr_t plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between a cold and a warm
* boot. On CSS platforms, this distinction is based on the contents of
@ -90,7 +90,7 @@ func plat_get_my_entrypoint
endfunc plat_get_my_entrypoint
/* -----------------------------------------------------------
* unsigned int css_calc_core_pos_swap_cluster(uint64_t mpidr)
* unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr)
* Utility function to calculate the core position by
* swapping the cluster order. This is necessary in order to
* match the format of the boot information passed by the SCP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -28,36 +28,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <assert.h>
#include <platform.h>
#include <psci.h>
/*
* The PSCI generic code uses this API to let the platform participate in state
* coordination during a power management operation. It compares the platform
* specific local power states requested by each cpu for a given power domain
* and returns the coordinated target power state that the domain should
* enter. A platform assigns a number to a local power state. This default
* implementation assumes that the platform assigns these numbers in order of
* increasing depth of the power state i.e. for two power states X & Y, if X < Y
* then X represents a shallower power state than Y. As a result, the
* coordinated target local power state for a power domain will be the minimum
* of the requested local power states.
*/
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
const plat_local_state_t *states,
unsigned int ncpu)
{
plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
assert(ncpu);
do {
temp = *states++;
if (temp < target)
target = temp;
} while (--ncpu);
return target;
}
#if !ERROR_DEPRECATED
#include "../plat_psci_common.c"
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -159,7 +159,7 @@ func_deprecated platform_set_stack
endfunc_deprecated platform_set_stack
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
* uintptr_t plat_get_my_stack ()
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -40,7 +40,7 @@
.globl platform_get_stack
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
* uintptr_t plat_get_my_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function returns the stack pointer for a

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -29,23 +29,35 @@
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <cpu_data.h>
#include <platform.h>
#include <psci.h>
/*******************************************************************************
* This duplicates what the primary cpu did after a cold boot in BL1. The same
* needs to be done when a cpu is hotplugged in. This function could also over-
* ride any EL3 setup done by BL1 as this code resides in rw memory.
******************************************************************************/
void bl31_arch_setup(void)
/*
* The PSCI generic code uses this API to let the platform participate in state
* coordination during a power management operation. It compares the platform
* specific local power states requested by each cpu for a given power domain
* and returns the coordinated target power state that the domain should
* enter. A platform assigns a number to a local power state. This default
* implementation assumes that the platform assigns these numbers in order of
* increasing depth of the power state i.e. for two power states X & Y, if X < Y
* then X represents a shallower power state than Y. As a result, the
* coordinated target local power state for a power domain will be the minimum
* of the requested local power states.
*/
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
const plat_local_state_t *states,
unsigned int ncpu)
{
/* Program the counter frequency */
write_cntfrq_el0(plat_get_syscnt_freq2());
plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
/* Initialize the cpu_ops pointer. */
init_cpu_ops();
assert(ncpu);
do {
temp = *states++;
if (temp < target)
target = temp;
} while (--ncpu);
return target;
}
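
A brief worked illustration of the minimum rule above (hypothetical values; the example function and the choice of PLAT_MAX_OFF_STATE == 2 are assumptions, while plat_get_target_pwr_state() and plat_local_state_t are taken from this file):

#include <assert.h>
#include <platform.h>
#include <psci.h>

static void coordination_example(void)
{
	/* Requests from three CPUs sharing one power domain: one CPU wants
	 * to keep running (0), the other two request the deepest state (2). */
	const plat_local_state_t reqs[] = {0, 2, 2};

	/* The default policy picks the shallowest (minimum) request, so the
	 * shared domain must stay on. */
	assert(plat_get_target_pwr_state(1, reqs, 3) == 0);
}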

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@ -36,6 +36,6 @@ endif
PLAT_BL_COMMON_SOURCES += plat/compat/aarch64/plat_helpers_compat.S
BL31_SOURCES += plat/common/aarch64/plat_psci_common.c \
BL31_SOURCES += plat/common/plat_psci_common.c \
plat/compat/plat_pm_compat.c \
plat/compat/plat_topology_compat.c

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@ -52,7 +52,7 @@ BL31_SOURCES += drivers/arm/gic/gic_v2.c \
drivers/delay_timer/delay_timer.c \
drivers/ti/uart/16550_console.S \
plat/common/aarch64/platform_mp_stack.S \
plat/common/aarch64/plat_psci_common.c \
plat/common/plat_psci_common.c \
${COMMON_DIR}/aarch64/tegra_helpers.S \
${COMMON_DIR}/drivers/memctrl/memctrl.c \
${COMMON_DIR}/drivers/pmc/pmc.c \

View File

@ -51,7 +51,7 @@ RK_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \
lib/xlat_tables/aarch64/xlat_tables.c \
plat/common/aarch64/plat_common.c \
plat/common/aarch64/plat_psci_common.c
plat/common/plat_psci_common.c
BL31_SOURCES += ${RK_GIC_SOURCES} \
drivers/arm/cci/cci.c \

View File

@ -50,7 +50,7 @@ RK_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \
lib/xlat_tables/aarch64/xlat_tables.c \
plat/common/aarch64/plat_common.c \
plat/common/aarch64/plat_psci_common.c
plat/common/plat_psci_common.c
BL31_SOURCES += ${RK_GIC_SOURCES} \
drivers/arm/cci/cci.c \

View File

@ -69,9 +69,9 @@ PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \
drivers/arm/gic/v2/gicv2_helpers.c \
drivers/cadence/uart/cdns_console.S \
drivers/console/console.S \
plat/arm/common/aarch64/arm_common.c \
plat/arm/common/aarch64/arm_helpers.S \
plat/arm/common/arm_cci.c \
plat/arm/common/arm_common.c \
plat/arm/common/arm_gicv2.c \
plat/common/plat_gicv2.c \
plat/common/aarch64/plat_common.c \
@ -81,7 +81,7 @@ PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \
BL31_SOURCES += drivers/arm/cci/cci.c \
lib/cpus/aarch64/aem_generic.S \
lib/cpus/aarch64/cortex_a53.S \
plat/common/aarch64/plat_psci_common.c \
plat/common/plat_psci_common.c \
plat/common/aarch64/platform_mp_stack.S \
plat/xilinx/zynqmp/bl31_zynqmp_setup.c \
plat/xilinx/zynqmp/plat_psci.c \

View File

@ -1,111 +0,0 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <el3_common_macros.S>
#include <psci.h>
#include <xlat_tables.h>
.globl psci_entrypoint
.globl psci_power_down_wfi
/* --------------------------------------------------------------------
* This CPU has been physically powered up. It is either resuming from
* suspend or has simply been turned on. In both cases, call the power
* on finisher.
* --------------------------------------------------------------------
*/
func psci_entrypoint
/*
* On the warm boot path, most of the EL3 initialisations performed by
* 'el3_entrypoint_common' must be skipped:
*
* - Only when the platform bypasses the BL1/BL31 entrypoint by
* programming the reset address do we need to set the CPU endianness.
* In other cases, we assume this has been taken care by the
* entrypoint code.
*
* - No need to determine the type of boot, we know it is a warm boot.
*
* - Do not try to distinguish between primary and secondary CPUs, this
* notion only exists for a cold boot.
*
* - No need to initialise the memory or the C runtime environment,
* it has been done once and for all on the cold boot path.
*/
el3_entrypoint_common \
_set_endian=PROGRAMMABLE_RESET_ADDRESS \
_warm_boot_mailbox=0 \
_secondary_cold_boot=0 \
_init_memory=0 \
_init_c_runtime=0 \
_exception_vectors=runtime_exceptions
/* --------------------------------------------
* Enable the MMU with the DCache disabled. It
* is safe to use stacks allocated in normal
* memory as a result. All memory accesses are
* marked nGnRnE when the MMU is disabled. So
* all the stack writes will make it to memory.
* All memory accesses are marked Non-cacheable
* when the MMU is enabled but D$ is disabled.
* So used stack memory is guaranteed to be
* visible immediately after the MMU is enabled
* Enabling the DCache at the same time as the
* MMU can lead to speculatively fetched and
* possibly stale stack memory being read from
* other caches. This can lead to coherency
* issues.
* --------------------------------------------
*/
mov x0, #DISABLE_DCACHE
bl bl31_plat_enable_mmu
bl psci_power_up_finish
b el3_exit
endfunc psci_entrypoint
/* --------------------------------------------
* This function is called to indicate to the
* power controller that it is safe to power
* down this cpu. It should not exit the wfi
* and will be released from reset upon power
* up. 'wfi_spill' is used to catch erroneous
* exits from wfi.
* --------------------------------------------
*/
func psci_power_down_wfi
dsb sy // ensure write buffer empty
wfi
bl plat_panic_handler
endfunc psci_power_down_wfi

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -31,6 +31,7 @@
#include <debug.h>
#include <psci.h>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <std_svc.h>
#include <stdint.h>
#include <uuid.h>
@ -40,36 +41,27 @@ DEFINE_SVC_UUID(arm_svc_uid,
0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2);
/* Setup Standard Services */
static int32_t std_svc_setup(void)
{
/*
* PSCI is the only specification implemented as a Standard Service.
* Invoke PSCI setup from here
*/
return psci_setup();
}
/*
* Top-level Standard Service SMC handler. This handler will in turn dispatch
* calls to PSCI SMC handler
*/
uint64_t std_svc_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
uint64_t x3,
uint64_t x4,
uintptr_t std_svc_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
uint64_t flags)
u_register_t flags)
{
/*
* Dispatch PSCI calls to PSCI SMC handler and return its return
* value
*/
if (is_psci_fid(smc_fid)) {
return psci_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
handle, flags);
SMC_RET1(handle,
psci_smc_handler(smc_fid, x1, x2, x3, x4,
cookie, handle, flags));
}
switch (smc_fid) {
@ -101,6 +93,6 @@ DECLARE_RT_SVC(
OEN_STD_START,
OEN_STD_END,
SMC_TYPE_FAST,
std_svc_setup,
NULL,
std_svc_smc_handler
);