Tegra210B01: SE1 and SE2/PKA1 context save (atomic)

This patch adds the implementation of the SE atomic context save
sequence. The atomic context save always saves to the TZRAM
carveout, so there is no need to declare a context-save buffer or
map an MMU region in TZRAM for the context save. The atomic
context-save routine is responsible for validating the context-save
progress counter, where CTX_SAVE_CNT = 133 (SE1) / 646 (SE2), and the
SE error status, to ensure the context save procedure completes
successfully.

Change-Id: Ic80843902af70e76415530266cb158f668976c42
Signed-off-by: Marvin Hsu <marvinh@nvidia.com>
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
Marvin Hsu 2017-04-11 11:00:48 +08:00 committed by Varun Wadekar
parent 1d49112b2a
commit ce3c97c95b
7 changed files with 757 additions and 8 deletions
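For reference, the post-save validation described above can be summarized in the
following minimal sketch. It assumes the se_private.h / security_engine.h
definitions added by this patch; the helper name is illustrative only and
mirrors the tail of tegra_se_context_save_atomic() in the driver below.

#include <errno.h>
#include <mmio.h>
#include <se_private.h>
#include <security_engine.h>

/* Illustrative sketch (not part of the patch): validate an atomic ctx save */
static int32_t se_validate_ctx_save(const tegra_se_dev_t *se_dev)
{
	uint32_t val;

	/*
	 * CTX_SAVE_CNT must equal the expected context size:
	 * 133 blocks for SE1, 646 blocks for SE2/PKA1.
	 */
	val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
	if (SE_CTX_SAVE_GET_BLK_COUNT(val) != se_dev->ctx_size_blks) {
		return -ECANCELED;
	}

	/* The SE error status must be clear after the save */
	if (tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET) != 0U) {
		return -ENOTSUP;
	}

	return 0;
}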


@@ -0,0 +1,51 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SECURITY_ENGINE_H
#define SECURITY_ENGINE_H
/*******************************************************************************
* Structure definition
******************************************************************************/
/* Security Engine Linked List */
struct tegra_se_ll {
/* DMA buffer address */
uint32_t addr;
/* Data length in DMA buffer */
uint32_t data_len;
};
#define SE_LL_MAX_BUFFER_NUM 4
typedef struct tegra_se_io_lst {
volatile uint32_t last_buff_num;
volatile struct tegra_se_ll buffer[SE_LL_MAX_BUFFER_NUM];
} tegra_se_io_lst_t __attribute__((aligned(4)));
/* SE device structure */
typedef struct tegra_se_dev {
/* Security Engine ID */
const int se_num;
/* SE base address */
const uint64_t se_base;
/* SE context size in AES blocks */
const uint32_t ctx_size_blks;
/* pointer to source linked list buffer */
tegra_se_io_lst_t *src_ll_buf;
/* pointer to destination linked list buffer */
tegra_se_io_lst_t *dst_ll_buf;
} tegra_se_dev_t;
/*******************************************************************************
* Public interface
******************************************************************************/
void tegra_se_init(void);
int tegra_se_suspend(void);
void tegra_se_resume(void);
int tegra_se_save_tzram(void);
#endif /* SECURITY_ENGINE_H */
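For orientation, a minimal caller-side sketch of how the interface above is
intended to be driven. The wrapper function here is hypothetical; the real
wiring is in the plat_setup.c and plat_psci_handlers.c hunks further down.
tegra_se_init() runs once at boot and tegra_se_resume() on SC7 exit.

#include <psci.h>
#include <security_engine.h>

/* Hypothetical SC7-entry helper (illustration only) */
static int sc7_entry_save_security_engine(void)
{
	/* Atomic context save for SE1 and SE2/PKA1 */
	if (tegra_se_suspend() != 0) {
		return PSCI_E_INTERN_FAIL;
	}

	/* Copy TZRAM contents to the shadow TZRAM before rail gating */
	if (tegra_se_save_tzram() != 0) {
		return PSCI_E_INTERN_FAIL;
	}

	return PSCI_E_SUCCESS;
}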


@@ -73,6 +73,11 @@
******************************************************************************/
#define TEGRA_FLOWCTRL_BASE U(0x60007000)
/*******************************************************************************
* Tegra AHB arbitration controller
******************************************************************************/
#define TEGRA_AHB_ARB_BASE 0x6000C000UL
/*******************************************************************************
* Tegra Secure Boot Controller constants
******************************************************************************/
@@ -118,6 +123,15 @@
#define MC_VIDEO_PROTECT_BASE_LO U(0x648)
#define MC_VIDEO_PROTECT_SIZE_MB U(0x64c)
/*******************************************************************************
* Tegra SE constants
******************************************************************************/
#define TEGRA_SE1_BASE U(0x70012000)
#define TEGRA_SE2_BASE U(0x70412000)
#define TEGRA_PKA1_BASE U(0x70420000)
#define TEGRA_SE2_RANGE_SIZE U(0x2000)
#define SE_TZRAM_SECURITY U(0x4)
/*******************************************************************************
* Tegra TZRAM constants
******************************************************************************/


@@ -0,0 +1,226 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef SE_PRIVATE_H
#define SE_PRIVATE_H
#include <stdbool.h>
#include <security_engine.h>
/*
* PMC registers
*/
/* Secure scratch registers */
#define PMC_SECURE_SCRATCH4_OFFSET 0xC0U
#define PMC_SECURE_SCRATCH5_OFFSET 0xC4U
#define PMC_SECURE_SCRATCH6_OFFSET 0x224U
#define PMC_SECURE_SCRATCH7_OFFSET 0x228U
#define PMC_SECURE_SCRATCH120_OFFSET 0xB38U
#define PMC_SECURE_SCRATCH121_OFFSET 0xB3CU
#define PMC_SECURE_SCRATCH122_OFFSET 0xB40U
#define PMC_SECURE_SCRATCH123_OFFSET 0xB44U
/*
* AHB arbitration memory write queue
*/
#define ARAHB_MEM_WRQUE_MST_ID_OFFSET 0xFCU
#define ARAHB_MST_ID_SE2_MASK (0x1U << 13)
#define ARAHB_MST_ID_SE_MASK (0x1U << 14)
/* SE Status register */
#define SE_STATUS_OFFSET 0x800U
#define SE_STATUS_SHIFT 0
#define SE_STATUS_IDLE \
((0U) << SE_STATUS_SHIFT)
#define SE_STATUS_BUSY \
((1U) << SE_STATUS_SHIFT)
#define SE_STATUS(x) \
((x) & ((0x3U) << SE_STATUS_SHIFT))
/* SE config register */
#define SE_CONFIG_REG_OFFSET 0x14U
#define SE_CONFIG_ENC_ALG_SHIFT 12
#define SE_CONFIG_ENC_ALG_AES_ENC \
((1U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG_RNG \
((2U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG_SHA \
((3U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG_RSA \
((4U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG_NOP \
((0U) << SE_CONFIG_ENC_ALG_SHIFT)
#define SE_CONFIG_ENC_ALG(x) \
((x) & ((0xFU) << SE_CONFIG_ENC_ALG_SHIFT))
#define SE_CONFIG_DEC_ALG_SHIFT 8
#define SE_CONFIG_DEC_ALG_AES \
((1U) << SE_CONFIG_DEC_ALG_SHIFT)
#define SE_CONFIG_DEC_ALG_NOP \
((0U) << SE_CONFIG_DEC_ALG_SHIFT)
#define SE_CONFIG_DEC_ALG(x) \
((x) & ((0xFU) << SE_CONFIG_DEC_ALG_SHIFT))
#define SE_CONFIG_DST_SHIFT 2
#define SE_CONFIG_DST_MEMORY \
((0U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_HASHREG \
((1U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_KEYTAB \
((2U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_SRK \
((3U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST_RSAREG \
((4U) << SE_CONFIG_DST_SHIFT)
#define SE_CONFIG_DST(x) \
((x) & ((0x7U) << SE_CONFIG_DST_SHIFT))
/* DRNG random number generator config */
#define SE_RNG_SRC_CONFIG_REG_OFFSET 0x344U
#define DRBG_RO_ENT_SRC_SHIFT 1
#define DRBG_RO_ENT_SRC_ENABLE \
((1U) << DRBG_RO_ENT_SRC_SHIFT)
#define DRBG_RO_ENT_SRC_DISABLE \
((0U) << DRBG_RO_ENT_SRC_SHIFT)
#define SE_RNG_SRC_CONFIG_RO_ENT_SRC(x) \
((x) & ((0x1U) << DRBG_RO_ENT_SRC_SHIFT))
#define DRBG_RO_ENT_SRC_LOCK_SHIFT 0
#define DRBG_RO_ENT_SRC_LOCK_ENABLE \
((1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
#define DRBG_RO_ENT_SRC_LOCK_DISABLE \
((0U) << DRBG_RO_ENT_SRC_LOCK_SHIFT)
#define SE_RNG_SRC_CONFIG_RO_ENT_SRC_LOCK(x) \
((x) & ((0x1U) << DRBG_RO_ENT_SRC_LOCK_SHIFT))
#define DRBG_RO_ENT_IGNORE_MEM_SHIFT 12
#define DRBG_RO_ENT_IGNORE_MEM_ENABLE \
((1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)
#define DRBG_RO_ENT_IGNORE_MEM_DISABLE \
((0U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT)
#define SE_RNG_SRC_CONFIG_RO_ENT_IGNORE_MEM(x) \
((x) & ((0x1U) << DRBG_RO_ENT_IGNORE_MEM_SHIFT))
/* SE OPERATION */
#define SE_OPERATION_REG_OFFSET 0x8U
#define SE_OPERATION_SHIFT 0
#define SE_OP_ABORT \
((0x0U) << SE_OPERATION_SHIFT)
#define SE_OP_START \
((0x1U) << SE_OPERATION_SHIFT)
#define SE_OP_RESTART \
((0x2U) << SE_OPERATION_SHIFT)
#define SE_OP_CTX_SAVE \
((0x3U) << SE_OPERATION_SHIFT)
#define SE_OP_RESTART_IN \
((0x4U) << SE_OPERATION_SHIFT)
#define SE_OPERATION(x) \
((x) & ((0x7U) << SE_OPERATION_SHIFT))
/* SE_CTX_SAVE_AUTO */
#define SE_CTX_SAVE_AUTO_REG_OFFSET 0x74U
/* Enable */
#define SE_CTX_SAVE_AUTO_ENABLE_SHIFT 0
#define SE_CTX_SAVE_AUTO_DIS \
((0U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
#define SE_CTX_SAVE_AUTO_EN \
((1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT)
#define SE_CTX_SAVE_AUTO_ENABLE(x) \
((x) & ((0x1U) << SE_CTX_SAVE_AUTO_ENABLE_SHIFT))
/* Lock */
#define SE_CTX_SAVE_AUTO_LOCK_SHIFT 8
#define SE_CTX_SAVE_AUTO_LOCK_EN \
((1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)
#define SE_CTX_SAVE_AUTO_LOCK_DIS \
((0U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT)
#define SE_CTX_SAVE_AUTO_LOCK(x) \
((x) & ((0x1U) << SE_CTX_SAVE_AUTO_LOCK_SHIFT))
/* Current context save number of blocks */
#define SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT 16
#define SE_CTX_SAVE_AUTO_CURR_CNT_MASK 0x3FFU
#define SE_CTX_SAVE_GET_BLK_COUNT(x) \
(((x) >> SE_CTX_SAVE_AUTO_CURR_CNT_SHIFT) & \
SE_CTX_SAVE_AUTO_CURR_CNT_MASK)
#define SE_CTX_SAVE_SIZE_BLOCKS_SE1 133
#define SE_CTX_SAVE_SIZE_BLOCKS_SE2 646
/* SE TZRAM OPERATION - only for SE1 */
#define SE_TZRAM_OPERATION 0x540U
#define SE_TZRAM_OP_MODE_SHIFT 1
#define SE_TZRAM_OP_MODE_SAVE \
((0U) << SE_TZRAM_OP_MODE_SHIFT)
#define SE_TZRAM_OP_MODE_RESTORE \
((1U) << SE_TZRAM_OP_MODE_SHIFT)
#define SE_TZRAM_OP_MODE(x) \
((x) & ((0x1U) << SE_TZRAM_OP_MODE_SHIFT))
#define SE_TZRAM_OP_BUSY_SHIFT 2
#define SE_TZRAM_OP_BUSY_OFF \
((0U) << SE_TZRAM_OP_BUSY_SHIFT)
#define SE_TZRAM_OP_BUSY_ON \
((1U) << SE_TZRAM_OP_BUSY_SHIFT)
#define SE_TZRAM_OP_BUSY(x) \
((x) & ((0x1U) << SE_TZRAM_OP_BUSY_SHIFT))
#define SE_TZRAM_OP_REQ_SHIFT 0
#define SE_TZRAM_OP_REQ_IDLE \
((0U) << SE_TZRAM_OP_REQ_SHIFT)
#define SE_TZRAM_OP_REQ_INIT \
((1U) << SE_TZRAM_OP_REQ_SHIFT)
#define SE_TZRAM_OP_REQ(x) \
((x) & ((0x1U) << SE_TZRAM_OP_REQ_SHIFT))
/* SE Interrupt */
#define SE_INT_STATUS_REG_OFFSET 0x10U
#define SE_INT_OP_DONE_SHIFT 4
#define SE_INT_OP_DONE_CLEAR \
((0U) << SE_INT_OP_DONE_SHIFT)
#define SE_INT_OP_DONE_ACTIVE \
((1U) << SE_INT_OP_DONE_SHIFT)
#define SE_INT_OP_DONE(x) \
((x) & ((0x1U) << SE_INT_OP_DONE_SHIFT))
/* SE error status */
#define SE_ERR_STATUS_REG_OFFSET 0x804U
/* SE linked list (LL) register */
#define SE_IN_LL_ADDR_REG_OFFSET 0x18U
#define SE_OUT_LL_ADDR_REG_OFFSET 0x24U
#define SE_BLOCK_COUNT_REG_OFFSET 0x318U
/* AES data sizes */
#define TEGRA_SE_AES_BLOCK_SIZE 16
#define TEGRA_SE_AES_MIN_KEY_SIZE 16
#define TEGRA_SE_AES_MAX_KEY_SIZE 32
#define TEGRA_SE_AES_IV_SIZE 16
/*******************************************************************************
* Inline functions definition
******************************************************************************/
static inline uint32_t tegra_se_read_32(const tegra_se_dev_t *dev, uint32_t offset)
{
return mmio_read_32(dev->se_base + offset);
}
static inline void tegra_se_write_32(const tegra_se_dev_t *dev, uint32_t offset, uint32_t val)
{
mmio_write_32(dev->se_base + offset, val);
}
/*******************************************************************************
* Prototypes
******************************************************************************/
#endif /* SE_PRIVATE_H */
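To show how the shift/mask accessors above compose, a short hedged example
that decodes SE_CTX_SAVE_AUTO; the helper name is illustrative and not part
of the patch.

#include <common/debug.h>
#include <mmio.h>
#include <se_private.h>

/* Illustrative helper: report whether atomic context save is armed and locked */
static bool se_ctx_save_auto_armed(const tegra_se_dev_t *se_dev)
{
	uint32_t val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);

	/* ENABLE is bit 0, LOCK is bit 8, CURR_CNT occupies bits [25:16] */
	bool enabled = (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_EN);
	bool locked = (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN);

	INFO("SE%d: ctx save auto %s, %s, %u blocks saved so far\n",
	     se_dev->se_num, enabled ? "enabled" : "disabled",
	     locked ? "locked" : "unlocked", SE_CTX_SAVE_GET_BLK_COUNT(val));

	return enabled && locked;
}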


@@ -0,0 +1,410 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <psci.h>
#include <se_private.h>
#include <security_engine.h>
#include <tegra_platform.h>
/*******************************************************************************
* Constants and Macros
******************************************************************************/
#define TIMEOUT_100MS 100UL /* 100ms timeout, polled in 1ms steps */
/*******************************************************************************
* Data structure and global variables
******************************************************************************/
/* The security engine contexts are formatted as follows:
*
* SE1 CONTEXT:
* #--------------------------------#
* | Random Data 1 Block |
* #--------------------------------#
* | Sticky Bits 2 Blocks |
* #--------------------------------#
* | Key Table 64 Blocks |
* | For each Key (x16): |
* | Key: 2 Blocks |
* | Original-IV: 1 Block |
* | Updated-IV: 1 Block |
* #--------------------------------#
* | RSA Keys 64 Blocks |
* #--------------------------------#
* | Known Pattern 1 Block |
* #--------------------------------#
*
* SE2/PKA1 CONTEXT:
* #--------------------------------#
* | Random Data 1 Block |
* #--------------------------------#
* | Sticky Bits 2 Blocks |
* #--------------------------------#
* | Key Table 64 Blocks |
* | For each Key (x16): |
* | Key: 2 Blocks |
* | Original-IV: 1 Block |
* | Updated-IV: 1 Block |
* #--------------------------------#
* | RSA Keys 64 Blocks |
* #--------------------------------#
* | PKA sticky bits 1 Block |
* #--------------------------------#
* | PKA keys 512 Blocks |
* #--------------------------------#
* | Known Pattern 1 Block |
* #--------------------------------#
*/
/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;
/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;
/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
.se_num = 1,
/* setup base address for se */
.se_base = TEGRA_SE1_BASE,
/* Setup context size in AES blocks */
.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
/* Setup SRC buffers for SE operations */
.src_ll_buf = &se1_src_ll_buf,
/* Setup DST buffers for SE operations */
.dst_ll_buf = &se1_dst_ll_buf,
};
/* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = {
.se_num = 2,
/* setup base address for se */
.se_base = TEGRA_SE2_BASE,
/* Setup context size in AES blocks */
.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
/* Setup SRC buffers for SE operations */
.src_ll_buf = &se2_src_ll_buf,
/* Setup DST buffers for SE operations */
.dst_ll_buf = &se2_dst_ll_buf,
};
/*******************************************************************************
* Functions Definition
******************************************************************************/
static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
{
flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
sizeof(tegra_se_io_lst_t));
flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
sizeof(tegra_se_io_lst_t));
}
/*
* Check that the context save operation has completed.
* This function is invoked after the context save operation,
* and it verifies the following conditions:
* 1. SE_INT_STATUS = SE_OP_DONE
* 2. SE_STATUS = IDLE
* 3. AHB bus data transfer is complete.
* 4. SE_ERR_STATUS is clear.
*/
static int32_t tegra_se_context_save_complete(const tegra_se_dev_t *se_dev)
{
uint32_t val = 0;
int32_t ret = 0;
uint32_t timeout;
/* Poll the SE interrupt register to ensure H/W operation complete */
val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
(timeout < TIMEOUT_100MS); timeout++) {
mdelay(1);
val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: ERR: Atomic context save operation timeout!\n",
__func__);
ret = -ETIMEDOUT;
}
/* Poll the SE status idle to ensure H/W operation complete */
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
timeout++) {
mdelay(1);
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: ERR: MEM_INTERFACE and SE state "
"idle state timeout.\n", __func__);
ret = -ETIMEDOUT;
}
}
/* Check AHB bus transfer complete */
if (ret == 0) {
val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
(timeout < TIMEOUT_100MS); timeout++) {
mdelay(1);
val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: SE write over AHB timeout.\n", __func__);
ret = -ETIMEDOUT;
}
}
/* Ensure that no errors are thrown during operation */
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
if (val != 0U) {
ERROR("%s: error during SE operation! 0x%x", __func__, val);
ret = -ENOTSUP;
}
}
return ret;
}
/*
* Verify that SE atomic context save has been enabled.
* SE_CTX_SAVE_AUTO.ENABLE == ENABLE
* If context save auto is not enabled, enable it and
* lock the setting.
* If context save auto is not enabled and the enable
* setting is already locked, return an error.
*/
static inline int32_t tegra_se_ctx_save_auto_enable(const tegra_se_dev_t *se_dev)
{
uint32_t val;
int32_t ret = 0;
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_DIS) {
if (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN) {
ERROR("%s: ERR: Cannot enable atomic. Write locked!\n",
__func__);
ret = -EACCES;
}
/* Program SE_CTX_SAVE_AUTO */
if (ret == 0) {
tegra_se_write_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET,
SE_CTX_SAVE_AUTO_LOCK_EN |
SE_CTX_SAVE_AUTO_EN);
}
}
return ret;
}
/*
* SE atomic context save. At SC7 entry, the SE driver triggers the
* hardware to perform the context save operation automatically.
*/
static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
{
int32_t ret = 0;
uint32_t val = 0;
uint32_t blk_count_limit = 0;
uint32_t block_count, timeout;
/* Wait for previous operation to finish */
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
mdelay(1);
val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: ERR: SE status is not idle!\n", __func__);
ret = -ETIMEDOUT;
}
/* Clear any pending interrupts */
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
/* Ensure HW atomic context save has been enabled
* This should have been done at boot time.
* SE_CTX_SAVE_AUTO.ENABLE == ENABLE
*/
ret = tegra_se_ctx_save_auto_enable(se_dev);
}
/* Read the context save progress counter: block_count
* Ensure no previous context save has been triggered
* SE_CTX_SAVE_AUTO.CURR_CNT == 0
*/
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
if (block_count != 0U) {
ERROR("%s: ctx_save triggered multiple times\n",
__func__);
ret = -EALREADY;
}
}
/* Set the destination block count expected when the context save completes */
if (ret == 0) {
blk_count_limit = block_count + se_dev->ctx_size_blks;
}
/* Program the SE_CONFIG register for an RNG operation
* SE_CONFIG.ENC_ALG = RNG
* SE_CONFIG.DEC_ALG = NOP
* SE_CONFIG.ENC_MODE is ignored
* SE_CONFIG.DEC_MODE is ignored
* SE_CONFIG.DST = MEMORY
*/
if (ret == 0) {
val = (SE_CONFIG_ENC_ALG_RNG |
SE_CONFIG_DEC_ALG_NOP |
SE_CONFIG_DST_MEMORY);
tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
tegra_se_make_data_coherent(se_dev);
/* SE_CTX_SAVE operation */
tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
SE_OP_CTX_SAVE);
ret = tegra_se_context_save_complete(se_dev);
}
/* Check that the context save wrote the correct number of blocks */
if (ret == 0) {
val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
ERROR("%s: expected %d blocks but %d were written\n",
__func__, blk_count_limit, val);
ret = -ECANCELED;
}
}
return ret;
}
/*
* Initialize the SE engine handle
*/
void tegra_se_init(void)
{
INFO("%s: start SE init\n", __func__);
/* TODO: Bug 1854340. Generate random SRK */
INFO("%s: SE init done\n", __func__);
}
/*
* Security engine power suspend entry point.
* This function is invoked from the PSCI power domain suspend handler.
*/
int32_t tegra_se_suspend(void)
{
int32_t ret = 0;
/* Atomic context save se2 and pka1 */
INFO("%s: SE2/PKA1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_2);
/* Atomic context save se */
if (ret == 0) {
INFO("%s: SE1 atomic context save\n", __func__);
ret = tegra_se_context_save_atomic(&se_dev_1);
}
if (ret == 0) {
INFO("%s: SE atomic context save done\n", __func__);
}
return ret;
}
/*
* Save TZRAM to shadow TZRAM in AON
*/
int32_t tegra_se_save_tzram(void)
{
uint32_t val = 0;
int32_t ret = 0;
uint32_t timeout;
INFO("%s: SE TZRAM save start\n", __func__);
val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);
val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
(timeout < TIMEOUT_100MS); timeout++) {
mdelay(1);
val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
}
if (timeout == TIMEOUT_100MS) {
ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
ret = -ETIMEDOUT;
}
if (ret == 0) {
INFO("%s: SE TZRAM save done!\n", __func__);
}
return ret;
}
/*
* This function is invoked during SE warm boot resume
*/
static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
{
uint32_t val;
assert(se_dev);
/* Lock RNG source to ENTROPY on resume */
val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
DRBG_RO_ENT_SRC_LOCK_ENABLE |
DRBG_RO_ENT_SRC_ENABLE;
tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
/* Enable and lock the SE atomic context save setting */
if (tegra_se_ctx_save_auto_enable(se_dev) != 0) {
ERROR("%s: ERR: enable SE%d context save auto failed!\n",
__func__, se_dev->se_num);
}
/* TODO: Bug 1854340. Set a random value to SRK */
}
/*
* This function is invoked on SC7 resume
*/
void tegra_se_resume(void)
{
tegra_se_warm_boot_resume(&se_dev_1);
tegra_se_warm_boot_resume(&se_dev_2);
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -19,6 +19,8 @@
#include <pmc.h>
#include <tegra_def.h>
#include <tegra_private.h>
#include <tegra_platform.h>
#include <security_engine.h>
/*
* Register used to clear CPU reset signals. Each CPU has two reset
@@ -120,16 +122,31 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
unsigned int stateid_afflvl2 = pwr_domain_state[MPIDR_AFFLVL2];
unsigned int stateid_afflvl1 = pwr_domain_state[MPIDR_AFFLVL1];
unsigned int stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0];
int ret = PSCI_E_SUCCESS;
if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
assert((stateid_afflvl0 == PLAT_MAX_OFF_STATE) ||
(stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
(stateid_afflvl0 == PSTATE_ID_SOC_POWERDN));
assert((stateid_afflvl1 == PLAT_MAX_OFF_STATE) ||
(stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
(stateid_afflvl1 == PSTATE_ID_SOC_POWERDN));
if (tegra_chipid_is_t210_b01()) {
/* Suspend se/se2 and pka1 */
if (tegra_se_suspend() != 0) {
ret = PSCI_E_INTERN_FAIL;
}
/* Save tzram contents */
if (tegra_se_save_tzram() != 0) {
ret = PSCI_E_INTERN_FAIL;
}
}
/* suspend the entire soc */
tegra_fc_soc_powerdn(mpidr);
if (ret == PSCI_E_SUCCESS) {
tegra_fc_soc_powerdn(mpidr);
}
} else if (stateid_afflvl1 == PSTATE_ID_CLUSTER_IDLE) {
@@ -152,10 +169,10 @@ int tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
} else {
ERROR("%s: Unknown state id\n", __func__);
return PSCI_E_NOT_SUPPORTED;
ret = PSCI_E_NOT_SUPPORTED;
}
return PSCI_E_SUCCESS;
return ret;
}
int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
@@ -168,6 +185,13 @@ int tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
if (target_state->pwr_domain_state[PLAT_MAX_PWR_LVL] ==
PLAT_SYS_SUSPEND_STATE_ID) {
/*
* Security engine resume
*/
if (tegra_chipid_is_t210_b01()) {
tegra_se_resume();
}
/*
* Lock scratch registers which hold the CPU vectors
*/
@@ -231,7 +255,7 @@ int tegra_soc_prepare_system_reset(void)
* for the PMC APB clock would not be changed due to system reset.
*/
mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_BURST_POLICY,
SCLK_BURST_POLICY_DEFAULT);
SCLK_BURST_POLICY_DEFAULT);
mmio_write_32((uintptr_t)TEGRA_CAR_RESET_BASE + SCLK_RATE, 0);
/* Wait 1 ms to make sure clock source/device logic is stabilized. */


@@ -9,7 +9,9 @@
#include <drivers/console.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <platform.h>
#include <security_engine.h>
#include <tegra_def.h>
#include <tegra_platform.h>
#include <tegra_private.h>
/* sets of MMIO ranges setup */
@@ -36,6 +38,14 @@ static const mmap_region_t tegra_mmap[] = {
******************************************************************************/
const mmap_region_t *plat_get_mmio_map(void)
{
/* Add the map region for security engine SE2 */
if (tegra_chipid_is_t210_b01()) {
mmap_add_region((uint64_t)TEGRA_SE2_BASE,
(uint64_t)TEGRA_SE2_BASE,
(uint64_t)TEGRA_SE2_RANGE_SIZE,
MT_DEVICE | MT_RW | MT_SECURE);
}
/* MMIO space */
return tegra_mmap;
}
@@ -101,6 +111,17 @@ uint32_t plat_get_console_from_id(int id)
return tegra210_uart_addresses[id];
}
/*******************************************************************************
* Handler for early platform setup
******************************************************************************/
void plat_early_platform_setup(void)
{
/* Initialize security engine driver */
if (tegra_chipid_is_t210_b01()) {
tegra_se_init();
}
}
/*******************************************************************************
* Initialize the GIC and SGIs
******************************************************************************/


@@ -16,18 +16,21 @@ $(eval $(call add_define,PLATFORM_CLUSTER_COUNT))
PLATFORM_MAX_CPUS_PER_CLUSTER := 4
$(eval $(call add_define,PLATFORM_MAX_CPUS_PER_CLUSTER))
MAX_XLAT_TABLES := 4
MAX_XLAT_TABLES := 8
$(eval $(call add_define,MAX_XLAT_TABLES))
MAX_MMAP_REGIONS := 8
$(eval $(call add_define,MAX_MMAP_REGIONS))
PLAT_INCLUDES += -I${SOC_DIR}/drivers/se
BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a57.S \
${COMMON_DIR}/drivers/flowctrl/flowctrl.c \
${COMMON_DIR}/drivers/memctrl/memctrl_v1.c \
${SOC_DIR}/plat_psci_handlers.c \
${SOC_DIR}/plat_setup.c \
${SOC_DIR}/drivers/se/security_engine.c \
${SOC_DIR}/plat_secondary.c
# Enable workarounds for selected Cortex-A57 erratas.