Merge pull request #1623 from MISL-EBU-System-SW/a3700-support

Add support for Armada 3700 and COMPHY porting layer
Antonio Niño Díaz 2018-11-01 12:44:24 +01:00 committed by GitHub
commit eb47f14d73
58 changed files with 5391 additions and 288 deletions

View File

@@ -1,3 +1,5 @@
.. _porting:
TF-A Porting Guide
==================
@@ -64,3 +66,53 @@ Armada-70x0/Armada-80x0 Porting
- Please refer to '<path_to_mv_ddr_sources>/doc/porting_guide.txt' for detailed porting description.
- The build target directory is "build/<platform>/release/ble".
- Comphy Porting (phy-porting-layer.h or phy-default-porting-layer.h)
- Background:
Some of the comphy parameters depend on the HW connection between the SoC and the PHY. Every
board type has specific HW characteristics, such as wire length. Due to those differences some comphy
parameters vary between board types. Therefore each board type can have its own list of values for
all relevant comphy parameters. The PHY porting layer specifies which parameters need to be tuned and
the board designer should provide the relevant values.
.. seealso::
For the XFI/SFI comphy type there is an "rx_training" procedure which eases the process of tuning some of
the parameters. Please see the rx_training entry in the :ref:`uboot_cmd` section.
The PHY porting layer simplifies updating static values per board type, which are now grouped in one place.
.. note::
The parameters for the same type of comphy may vary even for the same board type, because the
lanes from comphy-x to some PHY may have different HW characteristics than the lanes from
comphy-y to the same (multiplexed) or another PHY.
- Porting:
The porting layer for the PHY was introduced in TF-A. The file
``drivers/marvell/comphy/phy-default-porting-layer.h`` contains the default parameters. These
defaults are used only if there is no appropriate phy-porting-layer.h file under
``plat/marvell/<soc family>/<platform>/board/phy-porting-layer.h``. If phy-porting-layer.h exists,
phy-default-porting-layer.h is not included.
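The selection happens at compile time; the comphy driver added by this patch includes the board
header when it exists:

.. code-block:: c

   #if __has_include("phy-porting-layer.h")
   #include "phy-porting-layer.h"
   #else
   #include "phy-default-porting-layer.h"
   #endif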
.. warning::
Not all comphy types have been reworked to support the PHY porting layer yet; currently the
porting layer is supported only for the XFI/SFI and SATA comphy types.
The easiest way to prepare the PHY porting layer for a custom board is to copy an existing example to the
new platform:
- cp ``plat/marvell/a8k/a80x0/board/phy-porting-layer.h`` ``plat/marvell/<soc family>/<platform>/board/phy-porting-layer.h``
- adjust the relevant parameters, or
- if a different comphy index is used for a specific feature, move it to the proper table entry and then adjust it.
.. note::
The final size of the table with comphy parameters can differ, depending on the CP module count for
a given SoC type.
- Example:
An example porting layer for armada-8040-db can be found under ``plat/marvell/a8k/a80x0/board/phy-porting-layer.h``.
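For illustration only, a board-specific entry might look roughly like the sketch below. The struct
and field names (``sata_params``, ``sata_static_values_tab``, ``g1_rx_selmupi``, ...) are the ones
consumed by the CP110 comphy driver in this patch; the values shown are simply the ones that were
hard-coded before this patch and serve here as placeholders, not validated board settings, and the
real header may require additional fields.

.. code-block:: c

   /* Hypothetical sketch - in practice, copy the a80x0 example and adjust it */
   static const struct sata_params
           sata_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = {
           [0][0][0] = {
                   .g1_rx_selmupi = 0x0,
                   .g1_rx_selmupf = 0x1,
                   .g3_ffe_cap_sel = 0xf,
                   .g3_ffe_res_sel = 0x4,
                   .g3_dfe_res = 0x1,
           },
   };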
.. note::
If there is no PHY porting layer for a new platform (phy-porting-layer.h is missing), the default
values are used (drivers/marvell/comphy/phy-default-porting-layer.h) and the user is warned:
.. warning::
"Using default comphy parameters - it may be required to suit them for your board".

View File

@@ -199,6 +199,11 @@
#define HPIPE_DFE_F3_F5_DFE_CTRL_MASK \
(0x1 << HPIPE_DFE_F3_F5_DFE_CTRL_OFFSET)
#define HPIPE_ADAPTED_DFE_COEFFICIENT_1_REG 0x30
#define HPIPE_ADAPTED_DFE_RES_OFFSET 13
#define HPIPE_ADAPTED_DFE_RES_MASK \
(0x3 << HPIPE_ADAPTED_DFE_RES_OFFSET)
#define HPIPE_G1_SET_0_REG 0x34
#define HPIPE_G1_SET_0_G1_TX_AMP_OFFSET 1
#define HPIPE_G1_SET_0_G1_TX_AMP_MASK \
@@ -326,6 +331,16 @@
#define HPIPE_PHY_TEST_DATA_MASK \
(0xffff << HPIPE_PHY_TEST_DATA_OFFSET)
#define HPIPE_PHY_TEST_PRBS_ERROR_COUNTER_1_REG 0x80
#define HPIPE_PHY_TEST_OOB_0_REGISTER 0x84
#define HPIPE_PHY_PT_OOB_EN_OFFSET 14
#define HPIPE_PHY_PT_OOB_EN_MASK \
(0x1 << HPIPE_PHY_PT_OOB_EN_OFFSET)
#define HPIPE_PHY_TEST_PT_TESTMODE_OFFSET 12
#define HPIPE_PHY_TEST_PT_TESTMODE_MASK \
(0x3 << HPIPE_PHY_TEST_PT_TESTMODE_OFFSET)
#define HPIPE_LOOPBACK_REG 0x8c
#define HPIPE_LOOPBACK_SEL_OFFSET 1
#define HPIPE_LOOPBACK_SEL_MASK \
@@ -357,10 +372,27 @@
(0x1 << HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET)
#define HPIPE_G2_SET_2_REG 0xf8
#define HPIPE_G2_SET_2_G2_TX_EMPH0_OFFSET 0
#define HPIPE_G2_SET_2_G2_TX_EMPH0_MASK \
(0xf << HPIPE_G2_SET_2_G2_TX_EMPH0_OFFSET)
#define HPIPE_G2_SET_2_G2_TX_EMPH0_EN_OFFSET 4
#define HPIPE_G2_SET_2_G2_TX_EMPH0_EN_MASK \
(0x1 << HPIPE_G2_SET_2_G2_TX_EMPH0_EN_OFFSET)
#define HPIPE_G2_TX_SSC_AMP_OFFSET 9
#define HPIPE_G2_TX_SSC_AMP_MASK \
(0x7f << HPIPE_G2_TX_SSC_AMP_OFFSET)
#define HPIPE_G3_SET_2_REG 0xfc
#define HPIPE_G3_SET_2_G3_TX_EMPH0_OFFSET 0
#define HPIPE_G3_SET_2_G3_TX_EMPH0_MASK \
(0xf << HPIPE_G3_SET_2_G3_TX_EMPH0_OFFSET)
#define HPIPE_G3_SET_2_G3_TX_EMPH0_EN_OFFSET 4
#define HPIPE_G3_SET_2_G3_TX_EMPH0_EN_MASK \
(0x1 << HPIPE_G3_SET_2_G3_TX_EMPH0_EN_OFFSET)
#define HPIPE_G3_TX_SSC_AMP_OFFSET 9
#define HPIPE_G3_TX_SSC_AMP_MASK \
(0x7f << HPIPE_G3_TX_SSC_AMP_OFFSET)
#define HPIPE_VDD_CAL_0_REG 0x108
#define HPIPE_CAL_VDD_CONT_MODE_OFFSET 15
#define HPIPE_CAL_VDD_CONT_MODE_MASK \
@@ -434,6 +466,15 @@
#define HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_MASK \
(0x1 << HPIPE_SPD_DIV_FORCE_TX_SPD_DIV_FORCE_OFFSET)
/* HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIBRATION_CTRL_REG */
#define HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG 0x168
#define HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET 15
#define HPIPE_CAL_RXCLKALIGN_90_EXT_EN_MASK \
(0x1 << HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET)
#define HPIPE_CAL_OS_PH_EXT_OFFSET 8
#define HPIPE_CAL_OS_PH_EXT_MASK \
(0x7f << HPIPE_CAL_OS_PH_EXT_OFFSET)
#define HPIPE_SAMPLER_N_PROC_CALIB_CTRL_REG 0x16C
#define HPIPE_RX_SAMPLER_OS_GAIN_OFFSET 6
#define HPIPE_RX_SAMPLER_OS_GAIN_MASK \
@@ -484,6 +525,19 @@
#define HPIPE_OS_PH_VALID_MASK \
(0x1 << HPIPE_OS_PH_VALID_OFFSET)
#define HPIPE_DATA_PHASE_OFF_CTRL_REG 0x1A0
#define HPIPE_DATA_PHASE_ADAPTED_OS_PH_OFFSET 9
#define HPIPE_DATA_PHASE_ADAPTED_OS_PH_MASK \
(0x7f << HPIPE_DATA_PHASE_ADAPTED_OS_PH_OFFSET)
#define HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG 0x1A4
#define HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_OFFSET 12
#define HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_MASK \
(0x3 << HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_OFFSET)
#define HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_OFFSET 8
#define HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_MASK \
(0xf << HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_OFFSET)
#define HPIPE_SQ_GLITCH_FILTER_CTRL 0x1c8
#define HPIPE_SQ_DEGLITCH_WIDTH_P_OFFSET 0
#define HPIPE_SQ_DEGLITCH_WIDTH_P_MASK \
@@ -510,6 +564,26 @@
#define HPIPE_DME_ETHERNET_MODE_MASK \
(0x1 << HPIPE_DME_ETHERNET_MODE_OFFSET)
#define HPIPE_TRX_TRAIN_CTRL_0_REG 0x22c
#define HPIPE_TRX_TX_F0T_EO_BASED_OFFSET 14
#define HPIPE_TRX_TX_F0T_EO_BASED_MASK \
(1 << HPIPE_TRX_TX_F0T_EO_BASED_OFFSET)
#define HPIPE_TRX_UPDATE_THEN_HOLD_OFFSET 6
#define HPIPE_TRX_UPDATE_THEN_HOLD_MASK \
(1 << HPIPE_TRX_UPDATE_THEN_HOLD_OFFSET)
#define HPIPE_TRX_TX_CTRL_CLK_EN_OFFSET 5
#define HPIPE_TRX_TX_CTRL_CLK_EN_MASK \
(1 << HPIPE_TRX_TX_CTRL_CLK_EN_OFFSET)
#define HPIPE_TRX_RX_ANA_IF_CLK_ENE_OFFSET 4
#define HPIPE_TRX_RX_ANA_IF_CLK_ENE_MASK \
(1 << HPIPE_TRX_RX_ANA_IF_CLK_ENE_OFFSET)
#define HPIPE_TRX_TX_TRAIN_EN_OFFSET 1
#define HPIPE_TRX_TX_TRAIN_EN_MASK \
(1 << HPIPE_TRX_TX_TRAIN_EN_OFFSET)
#define HPIPE_TRX_RX_TRAIN_EN_OFFSET 0
#define HPIPE_TRX_RX_TRAIN_EN_MASK \
(1 << HPIPE_TRX_RX_TRAIN_EN_OFFSET)
#define HPIPE_TX_TRAIN_CTRL_0_REG 0x268
#define HPIPE_TX_TRAIN_P2P_HOLD_OFFSET 15
#define HPIPE_TX_TRAIN_P2P_HOLD_MASK \
@@ -548,6 +622,23 @@
#define HPIPE_TX_TRAIN_WAIT_TIME_EN_MASK \
(0x1 << HPIPE_TX_TRAIN_WAIT_TIME_EN_OFFSET)
#define HPIPE_INTERRUPT_1_REGISTER 0x2AC
#define HPIPE_TRX_TRAIN_FAILED_OFFSET 6
#define HPIPE_TRX_TRAIN_FAILED_MASK \
(1 << HPIPE_TRX_TRAIN_FAILED_OFFSET)
#define HPIPE_TRX_TRAIN_TIME_OUT_INT_OFFSET 5
#define HPIPE_TRX_TRAIN_TIME_OUT_INT_MASK \
(1 << HPIPE_TRX_TRAIN_TIME_OUT_INT_OFFSET)
#define HPIPE_INTERRUPT_TRX_TRAIN_DONE_OFFSET 4
#define HPIPE_INTERRUPT_TRX_TRAIN_DONE_MASK \
(1 << HPIPE_INTERRUPT_TRX_TRAIN_DONE_OFFSET)
#define HPIPE_INTERRUPT_DFE_DONE_INT_OFFSET 3
#define HPIPE_INTERRUPT_DFE_DONE_INT_MASK \
(1 << HPIPE_INTERRUPT_DFE_DONE_INT_OFFSET)
#define HPIPE_INTERRUPT_RX_TRAIN_COMPLETE_INT_OFFSET 1
#define HPIPE_INTERRUPT_RX_TRAIN_COMPLETE_INT_MASK \
(1 << HPIPE_INTERRUPT_RX_TRAIN_COMPLETE_INT_OFFSET)
#define HPIPE_TX_TRAIN_REG 0x31C
#define HPIPE_TX_TRAIN_CHK_INIT_OFFSET 4
#define HPIPE_TX_TRAIN_CHK_INIT_MASK \

View File

@@ -0,0 +1,975 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <mvebu.h>
#include <mvebu_def.h>
#include <spinlock.h>
#include "phy-comphy-3700.h"
#include "phy-comphy-common.h"
/*
* COMPHY_INDIRECT_REG points to the AHCI address space, but the AHCI region
* used in Linux only extends up to 0x178, so nothing will access it from
* Linux concurrently at runtime.
*/
#define COMPHY_INDIRECT_REG (MVEBU_REGS_BASE + 0xE0178)
/* The USB3_GBE1_PHY range is above USB3 registers used in dts */
#define USB3_GBE1_PHY (MVEBU_REGS_BASE + 0x5C000)
#define COMPHY_SD_ADDR (MVEBU_REGS_BASE + 0x1F000)
/*
* The address below is used only for reading, therefore there is no problem
* with concurrent Linux access.
*/
#define MVEBU_TEST_PIN_LATCH_N (MVEBU_NB_GPIO_REG_BASE + 0x8)
#define MVEBU_XTAL_MODE_MASK BIT(9)
#define MVEBU_XTAL_MODE_OFFS 9
#define MVEBU_XTAL_CLOCK_25MHZ 0x0
struct sgmii_phy_init_data_fix {
uint16_t addr;
uint16_t value;
};
/* Changes to 40M1G25 mode data required for running 40M3G125 init mode */
static struct sgmii_phy_init_data_fix sgmii_phy_init_fix[] = {
{0x005, 0x07CC}, {0x015, 0x0000}, {0x01B, 0x0000}, {0x01D, 0x0000},
{0x01E, 0x0000}, {0x01F, 0x0000}, {0x020, 0x0000}, {0x021, 0x0030},
{0x026, 0x0888}, {0x04D, 0x0152}, {0x04F, 0xA020}, {0x050, 0x07CC},
{0x053, 0xE9CA}, {0x055, 0xBD97}, {0x071, 0x3015}, {0x076, 0x03AA},
{0x07C, 0x0FDF}, {0x0C2, 0x3030}, {0x0C3, 0x8000}, {0x0E2, 0x5550},
{0x0E3, 0x12A4}, {0x0E4, 0x7D00}, {0x0E6, 0x0C83}, {0x101, 0xFCC0},
{0x104, 0x0C10}
};
/* 40M1G25 mode init data */
static uint16_t sgmii_phy_init[512] = {
/* 0 1 2 3 4 5 6 7 */
/*-----------------------------------------------------------*/
/* 8 9 A B C D E F */
0x3110, 0xFD83, 0x6430, 0x412F, 0x82C0, 0x06FA, 0x4500, 0x6D26, /* 00 */
0xAFC0, 0x8000, 0xC000, 0x0000, 0x2000, 0x49CC, 0x0BC9, 0x2A52, /* 08 */
0x0BD2, 0x0CDE, 0x13D2, 0x0CE8, 0x1149, 0x10E0, 0x0000, 0x0000, /* 10 */
0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x4134, 0x0D2D, 0xFFFF, /* 18 */
0xFFE0, 0x4030, 0x1016, 0x0030, 0x0000, 0x0800, 0x0866, 0x0000, /* 20 */
0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, /* 28 */
0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 30 */
0x0000, 0x0000, 0x000F, 0x6A62, 0x1988, 0x3100, 0x3100, 0x3100, /* 38 */
0x3100, 0xA708, 0x2430, 0x0830, 0x1030, 0x4610, 0xFF00, 0xFF00, /* 40 */
0x0060, 0x1000, 0x0400, 0x0040, 0x00F0, 0x0155, 0x1100, 0xA02A, /* 48 */
0x06FA, 0x0080, 0xB008, 0xE3ED, 0x5002, 0xB592, 0x7A80, 0x0001, /* 50 */
0x020A, 0x8820, 0x6014, 0x8054, 0xACAA, 0xFC88, 0x2A02, 0x45CF, /* 58 */
0x000F, 0x1817, 0x2860, 0x064F, 0x0000, 0x0204, 0x1800, 0x6000, /* 60 */
0x810F, 0x4F23, 0x4000, 0x4498, 0x0850, 0x0000, 0x000E, 0x1002, /* 68 */
0x9D3A, 0x3009, 0xD066, 0x0491, 0x0001, 0x6AB0, 0x0399, 0x3780, /* 70 */
0x0040, 0x5AC0, 0x4A80, 0x0000, 0x01DF, 0x0000, 0x0007, 0x0000, /* 78 */
0x2D54, 0x00A1, 0x4000, 0x0100, 0xA20A, 0x0000, 0x0000, 0x0000, /* 80 */
0x0000, 0x0000, 0x0000, 0x7400, 0x0E81, 0x1000, 0x1242, 0x0210, /* 88 */
0x80DF, 0x0F1F, 0x2F3F, 0x4F5F, 0x6F7F, 0x0F1F, 0x2F3F, 0x4F5F, /* 90 */
0x6F7F, 0x4BAD, 0x0000, 0x0000, 0x0800, 0x0000, 0x2400, 0xB651, /* 98 */
0xC9E0, 0x4247, 0x0A24, 0x0000, 0xAF19, 0x1004, 0x0000, 0x0000, /* A0 */
0x0000, 0x0013, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* A8 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* B0 */
0x0000, 0x0000, 0x0000, 0x0060, 0x0000, 0x0000, 0x0000, 0x0000, /* B8 */
0x0000, 0x0000, 0x3010, 0xFA00, 0x0000, 0x0000, 0x0000, 0x0003, /* C0 */
0x1618, 0x8200, 0x8000, 0x0400, 0x050F, 0x0000, 0x0000, 0x0000, /* C8 */
0x4C93, 0x0000, 0x1000, 0x1120, 0x0010, 0x1242, 0x1242, 0x1E00, /* D0 */
0x0000, 0x0000, 0x0000, 0x00F8, 0x0000, 0x0041, 0x0800, 0x0000, /* D8 */
0x82A0, 0x572E, 0x2490, 0x14A9, 0x4E00, 0x0000, 0x0803, 0x0541, /* E0 */
0x0C15, 0x0000, 0x0000, 0x0400, 0x2626, 0x0000, 0x0000, 0x4200, /* E8 */
0x0000, 0xAA55, 0x1020, 0x0000, 0x0000, 0x5010, 0x0000, 0x0000, /* F0 */
0x0000, 0x0000, 0x5000, 0x0000, 0x0000, 0x0000, 0x02F2, 0x0000, /* F8 */
0x101F, 0xFDC0, 0x4000, 0x8010, 0x0110, 0x0006, 0x0000, 0x0000, /*100 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*108 */
0x04CF, 0x0000, 0x04CF, 0x0000, 0x04CF, 0x0000, 0x04C6, 0x0000, /*110 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*118 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*120 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*128 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*130 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*138 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*140 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*148 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*150 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*158 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*160 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*168 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*170 */
0x0000, 0x0000, 0x0000, 0x00F0, 0x08A2, 0x3112, 0x0A14, 0x0000, /*178 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*180 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*188 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*190 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*198 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A0 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1A8 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B0 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1B8 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C0 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1C8 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D0 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1D8 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E0 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1E8 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /*1F0 */
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 /*1F8 */
};
/* returns reference clock in MHz (25 or 40) */
static uint32_t get_ref_clk(void)
{
uint32_t val;
val = (mmio_read_32(MVEBU_TEST_PIN_LATCH_N) & MVEBU_XTAL_MODE_MASK) >>
MVEBU_XTAL_MODE_OFFS;
if (val == MVEBU_XTAL_CLOCK_25MHZ)
return 25;
else
return 40;
}
/* Configure the PHY selector according to the requested COMPHY mode */
static void mvebu_a3700_comphy_set_phy_selector(uint8_t comphy_index,
uint32_t comphy_mode)
{
uint32_t reg;
int mode = COMPHY_GET_MODE(comphy_mode);
reg = mmio_read_32(MVEBU_COMPHY_REG_BASE + COMPHY_SELECTOR_PHY_REG);
switch (mode) {
case (COMPHY_SATA_MODE):
/* SATA must be in Lane2 */
if (comphy_index == COMPHY_LANE2)
reg &= ~COMPHY_SELECTOR_USB3_PHY_SEL_BIT;
else
goto error;
break;
case (COMPHY_SGMII_MODE):
case (COMPHY_HS_SGMII_MODE):
if (comphy_index == COMPHY_LANE0)
reg &= ~COMPHY_SELECTOR_USB3_GBE1_SEL_BIT;
else if (comphy_index == COMPHY_LANE1)
reg &= ~COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT;
else
goto error;
break;
case (COMPHY_USB3H_MODE):
case (COMPHY_USB3D_MODE):
case (COMPHY_USB3_MODE):
if (comphy_index == COMPHY_LANE2)
reg |= COMPHY_SELECTOR_USB3_PHY_SEL_BIT;
else if (comphy_index == COMPHY_LANE0)
reg |= COMPHY_SELECTOR_USB3_GBE1_SEL_BIT;
else
goto error;
break;
case (COMPHY_PCIE_MODE):
/* PCIE must be in Lane1 */
if (comphy_index == COMPHY_LANE1)
reg |= COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT;
else
goto error;
break;
default:
goto error;
}
mmio_write_32(MVEBU_COMPHY_REG_BASE + COMPHY_SELECTOR_PHY_REG, reg);
return;
error:
ERROR("COMPHY[%d] mode[%d] is invalid\n", comphy_index, mode);
}
/* It is only used for SATA and USB3 on comphy lane2. */
static void comphy_set_indirect(uintptr_t addr, uint32_t offset, uint16_t data,
uint16_t mask, int mode)
{
/*
* When Lane 2 PHY is for USB3, access the PHY registers
* through indirect Address and Data registers:
* INDIR_ACC_PHY_ADDR (RD00E0178h [31:0]),
* INDIR_ACC_PHY_DATA (RD00E017Ch [31:0]),
* within the SATA Host Controller registers, Lane 2 base register
* offset is 0x200
*/
if (mode == COMPHY_UNUSED)
return;
if (mode == COMPHY_SATA_MODE)
mmio_write_32(addr + COMPHY_LANE2_INDIR_ADDR_OFFSET, offset);
else
mmio_write_32(addr + COMPHY_LANE2_INDIR_ADDR_OFFSET,
offset + USB3PHY_LANE2_REG_BASE_OFFSET);
reg_set(addr + COMPHY_LANE2_INDIR_DATA_OFFSET, data, mask);
}
/* It is only used for USB3 direct access, i.e. not on comphy lane2. */
static void comphy_usb3_set_direct(uintptr_t addr, uint32_t reg_offset,
uint16_t data, uint16_t mask, int mode)
{
reg_set16((reg_offset * PHY_SHFT(USB3) + addr), data, mask);
}
static void comphy_sgmii_phy_init(uint32_t comphy_index, uint32_t mode,
uintptr_t sd_ip_addr)
{
const int fix_arr_sz = ARRAY_SIZE(sgmii_phy_init_fix);
int addr, fix_idx;
uint16_t val;
fix_idx = 0;
for (addr = 0; addr < 512; addr++) {
/*
* All PHY register values are defined in full for the 3.125 Gbps
* SERDES speed. The values required for 1.25 Gbps are almost
* the same, and only a few registers need to be "fixed" in
* comparison to the 3.125 Gbps values. These register values are
* stored in the "sgmii_phy_init_fix" array.
*/
if ((mode != COMPHY_SGMII_MODE) && (fix_idx < fix_arr_sz) &&
(sgmii_phy_init_fix[fix_idx].addr == addr)) {
/* Use the fixed value and advance to the next fix entry */
val = sgmii_phy_init_fix[fix_idx].value;
fix_idx++;
} else {
val = sgmii_phy_init[addr];
}
reg_set16(SGMIIPHY_ADDR(addr, sd_ip_addr), val, 0xFFFF);
}
}
static int mvebu_a3700_comphy_sata_power_on(uint8_t comphy_index,
uint32_t comphy_mode)
{
int ret = 0;
uint32_t offset, data = 0, ref_clk;
uintptr_t comphy_indir_regs = COMPHY_INDIRECT_REG;
int mode = COMPHY_GET_MODE(comphy_mode);
int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode);
debug_enter();
/* Configure phy selector for SATA */
mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
/* Clear phy isolation mode to make it work in normal mode */
offset = COMPHY_ISOLATION_CTRL_REG + SATAPHY_LANE2_REG_BASE_OFFSET;
comphy_set_indirect(comphy_indir_regs, offset, 0, PHY_ISOLATE_MODE,
mode);
/* 0. Check the Polarity invert bits */
if (invert & COMPHY_POLARITY_TXD_INVERT)
data |= TXD_INVERT_BIT;
if (invert & COMPHY_POLARITY_RXD_INVERT)
data |= RXD_INVERT_BIT;
offset = COMPHY_SYNC_PATTERN_REG + SATAPHY_LANE2_REG_BASE_OFFSET;
comphy_set_indirect(comphy_indir_regs, offset, data, TXD_INVERT_BIT |
RXD_INVERT_BIT, mode);
/* 1. Select 40-bit data width */
offset = COMPHY_LOOPBACK_REG0 + SATAPHY_LANE2_REG_BASE_OFFSET;
comphy_set_indirect(comphy_indir_regs, offset, DATA_WIDTH_40BIT,
SEL_DATA_WIDTH_MASK, mode);
/* 2. Select reference clock (25 MHz or 40 MHz) and PHY mode (SATA) */
offset = COMPHY_POWER_PLL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET;
if (get_ref_clk() == 40)
ref_clk = REF_CLOCK_SPEED_40M;
else
ref_clk = REF_CLOCK_SPEED_25M;
comphy_set_indirect(comphy_indir_regs, offset, ref_clk | PHY_MODE_SATA,
REF_FREF_SEL_MASK | PHY_MODE_MASK, mode);
/* 3. Use maximum PLL rate (no power save) */
offset = COMPHY_KVCO_CAL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET;
comphy_set_indirect(comphy_indir_regs, offset, USE_MAX_PLL_RATE_BIT,
USE_MAX_PLL_RATE_BIT, mode);
/* 4. Reset reserved bit */
comphy_set_indirect(comphy_indir_regs, COMPHY_RESERVED_REG, 0,
PHYCTRL_FRM_PIN_BIT, mode);
/* 5. Set vendor-specific configuration (It is done in sata driver) */
/* XXX: in U-Boot the sequence below was executed at this point, but not in
* Linux. Now it is done only in U-Boot before this comphy
* initialization - tests show that it works OK, but in case of any
* future problem it is left here for reference.
* reg_set(MVEBU_REGS_BASE + 0xe00a0, 0, 0xffffffff);
* reg_set(MVEBU_REGS_BASE + 0xe00a4, BIT(6), BIT(6));
*/
/* Wait for > 55 us to allow the PLL to be enabled */
udelay(PLL_SET_DELAY_US);
/* Polling status */
mmio_write_32(comphy_indir_regs + COMPHY_LANE2_INDIR_ADDR_OFFSET,
COMPHY_LOOPBACK_REG0 + SATAPHY_LANE2_REG_BASE_OFFSET);
ret = polling_with_timeout(comphy_indir_regs +
COMPHY_LANE2_INDIR_DATA_OFFSET,
PLL_READY_TX_BIT, PLL_READY_TX_BIT,
COMPHY_PLL_TIMEOUT, REG_32BIT);
debug_exit();
return ret;
}
static int mvebu_a3700_comphy_sgmii_power_on(uint8_t comphy_index,
uint32_t comphy_mode)
{
int ret = 0;
uint32_t mask, data, offset;
uintptr_t sd_ip_addr;
int mode = COMPHY_GET_MODE(comphy_mode);
int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode);
debug_enter();
/* Set selector */
mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
/* Serdes IP Base address
* COMPHY Lane0 -- USB3/GBE1
* COMPHY Lane1 -- PCIe/GBE0
*/
if (comphy_index == COMPHY_LANE0) {
/* Get usb3 and gbe */
sd_ip_addr = USB3_GBE1_PHY;
} else
sd_ip_addr = COMPHY_SD_ADDR;
/*
* 1. Reset PHY by setting PHY input port PIN_RESET=1.
* 2. Set PHY input port PIN_TX_IDLE=1, PIN_PU_IVREF=1 to keep
* PHY TXP/TXN output to idle state during PHY initialization
* 3. Set PHY input port PIN_PU_PLL=0, PIN_PU_RX=0, PIN_PU_TX=0.
*/
data = PIN_PU_IVEREF_BIT | PIN_TX_IDLE_BIT | PIN_RESET_COMPHY_BIT;
mask = PIN_RESET_CORE_BIT | PIN_PU_PLL_BIT | PIN_PU_RX_BIT |
PIN_PU_TX_BIT;
offset = MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index);
reg_set(offset, data, mask);
/* 4. Release reset to the PHY by setting PIN_RESET=0. */
data = 0;
mask = PIN_RESET_COMPHY_BIT;
reg_set(offset, data, mask);
/*
* 5. Set PIN_PHY_GEN_TX[3:0] and PIN_PHY_GEN_RX[3:0] to decide COMPHY
* bit rate
*/
if (mode == COMPHY_SGMII_MODE) {
/* SGMII 1G, SerDes speed 1.25G */
data |= SD_SPEED_1_25_G << GEN_RX_SEL_OFFSET;
data |= SD_SPEED_1_25_G << GEN_TX_SEL_OFFSET;
} else if (mode == COMPHY_HS_SGMII_MODE) {
/* HS SGMII (2.5G), SerDes speed 3.125G */
data |= SD_SPEED_2_5_G << GEN_RX_SEL_OFFSET;
data |= SD_SPEED_2_5_G << GEN_TX_SEL_OFFSET;
} else {
/* Other rates are not supported */
ERROR("unsupported SGMII speed on comphy lane%d\n",
comphy_index);
return -EINVAL;
}
mask = GEN_RX_SEL_MASK | GEN_TX_SEL_MASK;
reg_set(offset, data, mask);
/*
* 6. Wait 10 ms for the bandgap and reference clocks to stabilize; then
* start SW programming.
*/
mdelay(10);
/* 7. Program COMPHY register PHY_MODE */
data = PHY_MODE_SGMII;
mask = PHY_MODE_MASK;
reg_set16(SGMIIPHY_ADDR(COMPHY_POWER_PLL_CTRL, sd_ip_addr), data, mask);
/*
* 8. Set COMPHY register REFCLK_SEL to select the correct REFCLK
* source
*/
data = 0;
mask = PHY_REF_CLK_SEL;
reg_set16(SGMIIPHY_ADDR(COMPHY_MISC_REG0_ADDR, sd_ip_addr), data, mask);
/*
* 9. Set correct reference clock frequency in COMPHY register
* REF_FREF_SEL.
*/
if (get_ref_clk() == 40)
data = REF_CLOCK_SPEED_50M;
else
data = REF_CLOCK_SPEED_25M;
mask = REF_FREF_SEL_MASK;
reg_set16(SGMIIPHY_ADDR(COMPHY_POWER_PLL_CTRL, sd_ip_addr), data, mask);
/* 10. Program COMPHY register PHY_GEN_MAX[1:0]
* This step is mentioned in the flow received from the verification team.
* However, the PHY_GEN_MAX value is only meaningful for other interfaces
* (not SGMII). For instance, it selects the SATA speed 1.5/3/6 Gbps or the
* PCIe speed 2.5/5 Gbps.
*/
/*
* 11. Program COMPHY register SEL_BITS to set correct parallel data
* bus width
*/
data = DATA_WIDTH_10BIT;
mask = SEL_DATA_WIDTH_MASK;
reg_set16(SGMIIPHY_ADDR(COMPHY_LOOPBACK_REG0, sd_ip_addr), data, mask);
/*
* 12. As long as DFE function needs to be enabled in any mode,
* COMPHY register DFE_UPDATE_EN[5:0] shall be programmed to 0x3F
* for real chip during COMPHY power on.
* Step 14 exists (and is empty) in the original initialization flow
* obtained from the verification team. According to the functional
* specification, DFE_UPDATE_EN already has the default value 0x3F.
*/
/*
* 13. Program COMPHY GEN registers.
* These registers should be programmed based on the lab testing results
* to achieve optimal performance. Please contact the CEA group to get
* the related GEN table during real chip bring-up. We are only required to
* run through the entire register programming flow defined by
* "comphy_sgmii_phy_init" when the REF clock is 40 MHz. For a 25 MHz REF
* clock the default values stored in the PHY registers are OK.
*/
debug("Running C-DPI phy init %s mode\n",
mode == COMPHY_HS_SGMII_MODE ? "2G5" : "1G");
if (get_ref_clk() == 40)
comphy_sgmii_phy_init(comphy_index, mode, sd_ip_addr);
/*
* 14. [Simulation Only] should not be used for real chip.
* By pass power up calibration by programming EXT_FORCE_CAL_DONE
* (R02h[9]) to 1 to shorten COMPHY simulation time.
*/
/*
* 15. [Simulation Only: should not be used for real chip]
* Program COMPHY register FAST_DFE_TIMER_EN=1 to shorten RX training
* simulation time.
*/
/*
* 16. Check the PHY Polarity invert bit
*/
data = 0x0;
if (invert & COMPHY_POLARITY_TXD_INVERT)
data |= TXD_INVERT_BIT;
if (invert & COMPHY_POLARITY_RXD_INVERT)
data |= RXD_INVERT_BIT;
reg_set16(SGMIIPHY_ADDR(COMPHY_SYNC_PATTERN_REG, sd_ip_addr), data, 0);
/*
* 17. Set PHY input ports PIN_PU_PLL, PIN_PU_TX and PIN_PU_RX to 1 to
* start PHY power up sequence. All the PHY register programming should
* be done before PIN_PU_PLL=1. There should be no register programming
* for normal PHY operation from this point.
*/
reg_set(MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index),
PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT,
PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT);
/*
* 18. Wait for PHY power up sequence to finish by checking output ports
* PIN_PLL_READY_TX=1 and PIN_PLL_READY_RX=1.
*/
ret = polling_with_timeout(MVEBU_COMPHY_REG_BASE +
COMPHY_PHY_STATUS_OFFSET(comphy_index),
PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT,
PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT,
COMPHY_PLL_TIMEOUT, REG_32BIT);
if (ret)
ERROR("Failed to lock PLL for SGMII PHY %d\n", comphy_index);
/*
* 19. Set COMPHY input port PIN_TX_IDLE=0
*/
reg_set(MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index),
0x0, PIN_TX_IDLE_BIT);
/*
* 20. After valid data appears on the PIN_RXDATA bus, set PIN_RX_INIT=1 to
* start RX initialization. PIN_RX_INIT_DONE will be cleared to 0 by the
* PHY. After RX initialization is done, PIN_RX_INIT_DONE will be set to
* 1 by the COMPHY. Set PIN_RX_INIT=0 after PIN_RX_INIT_DONE=1. Please
* refer to the RX initialization part for details.
*/
reg_set(MVEBU_COMPHY_REG_BASE + COMPHY_PHY_CFG1_OFFSET(comphy_index),
PHY_RX_INIT_BIT, 0x0);
ret = polling_with_timeout(MVEBU_COMPHY_REG_BASE +
COMPHY_PHY_STATUS_OFFSET(comphy_index),
PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT,
PHY_PLL_READY_TX_BIT | PHY_PLL_READY_RX_BIT,
COMPHY_PLL_TIMEOUT, REG_32BIT);
if (ret)
ERROR("Failed to lock PLL for SGMII PHY %d\n", comphy_index);
ret = polling_with_timeout(MVEBU_COMPHY_REG_BASE +
COMPHY_PHY_STATUS_OFFSET(comphy_index),
PHY_RX_INIT_DONE_BIT, PHY_RX_INIT_DONE_BIT,
COMPHY_PLL_TIMEOUT, REG_32BIT);
if (ret)
ERROR("Failed to init RX of SGMII PHY %d\n", comphy_index);
debug_exit();
return ret;
}
static int mvebu_a3700_comphy_usb3_power_on(uint8_t comphy_index,
uint32_t comphy_mode)
{
int ret = 0;
uintptr_t reg_base = 0;
uint32_t mask, data, addr, cfg, ref_clk;
void (*usb3_reg_set)(uintptr_t addr, uint32_t reg_offset, uint16_t data,
uint16_t mask, int mode);
int mode = COMPHY_GET_MODE(comphy_mode);
int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode);
debug_enter();
/* Set PHY selector */
mvebu_a3700_comphy_set_phy_selector(comphy_index, comphy_mode);
/* Set usb3 reg access func, Lane2 is indirect access */
if (comphy_index == COMPHY_LANE2) {
usb3_reg_set = &comphy_set_indirect;
reg_base = COMPHY_INDIRECT_REG;
} else {
/* Get the direct access register resource and map */
usb3_reg_set = &comphy_usb3_set_direct;
reg_base = USB3_GBE1_PHY;
}
/*
* 0. Set PHY OTG Control (0x5d034), bit 4: power up the OTG module. The
* register belongs to the UTMI module, so it is set in the UTMI PHY driver.
*/
/*
* 1. Set PRD_TXDEEMPH (3.5db de-emph)
*/
mask = PRD_TXDEEMPH0_MASK | PRD_TXMARGIN_MASK | PRD_TXSWING_MASK |
CFG_TX_ALIGN_POS_MASK;
usb3_reg_set(reg_base, COMPHY_REG_LANE_CFG0_ADDR, PRD_TXDEEMPH0_MASK,
mask, mode);
/*
* 2. Set BIT0: enable transmitter in high impedance mode
* Set BIT[3:4]: delay 2 clock cycles for HiZ off latency
* Set BIT6: Tx detect Rx at HiZ mode
* Unset BIT15: set to 0 to set USB3 De-emphasize level to -3.5db
* together with bit 0 of COMPHY_REG_LANE_CFG0_ADDR register
*/
mask = PRD_TXDEEMPH1_MASK | TX_DET_RX_MODE | GEN2_TX_DATA_DLY_MASK |
TX_ELEC_IDLE_MODE_EN;
data = TX_DET_RX_MODE | GEN2_TX_DATA_DLY_DEFT | TX_ELEC_IDLE_MODE_EN;
usb3_reg_set(reg_base, COMPHY_REG_LANE_CFG1_ADDR, data, mask, mode);
/*
* 3. Set Spread Spectrum Clock Enabled
*/
usb3_reg_set(reg_base, COMPHY_REG_LANE_CFG4_ADDR,
SPREAD_SPECTRUM_CLK_EN, SPREAD_SPECTRUM_CLK_EN, mode);
/*
* 4. Set Override Margining Controls From the MAC:
* Use margining signals from lane configuration
*/
usb3_reg_set(reg_base, COMPHY_REG_TEST_MODE_CTRL_ADDR,
MODE_MARGIN_OVERRIDE, REG_16_BIT_MASK, mode);
/*
* 5. Set Lane-to-Lane Bundle Clock Sampling Period = per PCLK cycles
* set Mode Clock Source = PCLK is generated from REFCLK
*/
usb3_reg_set(reg_base, COMPHY_REG_GLOB_CLK_SRC_LO_ADDR, 0x0,
(MODE_CLK_SRC | BUNDLE_PERIOD_SEL | BUNDLE_PERIOD_SCALE |
BUNDLE_SAMPLE_CTRL | PLL_READY_DLY), mode);
/*
* 6. Set G2 Spread Spectrum Clock Amplitude at 4K
*/
usb3_reg_set(reg_base, COMPHY_REG_GEN2_SET_2,
G2_TX_SSC_AMP_VALUE_20, G2_TX_SSC_AMP_MASK, mode);
/*
* 7. Unset G3 Spread Spectrum Clock Amplitude
* set G3 TX and RX Register Master Current Select
*/
mask = G3_TX_SSC_AMP_MASK | G3_VREG_RXTX_MAS_ISET_MASK |
RSVD_PH03FH_6_0_MASK;
usb3_reg_set(reg_base, COMPHY_REG_GEN2_SET_3,
G3_VREG_RXTX_MAS_ISET_60U, mask, mode);
/*
* 8. Check the crystal jumper setting and program the Power and PLL Control
* accordingly. Change the RX wait.
*/
if (get_ref_clk() == 40) {
ref_clk = REF_CLOCK_SPEED_40M;
cfg = CFG_PM_RXDLOZ_WAIT_12_UNIT;
} else {
/* 25 MHz */
ref_clk = USB3_REF_CLOCK_SPEED_25M;
cfg = CFG_PM_RXDLOZ_WAIT_7_UNIT;
}
mask = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT |
PU_TX_INTP_BIT | PU_DFE_BIT | PLL_LOCK_BIT | PHY_MODE_MASK |
REF_FREF_SEL_MASK;
data = PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT |
PU_TX_INTP_BIT | PU_DFE_BIT | PHY_MODE_USB3 | ref_clk;
usb3_reg_set(reg_base, COMPHY_POWER_PLL_CTRL, data, mask, mode);
mask = CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK |
CFG_PM_RXDLOZ_WAIT_MASK;
data = CFG_PM_RXDEN_WAIT_1_UNIT | cfg;
usb3_reg_set(reg_base, COMPHY_REG_PWR_MGM_TIM1_ADDR, data, mask, mode);
/*
* 9. Enable idle sync
*/
data = UNIT_CTRL_DEFAULT_VALUE | IDLE_SYNC_EN;
usb3_reg_set(reg_base, COMPHY_REG_UNIT_CTRL_ADDR, data, REG_16_BIT_MASK,
mode);
/*
* 10. Enable the output of 500M clock
*/
data = MISC_REG0_DEFAULT_VALUE | CLK500M_EN;
usb3_reg_set(reg_base, COMPHY_MISC_REG0_ADDR, data, REG_16_BIT_MASK,
mode);
/*
* 11. Set 20-bit data width
*/
usb3_reg_set(reg_base, COMPHY_LOOPBACK_REG0, DATA_WIDTH_20BIT,
REG_16_BIT_MASK, mode);
/*
* 12. Override Speed_PLL value and use MAC PLL
*/
usb3_reg_set(reg_base, COMPHY_KVCO_CAL_CTRL,
(SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT),
REG_16_BIT_MASK, mode);
/*
* 13. Check the Polarity invert bit
*/
if (invert & COMPHY_POLARITY_TXD_INVERT)
usb3_reg_set(reg_base, COMPHY_SYNC_PATTERN_REG, TXD_INVERT_BIT,
TXD_INVERT_BIT, mode);
if (invert & COMPHY_POLARITY_RXD_INVERT)
usb3_reg_set(reg_base, COMPHY_SYNC_PATTERN_REG, RXD_INVERT_BIT,
RXD_INVERT_BIT, mode);
/*
* 14. Set max speed generation to USB3.0 5Gbps
*/
usb3_reg_set(reg_base, COMPHY_SYNC_MASK_GEN_REG, PHY_GEN_USB3_5G,
PHY_GEN_MAX_MASK, mode);
/*
* 15. Set capacitor value for FFE gain peaking to 0xF
*/
usb3_reg_set(reg_base, COMPHY_REG_GEN3_SETTINGS_3,
COMPHY_GEN_FFE_CAP_SEL_VALUE, COMPHY_GEN_FFE_CAP_SEL_MASK,
mode);
/*
* 16. Release SW reset
*/
data = MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32 | MODE_REFDIV_BY_4;
usb3_reg_set(reg_base, COMPHY_REG_GLOB_PHY_CTRL0_ADDR, data,
REG_16_BIT_MASK, mode);
/* Wait for > 55 us to allow PCLK to be enabled */
udelay(PLL_SET_DELAY_US);
if (comphy_index == COMPHY_LANE2) {
data = COMPHY_LOOPBACK_REG0 + USB3PHY_LANE2_REG_BASE_OFFSET;
mmio_write_32(reg_base + COMPHY_LANE2_INDIR_ADDR_OFFSET,
data);
addr = COMPHY_LOOPBACK_REG0 + USB3PHY_LANE2_REG_BASE_OFFSET;
ret = polling_with_timeout(addr, TXDCLK_PCLK_EN, TXDCLK_PCLK_EN,
COMPHY_PLL_TIMEOUT, REG_32BIT);
} else {
ret = polling_with_timeout(LANE_STATUS1_ADDR(USB3) + reg_base,
TXDCLK_PCLK_EN, TXDCLK_PCLK_EN,
COMPHY_PLL_TIMEOUT, REG_16BIT);
}
if (ret)
ERROR("Failed to lock USB3 PLL\n");
debug_exit();
return ret;
}
static int mvebu_a3700_comphy_pcie_power_on(uint8_t comphy_index,
uint32_t comphy_mode)
{
int ret;
uint32_t ref_clk;
int invert = COMPHY_GET_POLARITY_INVERT(comphy_mode);
debug_enter();
/* 1. Enable max PLL. */
reg_set16(LANE_CFG1_ADDR(PCIE) + COMPHY_SD_ADDR,
USE_MAX_PLL_RATE_EN, 0x0);
/* 2. Select 20 bit SERDES interface. */
reg_set16(GLOB_CLK_SRC_LO_ADDR(PCIE) + COMPHY_SD_ADDR,
CFG_SEL_20B, 0);
/* 3. Force to use reg setting for PCIe mode */
reg_set16(MISC_REG1_ADDR(PCIE) + COMPHY_SD_ADDR,
SEL_BITS_PCIE_FORCE, 0);
/* 4. Change RX wait */
reg_set16(PWR_MGM_TIM1_ADDR(PCIE) + COMPHY_SD_ADDR,
CFG_PM_RXDEN_WAIT_1_UNIT | CFG_PM_RXDLOZ_WAIT_12_UNIT,
(CFG_PM_OSCCLK_WAIT_MASK | CFG_PM_RXDEN_WAIT_MASK |
CFG_PM_RXDLOZ_WAIT_MASK));
/* 5. Enable idle sync */
reg_set16(UNIT_CTRL_ADDR(PCIE) + COMPHY_SD_ADDR,
UNIT_CTRL_DEFAULT_VALUE | IDLE_SYNC_EN, REG_16_BIT_MASK);
/* 6. Enable the output of 100M/125M/500M clock */
reg_set16(MISC_REG0_ADDR(PCIE) + COMPHY_SD_ADDR,
MISC_REG0_DEFAULT_VALUE | CLK500M_EN | CLK100M_125M_EN,
REG_16_BIT_MASK);
/*
* 7. Enable TX (PCIe global register 0xd0074814); this is done in the
* PCIe driver.
*/
/*
* 8. Check crystal jumper setting and program the Power and PLL
* Control accordingly
*/
if (get_ref_clk() == 40)
ref_clk = REF_CLOCK_SPEED_40M;
else
ref_clk = PCIE_REF_CLOCK_SPEED_25M;
reg_set16(PWR_PLL_CTRL_ADDR(PCIE) + COMPHY_SD_ADDR,
(PU_IVREF_BIT | PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT |
PU_TX_INTP_BIT | PU_DFE_BIT | ref_clk | PHY_MODE_PCIE),
REG_16_BIT_MASK);
/* 9. Override Speed_PLL value and use MAC PLL */
reg_set16(KVCO_CAL_CTRL_ADDR(PCIE) + COMPHY_SD_ADDR,
SPEED_PLL_VALUE_16 | USE_MAX_PLL_RATE_BIT, REG_16_BIT_MASK);
/* 10. Check the Polarity invert bit */
if (invert & COMPHY_POLARITY_TXD_INVERT)
reg_set16(SYNC_PATTERN_REG_ADDR(PCIE) + COMPHY_SD_ADDR,
TXD_INVERT_BIT, 0x0);
if (invert & COMPHY_POLARITY_RXD_INVERT)
reg_set16(SYNC_PATTERN_REG_ADDR(PCIE) + COMPHY_SD_ADDR,
RXD_INVERT_BIT, 0x0);
/* 11. Release SW reset */
reg_set16(GLOB_PHY_CTRL0_ADDR(PCIE) + COMPHY_SD_ADDR,
MODE_CORE_CLK_FREQ_SEL | MODE_PIPE_WIDTH_32,
SOFT_RESET | MODE_REFDIV);
/* Wait for > 55 us to allow PCLK to be enabled */
udelay(PLL_SET_DELAY_US);
ret = polling_with_timeout(LANE_STATUS1_ADDR(PCIE) + COMPHY_SD_ADDR,
TXDCLK_PCLK_EN, TXDCLK_PCLK_EN,
COMPHY_PLL_TIMEOUT, REG_16BIT);
if (ret)
ERROR("Failed to lock PCIE PLL\n");
debug_exit();
return ret;
}
int mvebu_3700_comphy_power_on(uint8_t comphy_index, uint32_t comphy_mode)
{
int mode = COMPHY_GET_MODE(comphy_mode);
int ret = 0;
debug_enter();
switch (mode) {
case(COMPHY_SATA_MODE):
ret = mvebu_a3700_comphy_sata_power_on(comphy_index,
comphy_mode);
break;
case(COMPHY_SGMII_MODE):
case(COMPHY_HS_SGMII_MODE):
ret = mvebu_a3700_comphy_sgmii_power_on(comphy_index,
comphy_mode);
break;
case (COMPHY_USB3_MODE):
case (COMPHY_USB3H_MODE):
ret = mvebu_a3700_comphy_usb3_power_on(comphy_index,
comphy_mode);
break;
case (COMPHY_PCIE_MODE):
ret = mvebu_a3700_comphy_pcie_power_on(comphy_index,
comphy_mode);
break;
default:
ERROR("comphy%d: unsupported comphy mode\n", comphy_index);
ret = -EINVAL;
break;
}
debug_exit();
return ret;
}
static int mvebu_a3700_comphy_usb3_power_off(void)
{
/*
* Currently the USB3 MAC will control the USB3 PHY and set it to a low
* state, so there is no need to power off the USB3 PHY again.
*/
debug_enter();
debug_exit();
return 0;
}
static int mvebu_a3700_comphy_sata_power_off(uint32_t comphy_mode)
{
uintptr_t comphy_indir_regs = COMPHY_INDIRECT_REG;
int mode = COMPHY_GET_MODE(comphy_mode);
uint32_t offset;
debug_enter();
/* Set phy isolation mode */
offset = COMPHY_ISOLATION_CTRL_REG + SATAPHY_LANE2_REG_BASE_OFFSET;
comphy_set_indirect(comphy_indir_regs, offset, PHY_ISOLATE_MODE,
PHY_ISOLATE_MODE, mode);
/* Power off PLL, Tx, Rx */
offset = COMPHY_POWER_PLL_CTRL + SATAPHY_LANE2_REG_BASE_OFFSET;
comphy_set_indirect(comphy_indir_regs, offset, 0,
PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT, mode);
debug_exit();
return 0;
}
int mvebu_3700_comphy_power_off(uint8_t comphy_index, uint32_t comphy_mode)
{
int mode = COMPHY_GET_MODE(comphy_mode);
int err = 0;
debug_enter();
switch (mode) {
case (COMPHY_USB3_MODE):
case (COMPHY_USB3H_MODE):
err = mvebu_a3700_comphy_usb3_power_off();
break;
case (COMPHY_SATA_MODE):
err = mvebu_a3700_comphy_sata_power_off(comphy_mode);
break;
default:
debug("comphy%d: power off is not implemented for mode %d\n",
comphy_index, mode);
break;
}
debug_exit();
return err;
}
static int mvebu_a3700_comphy_sata_is_pll_locked(void)
{
uint32_t data, addr;
uintptr_t comphy_indir_regs = COMPHY_INDIRECT_REG;
int ret = 0;
debug_enter();
/* Polling status */
mmio_write_32(comphy_indir_regs + COMPHY_LANE2_INDIR_ADDR_OFFSET,
COMPHY_LOOPBACK_REG0 + SATAPHY_LANE2_REG_BASE_OFFSET);
addr = comphy_indir_regs + COMPHY_LANE2_INDIR_DATA_OFFSET;
data = polling_with_timeout(addr, PLL_READY_TX_BIT, PLL_READY_TX_BIT,
COMPHY_PLL_TIMEOUT, REG_32BIT);
if (data != 0) {
ERROR("TX PLL is not locked\n");
ret = -ETIMEDOUT;
}
debug_exit();
return ret;
}
int mvebu_3700_comphy_is_pll_locked(uint8_t comphy_index, uint32_t comphy_mode)
{
int mode = COMPHY_GET_MODE(comphy_mode);
int ret = 0;
debug_enter();
switch (mode) {
case(COMPHY_SATA_MODE):
ret = mvebu_a3700_comphy_sata_is_pll_locked();
break;
default:
ERROR("comphy[%d] mode[%d] doesn't support PLL lock check\n",
comphy_index, mode);
ret = -EINVAL;
break;
}
debug_exit();
return ret;
}

View File

@@ -0,0 +1,258 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef _PHY_COMPHY_3700_H
#define _PHY_COMPHY_3700_H
#define PLL_SET_DELAY_US 600
#define COMPHY_PLL_TIMEOUT 1000
#define REG_16_BIT_MASK 0xFFFF
#define COMPHY_SELECTOR_PHY_REG 0xFC
/* bit0: 0: Lane1 is GBE0; 1: Lane1 is PCIE */
#define COMPHY_SELECTOR_PCIE_GBE0_SEL_BIT BIT(0)
/* bit4: 0: Lane0 is GBE1; 1: Lane0 is USB3 */
#define COMPHY_SELECTOR_USB3_GBE1_SEL_BIT BIT(4)
/* bit8: 0: Lane1 is USB, Lane2 is SATA; 1: Lane2 is USB3 */
#define COMPHY_SELECTOR_USB3_PHY_SEL_BIT BIT(8)
/* SATA PHY register offset */
#define SATAPHY_LANE2_REG_BASE_OFFSET 0x200
/* USB3 PHY offset compared to SATA PHY */
#define USB3PHY_LANE2_REG_BASE_OFFSET 0x200
/* Comphy lane2 indirect access register offset */
#define COMPHY_LANE2_INDIR_ADDR_OFFSET 0x0
#define COMPHY_LANE2_INDIR_DATA_OFFSET 0x4
/* PHY shift to get related register address */
enum {
PCIE = 1,
USB3,
};
#define PCIEPHY_SHFT 2
#define USB3PHY_SHFT 2
#define PHY_SHFT(unit) ((unit == PCIE) ? PCIEPHY_SHFT : USB3PHY_SHFT)
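/*
 * Note (illustration, not part of the original patch): the PHY registers are
 * 16 bits wide, so the *_ADDR() helpers below convert a register index into
 * a byte offset by multiplying it by PHY_SHFT(unit) == 2. For example,
 * COMPHY_POWER_PLL_CTRL (0x01) maps to byte offset 0x02 from the unit base,
 * as used in phy-comphy-3700.c:
 *
 *	reg_set16(PWR_PLL_CTRL_ADDR(PCIE) + COMPHY_SD_ADDR, data, mask);
 */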
/* PHY register */
#define COMPHY_POWER_PLL_CTRL 0x01
#define PWR_PLL_CTRL_ADDR(unit) (COMPHY_POWER_PLL_CTRL * PHY_SHFT(unit))
#define PU_IVREF_BIT BIT(15)
#define PU_PLL_BIT BIT(14)
#define PU_RX_BIT BIT(13)
#define PU_TX_BIT BIT(12)
#define PU_TX_INTP_BIT BIT(11)
#define PU_DFE_BIT BIT(10)
#define RESET_DTL_RX_BIT BIT(9)
#define PLL_LOCK_BIT BIT(8)
#define REF_FREF_SEL_OFFSET 0
#define REF_FREF_SEL_MASK (0x1F << REF_FREF_SEL_OFFSET)
#define REF_CLOCK_SPEED_25M (0x1 << REF_FREF_SEL_OFFSET)
#define REF_CLOCK_SPEED_30M (0x2 << REF_FREF_SEL_OFFSET)
#define PCIE_REF_CLOCK_SPEED_25M REF_CLOCK_SPEED_30M
#define USB3_REF_CLOCK_SPEED_25M REF_CLOCK_SPEED_30M
#define REF_CLOCK_SPEED_40M (0x3 << REF_FREF_SEL_OFFSET)
#define REF_CLOCK_SPEED_50M (0x4 << REF_FREF_SEL_OFFSET)
#define PHY_MODE_OFFSET 5
#define PHY_MODE_MASK (7 << PHY_MODE_OFFSET)
#define PHY_MODE_SATA (0x0 << PHY_MODE_OFFSET)
#define PHY_MODE_PCIE (0x3 << PHY_MODE_OFFSET)
#define PHY_MODE_SGMII (0x4 << PHY_MODE_OFFSET)
#define PHY_MODE_USB3 (0x5 << PHY_MODE_OFFSET)
#define COMPHY_KVCO_CAL_CTRL 0x02
#define KVCO_CAL_CTRL_ADDR(unit) (COMPHY_KVCO_CAL_CTRL * PHY_SHFT(unit))
#define USE_MAX_PLL_RATE_BIT BIT(12)
#define SPEED_PLL_OFFSET 2
#define SPEED_PLL_MASK (0x3F << SPEED_PLL_OFFSET)
#define SPEED_PLL_VALUE_16 (0x10 << SPEED_PLL_OFFSET)
#define COMPHY_RESERVED_REG 0x0E
#define PHYCTRL_FRM_PIN_BIT BIT(13)
#define COMPHY_LOOPBACK_REG0 0x23
#define DIG_LB_EN_ADDR(unit) (COMPHY_LOOPBACK_REG0 * PHY_SHFT(unit))
#define SEL_DATA_WIDTH_OFFSET 10
#define SEL_DATA_WIDTH_MASK (0x3 << SEL_DATA_WIDTH_OFFSET)
#define DATA_WIDTH_10BIT (0x0 << SEL_DATA_WIDTH_OFFSET)
#define DATA_WIDTH_20BIT (0x1 << SEL_DATA_WIDTH_OFFSET)
#define DATA_WIDTH_40BIT (0x2 << SEL_DATA_WIDTH_OFFSET)
#define PLL_READY_TX_BIT BIT(4)
#define COMPHY_SYNC_PATTERN_REG 0x24
#define SYNC_PATTERN_REG_ADDR(unit) (COMPHY_SYNC_PATTERN_REG * \
PHY_SHFT(unit))
#define TXD_INVERT_BIT BIT(10)
#define RXD_INVERT_BIT BIT(11)
#define COMPHY_SYNC_MASK_GEN_REG 0x25
#define PHY_GEN_MAX_OFFSET 10
#define PHY_GEN_MAX_MASK (3 << PHY_GEN_MAX_OFFSET)
#define PHY_GEN_USB3_5G (1 << PHY_GEN_MAX_OFFSET)
#define COMPHY_ISOLATION_CTRL_REG 0x26
#define ISOLATION_CTRL_REG_ADDR(unit) (COMPHY_ISOLATION_CTRL_REG * \
PHY_SHFT(unit))
#define PHY_ISOLATE_MODE BIT(15)
#define COMPHY_MISC_REG0_ADDR 0x4F
#define MISC_REG0_ADDR(unit) (COMPHY_MISC_REG0_ADDR * PHY_SHFT(unit))
#define CLK100M_125M_EN BIT(4)
#define CLK500M_EN BIT(7)
#define PHY_REF_CLK_SEL BIT(10)
#define MISC_REG0_DEFAULT_VALUE 0xA00D
#define COMPHY_REG_GEN2_SET_2 0x3e
#define GEN2_SETTING_2_ADDR(unit) (COMPHY_REG_GEN2_SET_2 * PHY_SHFT(unit))
#define G2_TX_SSC_AMP_VALUE_20 BIT(14)
#define G2_TX_SSC_AMP_OFF 9
#define G2_TX_SSC_AMP_LEN 7
#define G2_TX_SSC_AMP_MASK (((1 << G2_TX_SSC_AMP_LEN) - 1) << \
G2_TX_SSC_AMP_OFF)
#define COMPHY_REG_GEN2_SET_3 0x3f
#define GEN2_SETTING_3_ADDR(unit) (COMPHY_REG_GEN2_SET_3 * PHY_SHFT(unit))
#define G3_TX_SSC_AMP_OFF 9
#define G3_TX_SSC_AMP_LEN 7
#define G3_TX_SSC_AMP_MASK (((1 << G2_TX_SSC_AMP_LEN) - 1) << \
G2_TX_SSC_AMP_OFF)
#define G3_VREG_RXTX_MAS_ISET_OFF 7
#define G3_VREG_RXTX_MAS_ISET_60U (0 << G3_VREG_RXTX_MAS_ISET_OFF)
#define G3_VREG_RXTX_MAS_ISET_80U (1 << G3_VREG_RXTX_MAS_ISET_OFF)
#define G3_VREG_RXTX_MAS_ISET_100U (2 << G3_VREG_RXTX_MAS_ISET_OFF)
#define G3_VREG_RXTX_MAS_ISET_120U (3 << G3_VREG_RXTX_MAS_ISET_OFF)
#define G3_VREG_RXTX_MAS_ISET_MASK (BIT(7) | BIT(8))
#define RSVD_PH03FH_6_0_OFF 0
#define RSVD_PH03FH_6_0_LEN 7
#define RSVD_PH03FH_6_0_MASK (((1 << RSVD_PH03FH_6_0_LEN) - 1) << \
RSVD_PH03FH_6_0_OFF)
#define COMPHY_REG_UNIT_CTRL_ADDR 0x48
#define UNIT_CTRL_ADDR(unit) (COMPHY_REG_UNIT_CTRL_ADDR * \
PHY_SHFT(unit))
#define IDLE_SYNC_EN BIT(12)
#define UNIT_CTRL_DEFAULT_VALUE 0x60
#define COMPHY_MISC_REG1_ADDR 0x73
#define MISC_REG1_ADDR(unit) (COMPHY_MISC_REG1_ADDR * PHY_SHFT(unit))
#define SEL_BITS_PCIE_FORCE BIT(15)
#define COMPHY_REG_GEN3_SETTINGS_3 0x112
#define COMPHY_GEN_FFE_CAP_SEL_MASK 0xF
#define COMPHY_GEN_FFE_CAP_SEL_VALUE 0xF
#define COMPHY_REG_LANE_CFG0_ADDR 0x180
#define LANE_CFG0_ADDR(unit) (COMPHY_REG_LANE_CFG0_ADDR * \
PHY_SHFT(unit))
#define PRD_TXDEEMPH0_MASK BIT(0)
#define PRD_TXMARGIN_MASK (BIT(1) | BIT(2) | BIT(3))
#define PRD_TXSWING_MASK BIT(4)
#define CFG_TX_ALIGN_POS_MASK (BIT(5) | BIT(6) | BIT(7) | BIT(8))
#define COMPHY_REG_LANE_CFG1_ADDR 0x181
#define LANE_CFG1_ADDR(unit) (COMPHY_REG_LANE_CFG1_ADDR * \
PHY_SHFT(unit))
#define PRD_TXDEEMPH1_MASK BIT(15)
#define USE_MAX_PLL_RATE_EN BIT(9)
#define TX_DET_RX_MODE BIT(6)
#define GEN2_TX_DATA_DLY_MASK (BIT(3) | BIT(4))
#define GEN2_TX_DATA_DLY_DEFT (2 << 3)
#define TX_ELEC_IDLE_MODE_EN BIT(0)
#define COMPHY_REG_LANE_STATUS1_ADDR 0x183
#define LANE_STATUS1_ADDR(unit) (COMPHY_REG_LANE_STATUS1_ADDR * \
PHY_SHFT(unit))
#define TXDCLK_PCLK_EN BIT(0)
#define COMPHY_REG_LANE_CFG4_ADDR 0x188
#define LANE_CFG4_ADDR(unit) (COMPHY_REG_LANE_CFG4_ADDR * \
PHY_SHFT(unit))
#define SPREAD_SPECTRUM_CLK_EN BIT(7)
#define COMPHY_REG_GLOB_PHY_CTRL0_ADDR 0x1C1
#define GLOB_PHY_CTRL0_ADDR(unit) (COMPHY_REG_GLOB_PHY_CTRL0_ADDR * \
PHY_SHFT(unit))
#define SOFT_RESET BIT(0)
#define MODE_REFDIV 0x30
#define MODE_CORE_CLK_FREQ_SEL BIT(9)
#define MODE_PIPE_WIDTH_32 BIT(3)
#define MODE_REFDIV_OFFSET 4
#define MODE_REFDIV_LEN 2
#define MODE_REFDIV_MASK (0x3 << MODE_REFDIV_OFFSET)
#define MODE_REFDIV_BY_4 (0x2 << MODE_REFDIV_OFFSET)
#define COMPHY_REG_TEST_MODE_CTRL_ADDR 0x1C2
#define TEST_MODE_CTRL_ADDR(unit) (COMPHY_REG_TEST_MODE_CTRL_ADDR * \
PHY_SHFT(unit))
#define MODE_MARGIN_OVERRIDE BIT(2)
#define COMPHY_REG_GLOB_CLK_SRC_LO_ADDR 0x1C3
#define GLOB_CLK_SRC_LO_ADDR(unit) (COMPHY_REG_GLOB_CLK_SRC_LO_ADDR * \
PHY_SHFT(unit))
#define MODE_CLK_SRC BIT(0)
#define BUNDLE_PERIOD_SEL BIT(1)
#define BUNDLE_PERIOD_SCALE (BIT(2) | BIT(3))
#define BUNDLE_SAMPLE_CTRL BIT(4)
#define PLL_READY_DLY (BIT(5) | BIT(6) | BIT(7))
#define CFG_SEL_20B BIT(15)
#define COMPHY_REG_PWR_MGM_TIM1_ADDR 0x1D0
#define PWR_MGM_TIM1_ADDR(unit) (COMPHY_REG_PWR_MGM_TIM1_ADDR * \
PHY_SHFT(unit))
#define CFG_PM_OSCCLK_WAIT_OFF 12
#define CFG_PM_OSCCLK_WAIT_LEN 4
#define CFG_PM_OSCCLK_WAIT_MASK (((1 << CFG_PM_OSCCLK_WAIT_LEN) - 1) \
<< CFG_PM_OSCCLK_WAIT_OFF)
#define CFG_PM_RXDEN_WAIT_OFF 8
#define CFG_PM_RXDEN_WAIT_LEN 4
#define CFG_PM_RXDEN_WAIT_MASK (((1 << CFG_PM_RXDEN_WAIT_LEN) - 1) \
<< CFG_PM_RXDEN_WAIT_OFF)
#define CFG_PM_RXDEN_WAIT_1_UNIT (1 << CFG_PM_RXDEN_WAIT_OFF)
#define CFG_PM_RXDLOZ_WAIT_OFF 0
#define CFG_PM_RXDLOZ_WAIT_LEN 8
#define CFG_PM_RXDLOZ_WAIT_MASK (((1 << CFG_PM_RXDLOZ_WAIT_LEN) - 1) \
<< CFG_PM_RXDLOZ_WAIT_OFF)
#define CFG_PM_RXDLOZ_WAIT_7_UNIT (7 << CFG_PM_RXDLOZ_WAIT_OFF)
#define CFG_PM_RXDLOZ_WAIT_12_UNIT (0xC << CFG_PM_RXDLOZ_WAIT_OFF)
/* SGMII */
#define COMPHY_PHY_CFG1_OFFSET(lane) ((1 - (lane)) * 0x28)
#define PIN_PU_IVEREF_BIT BIT(1)
#define PIN_RESET_CORE_BIT BIT(11)
#define PIN_RESET_COMPHY_BIT BIT(12)
#define PIN_PU_PLL_BIT BIT(16)
#define PIN_PU_RX_BIT BIT(17)
#define PIN_PU_TX_BIT BIT(18)
#define PIN_TX_IDLE_BIT BIT(19)
#define GEN_RX_SEL_OFFSET 22
#define GEN_RX_SEL_MASK (0xF << GEN_RX_SEL_OFFSET)
#define GEN_TX_SEL_OFFSET 26
#define GEN_TX_SEL_MASK (0xF << GEN_TX_SEL_OFFSET)
#define PHY_RX_INIT_BIT BIT(30)
#define SD_SPEED_1_25_G 0x6
#define SD_SPEED_2_5_G 0x8
/* COMPHY status reg:
* lane0: PCIe/GbE0 PHY Status 1
* lane1: USB3/GbE1 PHY Status 1
*/
#define COMPHY_PHY_STATUS_OFFSET(lane) (0x18 + (1 - (lane)) * 0x28)
#define PHY_RX_INIT_DONE_BIT BIT(0)
#define PHY_PLL_READY_RX_BIT BIT(2)
#define PHY_PLL_READY_TX_BIT BIT(3)
#define SGMIIPHY_ADDR(off, base) ((((off) & 0x00007FF) * 2) + (base))
#define MAX_LANE_NR 3
/* comphy API */
int mvebu_3700_comphy_is_pll_locked(uint8_t comphy_index, uint32_t comphy_mode);
int mvebu_3700_comphy_power_off(uint8_t comphy_index, uint32_t comphy_mode);
int mvebu_3700_comphy_power_on(uint8_t comphy_index, uint32_t comphy_mode);
#endif /* _PHY_COMPHY_3700_H */

View File

@@ -0,0 +1,156 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
/* Marvell CP110 and A3700 common */
#ifndef _PHY_COMPHY_COMMON_H
#define _PHY_COMPHY_COMMON_H
/* #define DEBUG_COMPHY */
#ifdef DEBUG_COMPHY
#define debug(format...) printf(format)
#else
#define debug(format, arg...)
#endif
/* A lane is described by the following fields:
* - bit 1~0 represent comphy polarity invert
* - bit 7~2 represent comphy speed
* - bit 11~8 represent unit index
* - bit 16~12 represent mode
* - bit 17 represents comphy indication of clock source
* - bit 19-18 represents pcie width (in case of pcie comphy config.)
* - bit 31~20 reserved
*/
#define COMPHY_INVERT_OFFSET 0
#define COMPHY_INVERT_LEN 2
#define COMPHY_INVERT_MASK COMPHY_MASK(COMPHY_INVERT_OFFSET, \
COMPHY_INVERT_LEN)
#define COMPHY_SPEED_OFFSET (COMPHY_INVERT_OFFSET + COMPHY_INVERT_LEN)
#define COMPHY_SPEED_LEN 6
#define COMPHY_SPEED_MASK COMPHY_MASK(COMPHY_SPEED_OFFSET, \
COMPHY_SPEED_LEN)
#define COMPHY_UNIT_ID_OFFSET (COMPHY_SPEED_OFFSET + COMPHY_SPEED_LEN)
#define COMPHY_UNIT_ID_LEN 4
#define COMPHY_UNIT_ID_MASK COMPHY_MASK(COMPHY_UNIT_ID_OFFSET, \
COMPHY_UNIT_ID_LEN)
#define COMPHY_MODE_OFFSET (COMPHY_UNIT_ID_OFFSET + COMPHY_UNIT_ID_LEN)
#define COMPHY_MODE_LEN 5
#define COMPHY_MODE_MASK COMPHY_MASK(COMPHY_MODE_OFFSET, COMPHY_MODE_LEN)
#define COMPHY_CLK_SRC_OFFSET (COMPHY_MODE_OFFSET + COMPHY_MODE_LEN)
#define COMPHY_CLK_SRC_LEN 1
#define COMPHY_CLK_SRC_MASK COMPHY_MASK(COMPHY_CLK_SRC_OFFSET, \
COMPHY_CLK_SRC_LEN)
#define COMPHY_PCI_WIDTH_OFFSET (COMPHY_CLK_SRC_OFFSET + COMPHY_CLK_SRC_LEN)
#define COMPHY_PCI_WIDTH_LEN 3
#define COMPHY_PCI_WIDTH_MASK COMPHY_MASK(COMPHY_PCI_WIDTH_OFFSET, \
COMPHY_PCI_WIDTH_LEN)
#define COMPHY_MASK(offset, len) (((1 << (len)) - 1) << (offset))
/* Macro which extracts mode from lane description */
#define COMPHY_GET_MODE(x) (((x) & COMPHY_MODE_MASK) >> \
COMPHY_MODE_OFFSET)
/* Macro which extracts unit index from lane description */
#define COMPHY_GET_ID(x) (((x) & COMPHY_UNIT_ID_MASK) >> \
COMPHY_UNIT_ID_OFFSET)
/* Macro which extracts speed from lane description */
#define COMPHY_GET_SPEED(x) (((x) & COMPHY_SPEED_MASK) >> \
COMPHY_SPEED_OFFSET)
/* Macro which extracts clock source indication from lane description */
#define COMPHY_GET_CLK_SRC(x) (((x) & COMPHY_CLK_SRC_MASK) >> \
COMPHY_CLK_SRC_OFFSET)
/* Macro which extracts pcie width indication from lane description */
#define COMPHY_GET_PCIE_WIDTH(x) (((x) & COMPHY_PCI_WIDTH_MASK) >> \
COMPHY_PCI_WIDTH_OFFSET)
/* Macro which extracts the polarity invert from lane description */
#define COMPHY_GET_POLARITY_INVERT(x) (((x) & COMPHY_INVERT_MASK) >> \
COMPHY_INVERT_OFFSET)
#define COMPHY_SATA_MODE 0x1
#define COMPHY_SGMII_MODE 0x2 /* SGMII 1G */
#define COMPHY_HS_SGMII_MODE 0x3 /* SGMII 2.5G */
#define COMPHY_USB3H_MODE 0x4
#define COMPHY_USB3D_MODE 0x5
#define COMPHY_PCIE_MODE 0x6
#define COMPHY_RXAUI_MODE 0x7
#define COMPHY_XFI_MODE 0x8
#define COMPHY_SFI_MODE 0x9
#define COMPHY_USB3_MODE 0xa
#define COMPHY_AP_MODE 0xb
#define COMPHY_UNUSED 0xFFFFFFFF
/* Polarity invert macro */
#define COMPHY_POLARITY_NO_INVERT 0
#define COMPHY_POLARITY_TXD_INVERT 1
#define COMPHY_POLARITY_RXD_INVERT 2
#define COMPHY_POLARITY_ALL_INVERT (COMPHY_POLARITY_TXD_INVERT | \
COMPHY_POLARITY_RXD_INVERT)
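/*
 * Illustration (not part of the original patch): a lane descriptor is
 * composed from the fields above and decoded with the COMPHY_GET_* macros,
 * e.g. SATA mode with TXD polarity inversion:
 *
 *	uint32_t comphy_mode =
 *		(COMPHY_SATA_MODE << COMPHY_MODE_OFFSET) |
 *		(COMPHY_POLARITY_TXD_INVERT << COMPHY_INVERT_OFFSET);
 *
 *	COMPHY_GET_MODE(comphy_mode)             -> COMPHY_SATA_MODE
 *	COMPHY_GET_POLARITY_INVERT(comphy_mode)  -> COMPHY_POLARITY_TXD_INVERT
 *
 * Such a descriptor is what platform code passes to the comphy power-on
 * API, e.g. mvebu_3700_comphy_power_on().
 */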
enum reg_width_type {
REG_16BIT = 0,
REG_32BIT,
};
enum {
COMPHY_LANE0 = 0,
COMPHY_LANE1,
COMPHY_LANE2,
COMPHY_LANE3,
COMPHY_LANE4,
COMPHY_LANE5,
COMPHY_LANE_MAX,
};
static inline uint32_t polling_with_timeout(uintptr_t addr, uint32_t val,
uint32_t mask,
uint32_t usec_timeout,
enum reg_width_type type)
{
uint32_t data;
do {
udelay(1);
if (type == REG_16BIT)
data = mmio_read_16(addr) & mask;
else
data = mmio_read_32(addr) & mask;
} while (data != val && --usec_timeout > 0);
if (usec_timeout == 0)
return data;
return 0;
}
static inline void reg_set(uintptr_t addr, uint32_t data, uint32_t mask)
{
debug("<atf>: WR to addr = 0x%lx, data = 0x%x (mask = 0x%x) - ",
addr, data, mask);
debug("old value = 0x%x ==> ", mmio_read_32(addr));
mmio_clrsetbits_32(addr, mask, data);
debug("new val 0x%x\n", mmio_read_32(addr));
}
static inline void __unused reg_set16(uintptr_t addr, uint16_t data,
uint16_t mask)
{
debug("<atf>: WR to addr = 0x%lx, data = 0x%x (mask = 0x%x) - ",
addr, data, mask);
debug("old value = 0x%x ==> ", mmio_read_16(addr));
mmio_clrsetbits_16(addr, mask, data);
debug("new val 0x%x\n", mmio_read_16(addr));
}
#endif /* _PHY_COMPHY_COMMON_H */

View File

@@ -15,78 +15,15 @@
#include <spinlock.h>
#include "mvebu.h"
#include "comphy-cp110.h"
#include "phy-comphy-cp110.h"
#include "phy-comphy-common.h"
/* #define DEBUG_COMPHY */
#ifdef DEBUG_COMPHY
#define debug(format...) printf(format)
#if __has_include("phy-porting-layer.h")
#include "phy-porting-layer.h"
#else
#define debug(format, arg...)
#include "phy-default-porting-layer.h"
#endif
/* A lane is described by 4 fields:
* - bit 1~0 represent comphy polarity invert
* - bit 7~2 represent comphy speed
* - bit 11~8 represent unit index
* - bit 16~12 represent mode
* - bit 17 represent comphy indication of clock source
* - bit 19-18 represents pcie width (in case of pcie comphy config.)
* - bit 31~20 reserved
*/
#define COMPHY_INVERT_OFFSET 0
#define COMPHY_INVERT_LEN 2
#define COMPHY_INVERT_MASK COMPHY_MASK(COMPHY_INVERT_OFFSET, \
COMPHY_INVERT_LEN)
#define COMPHY_SPEED_OFFSET (COMPHY_INVERT_OFFSET + COMPHY_INVERT_LEN)
#define COMPHY_SPEED_LEN 6
#define COMPHY_SPEED_MASK COMPHY_MASK(COMPHY_SPEED_OFFSET, \
COMPHY_SPEED_LEN)
#define COMPHY_UNIT_ID_OFFSET (COMPHY_SPEED_OFFSET + COMPHY_SPEED_LEN)
#define COMPHY_UNIT_ID_LEN 4
#define COMPHY_UNIT_ID_MASK COMPHY_MASK(COMPHY_UNIT_ID_OFFSET, \
COMPHY_UNIT_ID_LEN)
#define COMPHY_MODE_OFFSET (COMPHY_UNIT_ID_OFFSET + COMPHY_UNIT_ID_LEN)
#define COMPHY_MODE_LEN 5
#define COMPHY_MODE_MASK COMPHY_MASK(COMPHY_MODE_OFFSET, COMPHY_MODE_LEN)
#define COMPHY_CLK_SRC_OFFSET (COMPHY_MODE_OFFSET + COMPHY_MODE_LEN)
#define COMPHY_CLK_SRC_LEN 1
#define COMPHY_CLK_SRC_MASK COMPHY_MASK(COMPHY_CLK_SRC_OFFSET, \
COMPHY_CLK_SRC_LEN)
#define COMPHY_PCI_WIDTH_OFFSET (COMPHY_CLK_SRC_OFFSET + COMPHY_CLK_SRC_LEN)
#define COMPHY_PCI_WIDTH_LEN 3
#define COMPHY_PCI_WIDTH_MASK COMPHY_MASK(COMPHY_PCI_WIDTH_OFFSET, \
COMPHY_PCI_WIDTH_LEN)
#define COMPHY_MASK(offset, len) (((1 << (len)) - 1) << (offset))
/* Macro which extracts mode from lane description */
#define COMPHY_GET_MODE(x) (((x) & COMPHY_MODE_MASK) >> \
COMPHY_MODE_OFFSET)
/* Macro which extracts unit index from lane description */
#define COMPHY_GET_ID(x) (((x) & COMPHY_UNIT_ID_MASK) >> \
COMPHY_UNIT_ID_OFFSET)
/* Macro which extracts speed from lane description */
#define COMPHY_GET_SPEED(x) (((x) & COMPHY_SPEED_MASK) >> \
COMPHY_SPEED_OFFSET)
/* Macro which extracts clock source indication from lane description */
#define COMPHY_GET_CLK_SRC(x) (((x) & COMPHY_CLK_SRC_MASK) >> \
COMPHY_CLK_SRC_OFFSET)
/* Macro which extracts pcie width indication from lane description */
#define COMPHY_GET_PCIE_WIDTH(x) (((x) & COMPHY_PCI_WIDTH_MASK) >> \
COMPHY_PCI_WIDTH_OFFSET)
#define COMPHY_SATA_MODE 0x1
#define COMPHY_SGMII_MODE 0x2 /* SGMII 1G */
#define COMPHY_HS_SGMII_MODE 0x3 /* SGMII 2.5G */
#define COMPHY_USB3H_MODE 0x4
#define COMPHY_USB3D_MODE 0x5
#define COMPHY_PCIE_MODE 0x6
#define COMPHY_RXAUI_MODE 0x7
#define COMPHY_XFI_MODE 0x8
#define COMPHY_SFI_MODE 0x9
#define COMPHY_USB3_MODE 0xa
#define COMPHY_AP_MODE 0xb
/* COMPHY speed macro */
#define COMPHY_SPEED_1_25G 0 /* SGMII 1G */
#define COMPHY_SPEED_2_5G 1
@@ -129,21 +66,6 @@
*/
spinlock_t cp110_mac_reset_lock;
enum reg_width_type {
REG_16BIT = 0,
REG_32BIT,
};
enum {
COMPHY_LANE0 = 0,
COMPHY_LANE1,
COMPHY_LANE2,
COMPHY_LANE3,
COMPHY_LANE4,
COMPHY_LANE5,
COMPHY_LANE_MAX,
};
/* These values come from the PCI Express Spec */
enum pcie_link_width {
PCIE_LNK_WIDTH_RESRV = 0x00,
@@ -157,36 +79,24 @@ enum pcie_link_width {
PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
};
static inline uint32_t polling_with_timeout(uintptr_t addr,
uint32_t val,
uint32_t mask,
uint32_t usec_timeout,
enum reg_width_type type)
_Bool rx_trainng_done[AP_NUM][CP_NUM][MAX_LANE_NR] = {0};
static void mvebu_cp110_get_ap_and_cp_nr(uint8_t *ap_nr, uint8_t *cp_nr,
uint64_t comphy_base)
{
uint32_t data;
#if (AP_NUM == 1)
*ap_nr = 0;
#else
*ap_nr = (((comphy_base & ~0xffffff) - MVEBU_AP_IO_BASE(0)) /
AP_IO_OFFSET);
#endif
do {
udelay(1);
if (type == REG_16BIT)
data = mmio_read_16(addr) & mask;
else
data = mmio_read_32(addr) & mask;
} while (data != val && --usec_timeout > 0);
*cp_nr = (((comphy_base & ~0xffffff) - MVEBU_AP_IO_BASE(*ap_nr)) /
MVEBU_CP_OFFSET);
if (usec_timeout == 0)
return data;
return 0;
}
static inline void reg_set(uintptr_t addr, uint32_t data, uint32_t mask)
{
debug("<atf>: WR to addr = %#010lx, data = %#010x (mask = %#010x) - ",
addr, data, mask);
debug("old value = %#010x ==> ", mmio_read_32(addr));
mmio_clrsetbits_32(addr, mask, data);
debug("new val %#010x\n", mmio_read_32(addr));
debug("cp_base 0x%llx, ap_io_base 0x%lx, cp_offset 0x%lx\n",
comphy_base, (unsigned long)MVEBU_AP_IO_BASE(*ap_nr),
(unsigned long)MVEBU_CP_OFFSET);
}
/* Clear PIPE selector - avoid collision with previous configuration */
@@ -413,10 +323,17 @@ static int mvebu_cp110_comphy_sata_power_on(uint64_t comphy_base,
{
uintptr_t hpipe_addr, sd_ip_addr, comphy_addr;
uint32_t mask, data;
uint8_t ap_nr, cp_nr;
int ret = 0;
debug_enter();
mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base);
const struct sata_params *sata_static_values =
&sata_static_values_tab[ap_nr][cp_nr][comphy_index];
/* configure phy selector for SATA */
mvebu_cp110_comphy_set_phy_selector(comphy_base,
comphy_index, comphy_mode);
@ -480,13 +397,17 @@ static int mvebu_cp110_comphy_sata_power_on(uint64_t comphy_base,
debug("stage: Analog parameters from ETP(HW)\n");
/* G1 settings */
mask = HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK;
data = 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET;
data = sata_static_values->g1_rx_selmupi <<
HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET;
mask |= HPIPE_G1_SET_1_G1_RX_SELMUPF_MASK;
data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET;
data |= sata_static_values->g1_rx_selmupf <<
HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET;
mask |= HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK;
data |= 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET;
data |= sata_static_values->g1_rx_selmufi <<
HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET;
mask |= HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK;
data |= 0x3 << HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET;
data |= sata_static_values->g1_rx_selmuff <<
HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET;
mask |= HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK;
data |= 0x1 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SET_1_REG, data, mask);
@ -505,26 +426,34 @@ static int mvebu_cp110_comphy_sata_power_on(uint64_t comphy_base,
/* G2 settings */
mask = HPIPE_G2_SET_1_G2_RX_SELMUPI_MASK;
data = 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET;
data = sata_static_values->g2_rx_selmupi <<
HPIPE_G2_SET_1_G2_RX_SELMUPI_OFFSET;
mask |= HPIPE_G2_SET_1_G2_RX_SELMUPF_MASK;
data |= 0x1 << HPIPE_G2_SET_1_G2_RX_SELMUPF_OFFSET;
data |= sata_static_values->g2_rx_selmupf <<
HPIPE_G2_SET_1_G2_RX_SELMUPF_OFFSET;
mask |= HPIPE_G2_SET_1_G2_RX_SELMUFI_MASK;
data |= 0x0 << HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET;
data |= sata_static_values->g2_rx_selmufi <<
HPIPE_G2_SET_1_G2_RX_SELMUFI_OFFSET;
mask |= HPIPE_G2_SET_1_G2_RX_SELMUFF_MASK;
data |= 0x3 << HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET;
data |= sata_static_values->g2_rx_selmuff <<
HPIPE_G2_SET_1_G2_RX_SELMUFF_OFFSET;
mask |= HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_MASK;
data |= 0x1 << HPIPE_G2_SET_1_G2_RX_DIGCK_DIV_OFFSET;
reg_set(hpipe_addr + HPIPE_G2_SET_1_REG, data, mask);
/* G3 settings */
mask = HPIPE_G3_SET_1_G3_RX_SELMUPI_MASK;
data = 0x2 << HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET;
data = sata_static_values->g3_rx_selmupi <<
HPIPE_G3_SET_1_G3_RX_SELMUPI_OFFSET;
mask |= HPIPE_G3_SET_1_G3_RX_SELMUPF_MASK;
data |= 0x2 << HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET;
data |= sata_static_values->g3_rx_selmupf <<
HPIPE_G3_SET_1_G3_RX_SELMUPF_OFFSET;
mask |= HPIPE_G3_SET_1_G3_RX_SELMUFI_MASK;
data |= 0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET;
data |= sata_static_values->g3_rx_selmufi <<
HPIPE_G3_SET_1_G3_RX_SELMUFI_OFFSET;
mask |= HPIPE_G3_SET_1_G3_RX_SELMUFF_MASK;
data |= 0x3 << HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET;
data |= sata_static_values->g3_rx_selmuff <<
HPIPE_G3_SET_1_G3_RX_SELMUFF_OFFSET;
mask |= HPIPE_G3_SET_1_G3_RX_DFE_EN_MASK;
data |= 0x1 << HPIPE_G3_SET_1_G3_RX_DFE_EN_OFFSET;
mask |= HPIPE_G3_SET_1_G3_RX_DIGCK_DIV_MASK;
@ -577,9 +506,11 @@ static int mvebu_cp110_comphy_sata_power_on(uint64_t comphy_base,
/* G3 Setting 3 */
mask = HPIPE_G3_FFE_CAP_SEL_MASK;
data = 0xf << HPIPE_G3_FFE_CAP_SEL_OFFSET;
data = sata_static_values->g3_ffe_cap_sel <<
HPIPE_G3_FFE_CAP_SEL_OFFSET;
mask |= HPIPE_G3_FFE_RES_SEL_MASK;
data |= 0x4 << HPIPE_G3_FFE_RES_SEL_OFFSET;
data |= sata_static_values->g3_ffe_res_sel <<
HPIPE_G3_FFE_RES_SEL_OFFSET;
mask |= HPIPE_G3_FFE_SETTING_FORCE_MASK;
data |= 0x1 << HPIPE_G3_FFE_SETTING_FORCE_OFFSET;
mask |= HPIPE_G3_FFE_DEG_RES_LEVEL_MASK;
@ -590,12 +521,12 @@ static int mvebu_cp110_comphy_sata_power_on(uint64_t comphy_base,
/* G3 Setting 4 */
mask = HPIPE_G3_DFE_RES_MASK;
data = 0x1 << HPIPE_G3_DFE_RES_OFFSET;
data = sata_static_values->g3_dfe_res << HPIPE_G3_DFE_RES_OFFSET;
reg_set(hpipe_addr + HPIPE_G3_SETTING_4_REG, data, mask);
/* Offset Phase Control */
mask = HPIPE_OS_PH_OFFSET_MASK;
data = 0x61 << HPIPE_OS_PH_OFFSET_OFFSET;
data = sata_static_values->align90 << HPIPE_OS_PH_OFFSET_OFFSET;
mask |= HPIPE_OS_PH_OFFSET_FORCE_MASK;
data |= 0x1 << HPIPE_OS_PH_OFFSET_FORCE_OFFSET;
mask |= HPIPE_OS_PH_VALID_MASK;
@ -610,41 +541,77 @@ static int mvebu_cp110_comphy_sata_power_on(uint64_t comphy_base,
/* Set G1 TX amplitude and TX post emphasis value */
mask = HPIPE_G1_SET_0_G1_TX_AMP_MASK;
data = 0x8 << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET;
data = sata_static_values->g1_amp << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET;
mask |= HPIPE_G1_SET_0_G1_TX_AMP_ADJ_MASK;
data |= 0x1 << HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET;
data |= sata_static_values->g1_tx_amp_adj <<
HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET;
mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_MASK;
data |= 0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET;
data |= sata_static_values->g1_emph <<
HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET;
mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_EN_MASK;
data |= 0x1 << HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET;
data |= sata_static_values->g1_emph_en <<
HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, data, mask);
/* Set G1 emph */
mask = HPIPE_G1_SET_2_G1_TX_EMPH0_EN_MASK;
data = sata_static_values->g1_tx_emph_en <<
HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET;
mask |= HPIPE_G1_SET_2_G1_TX_EMPH0_MASK;
data |= sata_static_values->g1_tx_emph <<
HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SET_2_REG, data, mask);
/* Set G2 TX amplitude and TX post emphasis value */
mask = HPIPE_G2_SET_0_G2_TX_AMP_MASK;
data = 0xa << HPIPE_G2_SET_0_G2_TX_AMP_OFFSET;
data = sata_static_values->g2_amp << HPIPE_G2_SET_0_G2_TX_AMP_OFFSET;
mask |= HPIPE_G2_SET_0_G2_TX_AMP_ADJ_MASK;
data |= 0x1 << HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET;
data |= sata_static_values->g2_tx_amp_adj <<
HPIPE_G2_SET_0_G2_TX_AMP_ADJ_OFFSET;
mask |= HPIPE_G2_SET_0_G2_TX_EMPH1_MASK;
data |= 0x2 << HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET;
data |= sata_static_values->g2_emph <<
HPIPE_G2_SET_0_G2_TX_EMPH1_OFFSET;
mask |= HPIPE_G2_SET_0_G2_TX_EMPH1_EN_MASK;
data |= 0x1 << HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET;
data |= sata_static_values->g2_emph_en <<
HPIPE_G2_SET_0_G2_TX_EMPH1_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_G2_SET_0_REG, data, mask);
/* Set G2 emph */
mask = HPIPE_G2_SET_2_G2_TX_EMPH0_EN_MASK;
data = sata_static_values->g2_tx_emph_en <<
HPIPE_G2_SET_2_G2_TX_EMPH0_EN_OFFSET;
mask |= HPIPE_G2_SET_2_G2_TX_EMPH0_MASK;
data |= sata_static_values->g2_tx_emph <<
HPIPE_G2_SET_2_G2_TX_EMPH0_OFFSET;
reg_set(hpipe_addr + HPIPE_G2_SET_2_REG, data, mask);
/* Set G3 TX amplitude and TX post emphasis value */
mask = HPIPE_G3_SET_0_G3_TX_AMP_MASK;
data = 0x1e << HPIPE_G3_SET_0_G3_TX_AMP_OFFSET;
data = sata_static_values->g3_amp << HPIPE_G3_SET_0_G3_TX_AMP_OFFSET;
mask |= HPIPE_G3_SET_0_G3_TX_AMP_ADJ_MASK;
data |= 0x1 << HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET;
data |= sata_static_values->g3_tx_amp_adj <<
HPIPE_G3_SET_0_G3_TX_AMP_ADJ_OFFSET;
mask |= HPIPE_G3_SET_0_G3_TX_EMPH1_MASK;
data |= 0xe << HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET;
data |= sata_static_values->g3_emph <<
HPIPE_G3_SET_0_G3_TX_EMPH1_OFFSET;
mask |= HPIPE_G3_SET_0_G3_TX_EMPH1_EN_MASK;
data |= 0x1 << HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET;
data |= sata_static_values->g3_emph_en <<
HPIPE_G3_SET_0_G3_TX_EMPH1_EN_OFFSET;
mask |= HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_MASK;
data |= 0x4 << HPIPE_G3_SET_0_G3_TX_SLEW_RATE_SEL_OFFSET;
mask |= HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_MASK;
data |= 0x0 << HPIPE_G3_SET_0_G3_TX_SLEW_CTRL_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_G3_SET_0_REG, data, mask);
/* Set G3 emph */
mask = HPIPE_G3_SET_2_G3_TX_EMPH0_EN_MASK;
data = sata_static_values->g3_tx_emph_en <<
HPIPE_G3_SET_2_G3_TX_EMPH0_EN_OFFSET;
mask |= HPIPE_G3_SET_2_G3_TX_EMPH0_MASK;
data |= sata_static_values->g3_tx_emph <<
HPIPE_G3_SET_2_G3_TX_EMPH0_OFFSET;
reg_set(hpipe_addr + HPIPE_G3_SET_2_REG, data, mask);
/* SERDES External Configuration 2 register */
mask = SD_EXTERNAL_CONFIG2_SSC_ENABLE_MASK;
data = 0x1 << SD_EXTERNAL_CONFIG2_SSC_ENABLE_OFFSET;
@ -779,7 +746,7 @@ static int mvebu_cp110_comphy_sgmii_power_on(uint64_t comphy_base,
data = 0x0 << HPIPE_PWR_CTR_DTL_FLOOP_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_PWR_CTR_DTL_REG, data, mask);
/* Set analog parameters from ETP(HW) - for now use the default datas */
/* Set analog parameters from ETP(HW) - for now use the default data */
debug("stage: Analog parameters from ETP(HW)\n");
reg_set(hpipe_addr + HPIPE_G1_SET_0_REG,
@ -835,9 +802,37 @@ static int mvebu_cp110_comphy_xfi_power_on(uint64_t comphy_base,
uintptr_t hpipe_addr, sd_ip_addr, comphy_addr, addr;
uint32_t mask, data, speed = COMPHY_GET_SPEED(comphy_mode);
int ret = 0;
uint8_t ap_nr, cp_nr;
debug_enter();
mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base);
if (rx_trainng_done[ap_nr][cp_nr][comphy_index]) {
debug("Skip %s for comphy[%d][%d][%d], due to rx training\n",
__func__, ap_nr, cp_nr, comphy_index);
return 0;
}
const struct xfi_params *xfi_static_values =
&xfi_static_values_tab[ap_nr][cp_nr][comphy_index];
debug("%s: the ap_nr = %d, cp_nr = %d, comphy_index %d\n",
__func__, ap_nr, cp_nr, comphy_index);
debug("g1_ffe_cap_sel= 0x%x, g1_ffe_res_sel= 0x%x, g1_dfe_res= 0x%x\n",
xfi_static_values->g1_ffe_cap_sel,
xfi_static_values->g1_ffe_res_sel,
xfi_static_values->g1_dfe_res);
if (!xfi_static_values->valid) {
ERROR("[ap%d][cp[%d][comphy:%d]: Has no valid static params\n",
ap_nr, cp_nr, comphy_index);
ERROR("[ap%d][cp[%d][comphy:%d]: porting layer needs update\n",
ap_nr, cp_nr, comphy_index);
return -EINVAL;
}
if ((speed != COMPHY_SPEED_5_15625G) &&
(speed != COMPHY_SPEED_10_3125G) &&
(speed != COMPHY_SPEED_DEFAULT)) {
@ -966,16 +961,27 @@ static int mvebu_cp110_comphy_xfi_power_on(uint64_t comphy_base,
data = 0x6 << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET;
} else {
mask = HPIPE_G1_SET_0_G1_TX_AMP_MASK;
data = 0x1c << HPIPE_G1_SET_0_G1_TX_AMP_OFFSET;
data = xfi_static_values->g1_amp <<
HPIPE_G1_SET_0_G1_TX_AMP_OFFSET;
mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_MASK;
data |= 0xe << HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET;
data |= xfi_static_values->g1_emph <<
HPIPE_G1_SET_0_G1_TX_EMPH1_OFFSET;
mask |= HPIPE_G1_SET_0_G1_TX_EMPH1_EN_MASK;
data |= xfi_static_values->g1_emph_en <<
HPIPE_G1_SET_0_G1_TX_EMPH1_EN_OFFSET;
mask |= HPIPE_G1_SET_0_G1_TX_AMP_ADJ_MASK;
data |= xfi_static_values->g1_tx_amp_adj <<
HPIPE_G1_SET_0_G1_TX_AMP_ADJ_OFFSET;
}
reg_set(hpipe_addr + HPIPE_G1_SET_0_REG, data, mask);
/* Generation 1 setting 2 (G1_Setting_2) */
mask = HPIPE_G1_SET_2_G1_TX_EMPH0_MASK;
data = 0x0 << HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET;
data = xfi_static_values->g1_tx_emph <<
HPIPE_G1_SET_2_G1_TX_EMPH0_OFFSET;
mask |= HPIPE_G1_SET_2_G1_TX_EMPH0_EN_MASK;
data |= 0x1 << HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET;
data |= xfi_static_values->g1_tx_emph_en <<
HPIPE_G1_SET_2_G1_TX_EMPH0_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SET_2_REG, data, mask);
/* Transmitter Slew Rate Control register (tx_reg1) */
mask = HPIPE_TX_REG1_TX_EMPH_RES_MASK;
@ -1004,13 +1010,17 @@ static int mvebu_cp110_comphy_xfi_power_on(uint64_t comphy_base,
data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET;
} else {
mask |= HPIPE_G1_SET_1_G1_RX_SELMUPI_MASK;
data |= 0x2 << HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET;
data |= xfi_static_values->g1_rx_selmupi <<
HPIPE_G1_SET_1_G1_RX_SELMUPI_OFFSET;
mask |= HPIPE_G1_SET_1_G1_RX_SELMUPF_MASK;
data |= 0x2 << HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET;
data |= xfi_static_values->g1_rx_selmupf <<
HPIPE_G1_SET_1_G1_RX_SELMUPF_OFFSET;
mask |= HPIPE_G1_SET_1_G1_RX_SELMUFI_MASK;
data |= 0x0 << HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET;
data |= xfi_static_values->g1_rx_selmufi <<
HPIPE_G1_SET_1_G1_RX_SELMUFI_OFFSET;
mask |= HPIPE_G1_SET_1_G1_RX_SELMUFF_MASK;
data |= 0x1 << HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET;
data |= xfi_static_values->g1_rx_selmuff <<
HPIPE_G1_SET_1_G1_RX_SELMUFF_OFFSET;
mask |= HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_MASK;
data |= 0x3 << HPIPE_G1_SET_1_G1_RX_DIGCK_DIV_OFFSET;
}
@ -1038,8 +1048,43 @@ static int mvebu_cp110_comphy_xfi_power_on(uint64_t comphy_base,
data |= 0x4 << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET;
mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK;
data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
} else {
mask |= HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK;
data |= xfi_static_values->g1_ffe_cap_sel <<
HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET;
mask |= HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK;
data |= xfi_static_values->g1_ffe_res_sel <<
HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET;
mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK;
data |= 0x1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
/* Use the value from CAL_OS_PH_EXT */
mask = HPIPE_CAL_RXCLKALIGN_90_EXT_EN_MASK;
data = 1 << HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET;
reg_set(hpipe_addr +
HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG,
data, mask);
/* Update align90 */
mask = HPIPE_CAL_OS_PH_EXT_MASK;
data = xfi_static_values->align90 << HPIPE_CAL_OS_PH_EXT_OFFSET;
reg_set(hpipe_addr +
HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG,
data, mask);
/* Force DFE resolution (use gen table value) */
mask = HPIPE_DFE_RES_FORCE_MASK;
data = 0x0 << HPIPE_DFE_RES_FORCE_OFFSET;
reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask);
/* 0x111-G1 DFE_Setting_4 */
mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK;
data = xfi_static_values->g1_dfe_res <<
HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask);
}
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
/* Configure RX training timer */
mask = HPIPE_RX_TRAIN_TIMER_MASK;
@ -1949,192 +1994,255 @@ static int mvebu_cp110_comphy_usb3_power_on(uint64_t comphy_base,
return ret;
}
/* This function performs RX training for one Feed Forward Equalization (FFE)
* value.
* The RX training result is stored in the 'Saved DFE values Register' (SAV_F0D).
*
* Return '0' on success, an error code in case of failure.
*/
static int mvebu_cp110_comphy_test_single_ffe(uint64_t comphy_base,
uint8_t comphy_index,
uint32_t ffe, uint32_t *result)
int mvebu_cp110_comphy_xfi_rx_training(uint64_t comphy_base,
uint8_t comphy_index)
{
uint32_t mask, data, timeout;
uint32_t g1_ffe_cap_sel, g1_ffe_res_sel, align90, g1_dfe_res;
uintptr_t hpipe_addr, sd_ip_addr;
uint8_t ap_nr, cp_nr;
mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base);
hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
comphy_index);
sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
comphy_index);
debug_enter();
debug("stage: RF Reset\n");
/* Release from hard reset */
mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
data = 0x0 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
data |= 0x0 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
mask |= SD_EXTERNAL_CONFIG1_RF_RESET_IN_MASK;
data |= 0x0 << SD_EXTERNAL_CONFIG1_RF_RESET_IN_OFFSET;
reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
mask = SD_EXTERNAL_CONFIG1_RESET_IN_MASK;
data = 0x1 << SD_EXTERNAL_CONFIG1_RESET_IN_OFFSET;
mask |= SD_EXTERNAL_CONFIG1_RESET_CORE_MASK;
data |= 0x1 << SD_EXTERNAL_CONFIG1_RESET_CORE_OFFSET;
reg_set(sd_ip_addr + SD_EXTERNAL_CONFIG1_REG, data, mask);
/* Wait 50ms - until band gap and ref clock ready */
mdelay(50);
debug("Preparation for rx_training\n\n");
/* Use the FFE table */
mask = HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK;
data = 0 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
/* Use auto-calibration value */
mask = HPIPE_CAL_RXCLKALIGN_90_EXT_EN_MASK;
data = 0 << HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG,
data, mask);
/* Use Tx/Rx training results */
mask = HPIPE_DFE_RES_FORCE_MASK;
data = 0 << HPIPE_DFE_RES_FORCE_OFFSET;
reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask);
debug("PRBS31 loppback\n\n");
/* Configure PRBS counters */
mask = HPIPE_PHY_TEST_PATTERN_SEL_MASK;
data = 0xe << HPIPE_PHY_TEST_PATTERN_SEL_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
mask = HPIPE_PHY_TEST_DATA_MASK;
data = 0x64 << HPIPE_PHY_TEST_DATA_OFFSET;
data = 0xc4 << HPIPE_PHY_TEST_DATA_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_DATA_REG, data, mask);
mask = HPIPE_PHY_TEST_EN_MASK;
data = 0x1 << HPIPE_PHY_TEST_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
mdelay(50);
mdelay(10);
debug("Enable TX/RX training\n\n");
/* Set the FFE value */
mask = HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK;
data = ffe << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
/* Start RX training */
mask = SD_EXTERNAL_STATUS_START_RX_TRAINING_MASK;
data = 1 << SD_EXTERNAL_STATUS_START_RX_TRAINING_OFFSET;
reg_set(sd_ip_addr + SD_EXTERNAL_STATUS_REG, data, mask);
mask = HPIPE_TRX_RX_TRAIN_EN_MASK;
data = 0x1 << HPIPE_TRX_RX_TRAIN_EN_OFFSET;
mask |= HPIPE_TRX_RX_ANA_IF_CLK_ENE_MASK;
data |= 0x1 << HPIPE_TRX_RX_ANA_IF_CLK_ENE_OFFSET;
mask |= HPIPE_TRX_TX_CTRL_CLK_EN_MASK;
data |= 0x1 << HPIPE_TRX_TX_CTRL_CLK_EN_OFFSET;
mask |= HPIPE_TRX_UPDATE_THEN_HOLD_MASK;
data |= 0x1 << HPIPE_TRX_UPDATE_THEN_HOLD_OFFSET;
mask |= HPIPE_TRX_TX_F0T_EO_BASED_MASK;
data |= 0x1 << HPIPE_TRX_TX_F0T_EO_BASED_OFFSET;
reg_set(hpipe_addr + HPIPE_TRX_TRAIN_CTRL_0_REG, data, mask);
/* Check the result of RX training */
timeout = RX_TRAINING_TIMEOUT;
mask = HPIPE_INTERRUPT_TRX_TRAIN_DONE_OFFSET |
HPIPE_INTERRUPT_DFE_DONE_INT_OFFSET |
HPIPE_INTERRUPT_RX_TRAIN_COMPLETE_INT_MASK;
while (timeout) {
data = mmio_read_32(sd_ip_addr + SD_EXTERNAL_STATAUS1_REG);
if (data & SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_COMP_MASK)
data = mmio_read_32(hpipe_addr + HPIPE_INTERRUPT_1_REGISTER);
if (data & mask)
break;
mdelay(1);
timeout--;
}
if (timeout == 0)
debug("RX training result: interrupt reg 0x%lx = 0x%x\n\n",
hpipe_addr + HPIPE_INTERRUPT_1_REGISTER, data);
if (timeout == 0 || data & HPIPE_TRX_TRAIN_TIME_OUT_INT_MASK) {
ERROR("Rx training timeout...\n");
return -ETIMEDOUT;
}
if (data & SD_EXTERNAL_STATAUS1_REG_RX_TRAIN_FAILED_MASK)
if (data & HPIPE_TRX_TRAIN_FAILED_MASK) {
ERROR("Rx training failed...\n");
return -EINVAL;
}
/* Stop RX training */
mask = SD_EXTERNAL_STATUS_START_RX_TRAINING_MASK;
data = 0 << SD_EXTERNAL_STATUS_START_RX_TRAINING_OFFSET;
reg_set(sd_ip_addr + SD_EXTERNAL_STATUS_REG, data, mask);
mask = HPIPE_TRX_RX_TRAIN_EN_MASK;
data = 0x0 << HPIPE_TRX_RX_TRAIN_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_TRX_TRAIN_CTRL_0_REG, data, mask);
/* Read the result */
data = mmio_read_32(hpipe_addr + HPIPE_SAVED_DFE_VALUES_REG);
data &= HPIPE_SAVED_DFE_VALUES_SAV_F0D_MASK;
data >>= HPIPE_SAVED_DFE_VALUES_SAV_F0D_OFFSET;
*result = data;
debug("Training done, reading results...\n\n");
mask = HPIPE_PHY_TEST_RESET_MASK;
data = 0x1 << HPIPE_PHY_TEST_RESET_OFFSET;
mask |= HPIPE_PHY_TEST_EN_MASK;
data |= 0x0 << HPIPE_PHY_TEST_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
mask = HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_MASK;
g1_ffe_res_sel = ((mmio_read_32(hpipe_addr +
HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG)
& mask) >> HPIPE_ADAPTED_FFE_ADAPTED_FFE_RES_OFFSET);
mask = HPIPE_PHY_TEST_RESET_MASK;
data = 0x0 << HPIPE_PHY_TEST_RESET_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
mask = HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_MASK;
g1_ffe_cap_sel = ((mmio_read_32(hpipe_addr +
HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG)
& mask) >> HPIPE_ADAPTED_FFE_ADAPTED_FFE_CAP_OFFSET);
return 0;
}
mask = HPIPE_DATA_PHASE_ADAPTED_OS_PH_MASK;
align90 = ((mmio_read_32(hpipe_addr + HPIPE_DATA_PHASE_OFF_CTRL_REG)
& mask) >> HPIPE_DATA_PHASE_ADAPTED_OS_PH_OFFSET);
/* This function runs complete RX training sequence:
* - Run RX training for all possible Feed Forward Equalization values
* - Choose the FFE which gives the best result.
* - Run RX training again with the best result.
*
* Return '0' on success, an error code in case of failure.
*/
int mvebu_cp110_comphy_xfi_rx_training(uint64_t comphy_base,
uint8_t comphy_index)
{
uint32_t mask, data, max_rx_train = 0, max_rx_train_index = 0;
uintptr_t hpipe_addr;
uint32_t rx_train_result;
int ret, i;
mask = HPIPE_ADAPTED_DFE_RES_MASK;
g1_dfe_res = ((mmio_read_32(hpipe_addr +
HPIPE_ADAPTED_DFE_COEFFICIENT_1_REG)
& mask) >> HPIPE_ADAPTED_DFE_RES_OFFSET);
hpipe_addr = HPIPE_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
comphy_index);
debug("================================================\n");
debug("Switching to static configuration:\n");
debug("FFE_RES = 0x%x FFE_CAP = 0x%x align90 = 0x%x g1_dfe_res 0x%x\n",
g1_ffe_res_sel, g1_ffe_cap_sel, align90, g1_dfe_res);
debug("Result after training: 0x%lx= 0x%x, 0x%lx= 0x%x, 0x%lx = 0x%x\n",
(hpipe_addr + HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG),
mmio_read_32(hpipe_addr +
HPIPE_ADAPTED_FFE_CAPACITOR_COUNTER_CTRL_REG),
(hpipe_addr + HPIPE_DATA_PHASE_OFF_CTRL_REG),
mmio_read_32(hpipe_addr + HPIPE_DATA_PHASE_OFF_CTRL_REG),
(hpipe_addr + HPIPE_ADAPTED_DFE_COEFFICIENT_1_REG),
mmio_read_32(hpipe_addr + HPIPE_ADAPTED_DFE_COEFFICIENT_1_REG));
debug("================================================\n");
debug_enter();
/* Update FFE_RES */
mask = HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_MASK;
data = g1_ffe_res_sel << HPIPE_G1_SETTINGS_3_G1_FFE_RES_SEL_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
/* Configure SQ threshold and CDR lock */
mask = HPIPE_SQUELCH_THRESH_IN_MASK;
data = 0xc << HPIPE_SQUELCH_THRESH_IN_OFFSET;
reg_set(hpipe_addr + HPIPE_SQUELCH_FFE_SETTING_REG, data, mask);
/* Update FFE_CAP */
mask = HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK;
data = g1_ffe_cap_sel << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
mask = HPIPE_SQ_DEGLITCH_WIDTH_P_MASK;
data = 0xf << HPIPE_SQ_DEGLITCH_WIDTH_P_OFFSET;
mask |= HPIPE_SQ_DEGLITCH_WIDTH_N_MASK;
data |= 0xf << HPIPE_SQ_DEGLITCH_WIDTH_N_OFFSET;
mask |= HPIPE_SQ_DEGLITCH_EN_MASK;
data |= 0x1 << HPIPE_SQ_DEGLITCH_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_SQ_GLITCH_FILTER_CTRL, data, mask);
mask = HPIPE_CDR_LOCK_DET_EN_MASK;
data = 0x1 << HPIPE_CDR_LOCK_DET_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_LOOPBACK_REG, data, mask);
udelay(100);
/* Determine if we have a cable attached to this comphy, if not,
* we can't perform RX training.
/* Bypass the FFE table settings and use the FFE settings directly from
* registers FFE_RES_SEL and FFE_CAP_SEL
*/
data = mmio_read_32(hpipe_addr + HPIPE_SQUELCH_FFE_SETTING_REG);
if (data & HPIPE_SQUELCH_DETECTED_MASK) {
ERROR("Squelsh is not detected, can't perform RX training\n");
return -EINVAL;
}
mask = HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK;
data = 1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
data = mmio_read_32(hpipe_addr + HPIPE_LOOPBACK_REG);
if (!(data & HPIPE_CDR_LOCK_MASK)) {
ERROR("CDR is not locked, can't perform RX training\n");
return -EINVAL;
}
/* Use the value from CAL_OS_PH_EXT */
mask = HPIPE_CAL_RXCLKALIGN_90_EXT_EN_MASK;
data = 1 << HPIPE_CAL_RXCLKALIGN_90_EXT_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG,
data, mask);
/* Do preparations for RX training */
/* Update align90 */
mask = HPIPE_CAL_OS_PH_EXT_MASK;
data = align90 << HPIPE_CAL_OS_PH_EXT_OFFSET;
reg_set(hpipe_addr + HPIPE_RX_CLK_ALIGN90_AND_TX_IDLE_CALIB_CTRL_REG,
data, mask);
/* Force DFE resolution (use gen table value) */
mask = HPIPE_DFE_RES_FORCE_MASK;
data = 0x0 << HPIPE_DFE_RES_FORCE_OFFSET;
reg_set(hpipe_addr + HPIPE_DFE_REG0, data, mask);
mask = HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_MASK;
data = 0xf << HPIPE_G1_SETTINGS_3_G1_FFE_CAP_SEL_OFFSET;
mask |= HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_MASK;
data |= 1 << HPIPE_G1_SETTINGS_3_G1_FFE_SETTING_FORCE_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_3_REG, data, mask);
/* 0x111-G1 DFE_Setting_4 */
mask = HPIPE_G1_SETTINGS_4_G1_DFE_RES_MASK;
data = g1_dfe_res << HPIPE_G1_SETTINGS_4_G1_DFE_RES_OFFSET;
reg_set(hpipe_addr + HPIPE_G1_SETTINGS_4_REG, data, mask);
/* Perform RX training for all possible FFE (Feed Forward
* Equalization, possible values are 0-7).
* We update the best value reached and the FFE which gave this value.
*/
for (i = 0; i < MAX_NUM_OF_FFE; i++) {
rx_train_result = 0;
ret = mvebu_cp110_comphy_test_single_ffe(comphy_base,
comphy_index, i,
&rx_train_result);
debug("PRBS31 loppback\n\n");
if ((!ret) && (rx_train_result > max_rx_train)) {
max_rx_train = rx_train_result;
max_rx_train_index = i;
}
}
mask = HPIPE_PHY_TEST_PT_TESTMODE_MASK;
data = 0x1 << HPIPE_PHY_TEST_PT_TESTMODE_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_OOB_0_REGISTER, data, mask);
/* If we were able to determine which FFE gives the best value,
* now we need to set it and run RX training again (only for this
* FFE).
*/
if (max_rx_train) {
ret = mvebu_cp110_comphy_test_single_ffe(comphy_base,
comphy_index,
max_rx_train_index,
&rx_train_result);
/* Configure PRBS counters */
mask = HPIPE_PHY_TEST_PATTERN_SEL_MASK;
data = 0xe << HPIPE_PHY_TEST_PATTERN_SEL_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
if (ret == 0)
debug("RX Training passed (FFE = %d, result = 0x%x)\n",
max_rx_train_index, rx_train_result);
} else {
ERROR("RX Training failed for comphy%d\n", comphy_index);
ret = -EINVAL;
}
mask = HPIPE_PHY_TEST_DATA_MASK;
data = 0xc4 << HPIPE_PHY_TEST_DATA_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_DATA_REG, data, mask);
debug_exit();
mask = HPIPE_PHY_TEST_EN_MASK;
data = 0x1 << HPIPE_PHY_TEST_EN_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
return ret;
/* Reset PRBS error counter */
mask = HPIPE_PHY_TEST_PATTERN_SEL_MASK;
data = 0x1 << HPIPE_PHY_TEST_RESET_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
mask = HPIPE_PHY_TEST_PATTERN_SEL_MASK;
data = 0x0 << HPIPE_PHY_TEST_RESET_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_CONTROL_REG, data, mask);
mask = HPIPE_PHY_TEST_PT_TESTMODE_MASK;
data = 0x1 << HPIPE_PHY_TEST_PT_TESTMODE_OFFSET;
reg_set(hpipe_addr + HPIPE_PHY_TEST_OOB_0_REGISTER, data, mask);
printf("########################################################\n");
printf("# To use trained values update the ATF sources:\n");
printf("# plat/marvell/a8k/<board_type>/board/phy-porting-layer.h ");
printf("file\n# with new values as below (for appropriate AP nr %d",
ap_nr);
printf("and CP nr: %d comphy_index %d\n\n",
cp_nr, comphy_index);
printf("static struct xfi_params xfi_static_values_tab[AP_NUM]");
printf("[CP_NUM][MAX_LANE_NR] = {\n");
printf("\t...\n");
printf("\t.g1_ffe_res_sel = 0x%x,\n", g1_ffe_res_sel);
printf("\t.g1_ffe_cap_sel = 0x%x,\n", g1_ffe_cap_sel);
printf("\t.align90 = 0x%x,\n", align90);
printf("\t.g1_dfe_res = 0x%x\n", g1_dfe_res);
printf("\t...\n");
printf("};\n\n");
printf("########################################################\n");
/* check */
debug("PRBS error counter[0x%lx] 0x%x\n\n",
hpipe_addr + HPIPE_PHY_TEST_PRBS_ERROR_COUNTER_1_REG,
mmio_read_32(hpipe_addr +
HPIPE_PHY_TEST_PRBS_ERROR_COUNTER_1_REG));
rx_trainng_done[ap_nr][cp_nr][comphy_index] = 1;
return 0;
}
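The printf template above is intended to be copied into the board's porting layer; a hedged sketch of what such a board-specific phy-porting-layer.h entry could look like (the AP/CP/lane indices and every numeric value are placeholders to be replaced by the values reported by rx_training):
/* Sketch only - placeholder values, to be replaced with the trained ones. */
static const struct xfi_params
	xfi_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = {
	[0][0][4] = {
		.g1_ffe_res_sel = 0x3,
		.g1_ffe_cap_sel = 0xf,
		.align90 = 0x5f,
		.g1_dfe_res = 0x2,
		/* the remaining g1_* fields must be filled in as well
		 * (see the default porting layer for reference values)
		 */
		.valid = 1,
	},
};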
/* During AP the proper mode is auto-negotiated and the mac, pcs and serdes
@ -2237,6 +2345,7 @@ int mvebu_cp110_comphy_power_on(uint64_t comphy_base, uint8_t comphy_index,
err = mvebu_cp110_comphy_rxaui_power_on(comphy_base,
comphy_index,
comphy_mode);
break;
case (COMPHY_USB3H_MODE):
case (COMPHY_USB3D_MODE):
err = mvebu_cp110_comphy_usb3_power_on(comphy_base,
@ -2261,9 +2370,18 @@ int mvebu_cp110_comphy_power_off(uint64_t comphy_base, uint8_t comphy_index)
{
uintptr_t sd_ip_addr, comphy_ip_addr;
uint32_t mask, data;
uint8_t ap_nr, cp_nr;
debug_enter();
mvebu_cp110_get_ap_and_cp_nr(&ap_nr, &cp_nr, comphy_base);
if (rx_trainng_done[ap_nr][cp_nr][comphy_index]) {
debug("Skip %s for comphy[%d][%d][%d], due to rx training\n",
__func__, ap_nr, cp_nr, comphy_index);
return 0;
}
sd_ip_addr = SD_ADDR(COMPHY_PIPE_FROM_COMPHY_ADDR(comphy_base),
comphy_index);
comphy_ip_addr = COMPHY_ADDR(comphy_base, comphy_index);

View File

@ -5,7 +5,79 @@
* https://spdx.org/licenses
*/
/* Marvell CP110 SoC COMPHY unit driver */
/* These are parameters for XFI mode, which need to be tuned for each board type.
* For known DB boards the parameters were already calibrated and placed under
* the plat/marvell/a8k/<board_type>/board/phy-porting-layer.h
*/
struct xfi_params {
uint8_t g1_ffe_res_sel;
uint8_t g1_ffe_cap_sel;
uint8_t align90;
uint8_t g1_dfe_res;
uint8_t g1_amp;
uint8_t g1_emph;
uint8_t g1_emph_en;
uint8_t g1_tx_amp_adj;
uint8_t g1_tx_emph_en;
uint8_t g1_tx_emph;
uint8_t g1_rx_selmuff;
uint8_t g1_rx_selmufi;
uint8_t g1_rx_selmupf;
uint8_t g1_rx_selmupi;
_Bool valid;
};
struct sata_params {
uint8_t g1_amp;
uint8_t g2_amp;
uint8_t g3_amp;
uint8_t g1_emph;
uint8_t g2_emph;
uint8_t g3_emph;
uint8_t g1_emph_en;
uint8_t g2_emph_en;
uint8_t g3_emph_en;
uint8_t g1_tx_amp_adj;
uint8_t g2_tx_amp_adj;
uint8_t g3_tx_amp_adj;
uint8_t g1_tx_emph_en;
uint8_t g2_tx_emph_en;
uint8_t g3_tx_emph_en;
uint8_t g1_tx_emph;
uint8_t g2_tx_emph;
uint8_t g3_tx_emph;
uint8_t g3_dfe_res;
uint8_t g3_ffe_res_sel;
uint8_t g3_ffe_cap_sel;
uint8_t align90;
uint8_t g1_rx_selmuff;
uint8_t g2_rx_selmuff;
uint8_t g3_rx_selmuff;
uint8_t g1_rx_selmufi;
uint8_t g2_rx_selmufi;
uint8_t g3_rx_selmufi;
uint8_t g1_rx_selmupf;
uint8_t g2_rx_selmupf;
uint8_t g3_rx_selmupf;
uint8_t g1_rx_selmupi;
uint8_t g2_rx_selmupi;
uint8_t g3_rx_selmupi;
_Bool valid;
};
int mvebu_cp110_comphy_is_pll_locked(uint64_t comphy_base,
uint8_t comphy_index);

View File

@ -0,0 +1,51 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __PHY_DEFAULT_PORTING_LAYER_H
#define __PHY_DEFAULT_PORTING_LAYER_H
#define MAX_LANE_NR 6
#warning "Using default comphy params - you may need to suit them to your board"
static const struct xfi_params
xfi_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = {
[0 ... AP_NUM-1][0 ... CP_NUM-1][0 ... MAX_LANE_NR-1] = {
.g1_ffe_res_sel = 0x3, .g1_ffe_cap_sel = 0xf, .align90 = 0x5f,
.g1_dfe_res = 0x2, .g1_amp = 0x1c, .g1_emph = 0xe,
.g1_emph_en = 0x1, .g1_tx_amp_adj = 0x1, .g1_tx_emph_en = 0x1,
.g1_tx_emph = 0x0, .g1_rx_selmuff = 0x1, .g1_rx_selmufi = 0x0,
.g1_rx_selmupf = 0x2, .g1_rx_selmupi = 0x2, .valid = 1
}
};
static const struct sata_params
sata_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = {
[0 ... AP_NUM-1][0 ... CP_NUM-1][0 ... MAX_LANE_NR-1] = {
.g1_amp = 0x8, .g2_amp = 0xa, .g3_amp = 0x1e,
.g1_emph = 0x1, .g2_emph = 0x2, .g3_emph = 0xe,
.g1_emph_en = 0x1, .g2_emph_en = 0x1, .g3_emph_en = 0x1,
.g1_tx_amp_adj = 0x1, .g2_tx_amp_adj = 0x1,
.g3_tx_amp_adj = 0x1,
.g1_tx_emph_en = 0x0, .g2_tx_emph_en = 0x0,
.g3_tx_emph_en = 0x0,
.g1_tx_emph = 0x1, .g2_tx_emph = 0x1, .g3_tx_emph = 0x1,
.g3_dfe_res = 0x1, .g3_ffe_res_sel = 0x4, .g3_ffe_cap_sel = 0xf,
.align90 = 0x61,
.g1_rx_selmuff = 0x3, .g2_rx_selmuff = 0x3,
.g3_rx_selmuff = 0x3,
.g1_rx_selmufi = 0x0, .g2_rx_selmufi = 0x0,
.g3_rx_selmufi = 0x3,
.g1_rx_selmupf = 0x1, .g2_rx_selmupf = 0x1,
.g3_rx_selmupf = 0x2,
.g1_rx_selmupi = 0x0, .g2_rx_selmupi = 0x0,
.g3_rx_selmupi = 0x2,
.valid = 0x1
},
};
#endif /* __PHY_DEFAULT_PORTING_LAYER_H */

View File

@ -0,0 +1,168 @@
/*
* Copyright (C) 2016 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <asm_macros.S>
#include <a3700_console.h>
.globl console_core_init
.globl console_core_putc
.globl console_core_getc
.globl console_core_flush
/* -----------------------------------------------
* int console_core_init(unsigned long base_addr,
* unsigned int uart_clk, unsigned int baud_rate)
* Function to initialize the console without a
* C Runtime to print debug information. This
* function will be accessed by console_init and
* crash reporting.
* In: x0 - console base address
* w1 - Uart clock in Hz
* w2 - Baud rate
* Out: return 1 on success
* Clobber list : x1, x2, x3
* -----------------------------------------------
*/
func console_core_init
/* Check the input base address */
cbz x0, init_fail
/* Check baud rate and uart clock for sanity */
cbz w1, init_fail
cbz w2, init_fail
/* Program the baudrate */
/* Divisor = Uart clock / (16 * baudrate) */
lsl w2, w2, #4
udiv w2, w1, w2
and w2, w2, #0x3ff
ldr w3, [x0, #UART_BAUD_REG]
bic w3, w3, 0x3ff
orr w3, w3, w2
str w3, [x0, #UART_BAUD_REG]/* set baud rate divisor */
/* Set UART to default 16X scheme */
mov w3, #0
str w3, [x0, #UART_POSSR_REG]
/*
* Wait for the TX FIFO to be empty. If the TX FIFO is still not empty
* after waiting for 20ms, reset it anyway.
*/
mov w1, #20 /* max time out 20ms */
2:
/* Check whether TX FIFO is empty */
ldr w3, [x0, #UART_STATUS_REG]
and w3, w3, #UARTLSR_TXFIFOEMPTY
cmp w3, #0
b.ne 4f
/* Delay */
mov w2, #30000
3:
sub w2, w2, #1
cmp w2, #0
b.ne 3b
/* Check whether the timeout has expired */
sub w1, w1, #1
cmp w1, #0
b.ne 2b
4:
/* Reset FIFO */
mov w3, #UART_CTRL_RXFIFO_RESET
orr w3, w3, #UART_CTRL_TXFIFO_RESET
str w3, [x0, #UART_CTRL_REG]
/* Delay */
mov w2, #2000
1:
sub w2, w2, #1
cmp w2, #0
b.ne 1b
/* No Parity, 1 Stop */
mov w3, #0
str w3, [x0, #UART_CTRL_REG]
mov w0, #1
ret
init_fail:
mov w0, #0
ret
endfunc console_core_init
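As a plain-C illustration of the divisor value that console_core_init programs into UART_BAUD_REG (the clock and baud-rate numbers are assumptions made only for this example):
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t uart_clk = 25000000;	/* example UART clock in Hz (assumption) */
	uint32_t baudrate = 115200;	/* example baud rate (assumption) */
	/* Divisor = UART clock / (16 * baudrate), kept to 10 bits as above */
	uint32_t divisor = (uart_clk / (16U * baudrate)) & 0x3ff;

	printf("UART_BAUD_REG divisor field = %u\n", divisor);
	return 0;
}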
/* --------------------------------------------------------
* int console_core_putc(int c, unsigned int base_addr)
* Function to output a character over the console. It
* returns the character printed on success or -1 on error.
* In : w0 - character to be printed
* x1 - console base address
* Out : return -1 on error else return character.
* Clobber list : x2
* --------------------------------------------------------
*/
func console_core_putc
/* Check the input parameter */
cbz x1, putc_error
/* Prepend '\r' to '\n' */
cmp w0, #0xA
b.ne 2f
/* Check if the transmit FIFO is full */
1: ldr w2, [x1, #UART_STATUS_REG]
and w2, w2, #UARTLSR_TXFIFOFULL
cmp w2, #UARTLSR_TXFIFOFULL
b.eq 1b
mov w2, #0xD /* '\r' */
str w2, [x1, #UART_TX_REG]
/* Check if the transmit FIFO is full */
2: ldr w2, [x1, #UART_STATUS_REG]
and w2, w2, #UARTLSR_TXFIFOFULL
cmp w2, #UARTLSR_TXFIFOFULL
b.eq 2b
str w0, [x1, #UART_TX_REG]
ret
putc_error:
mov w0, #-1
ret
endfunc console_core_putc
/* ---------------------------------------------
* int console_core_getc(void)
* Function to get a character from the console.
* It returns the character grabbed on success
* or -1 on error.
* In : w0 - console base address
* Out : return -1 on error else return character.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_getc
/* Check if the receive FIFO is empty */
ret
getc_error:
mov w0, #-1
ret
endfunc console_core_getc
/* ---------------------------------------------
* int console_core_flush(uintptr_t base_addr)
* Function to force a write of all buffered
* data that hasn't been output.
* In : x0 - console base address
* Out : return -1 on error else return 0.
* Clobber list : x0, x1
* ---------------------------------------------
*/
func console_core_flush
/* Placeholder */
mov w0, #0
ret
endfunc console_core_flush

View File

@ -0,0 +1,55 @@
/*
* Copyright (C) 2016 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __A3700_CONSOLE_H__
#define __A3700_CONSOLE_H__
/* MVEBU UART Registers */
#define UART_RX_REG 0x00
#define UART_TX_REG 0x04
#define UART_CTRL_REG 0x08
#define UART_STATUS_REG 0x0c
#define UART_BAUD_REG 0x10
#define UART_POSSR_REG 0x14
/* FIFO Control Register bits */
#define UARTFCR_FIFOMD_16450 (0 << 6)
#define UARTFCR_FIFOMD_16550 (1 << 6)
#define UARTFCR_RXTRIG_1 (0 << 6)
#define UARTFCR_RXTRIG_4 (1 << 6)
#define UARTFCR_RXTRIG_8 (2 << 6)
#define UARTFCR_RXTRIG_16 (3 << 6)
#define UARTFCR_TXTRIG_1 (0 << 4)
#define UARTFCR_TXTRIG_4 (1 << 4)
#define UARTFCR_TXTRIG_8 (2 << 4)
#define UARTFCR_TXTRIG_16 (3 << 4)
#define UARTFCR_DMAEN (1 << 3) /* Enable DMA mode */
#define UARTFCR_TXCLR (1 << 2) /* Clear contents of Tx FIFO */
#define UARTFCR_RXCLR (1 << 1) /* Clear contents of Rx FIFO */
#define UARTFCR_FIFOEN (1 << 0) /* Enable the Tx/Rx FIFO */
/* Line Control Register bits */
#define UARTLCR_DLAB (1 << 7) /* Divisor Latch Access */
#define UARTLCR_SETB (1 << 6) /* Set BREAK Condition */
#define UARTLCR_SETP (1 << 5) /* Set Parity to LCR[4] */
#define UARTLCR_EVEN (1 << 4) /* Even Parity Format */
#define UARTLCR_PAR (1 << 3) /* Parity */
#define UARTLCR_STOP (1 << 2) /* Stop Bit */
#define UARTLCR_WORDSZ_5 0 /* Word Length of 5 */
#define UARTLCR_WORDSZ_6 1 /* Word Length of 6 */
#define UARTLCR_WORDSZ_7 2 /* Word Length of 7 */
#define UARTLCR_WORDSZ_8 3 /* Word Length of 8 */
/* Line Status Register bits */
#define UARTLSR_TXFIFOFULL (1 << 11) /* Tx Fifo Full */
/* UART Control Register bits */
#define UART_CTRL_RXFIFO_RESET (1 << 14)
#define UART_CTRL_TXFIFO_RESET (1 << 15)
#define UARTLSR_TXFIFOEMPTY (1 << 6)
#endif /* __A3700_CONSOLE_H__ */

View File

@ -29,6 +29,13 @@ static inline uint16_t mmio_read_16(uintptr_t addr)
return *(volatile uint16_t*)addr;
}
static inline void mmio_clrsetbits_16(uintptr_t addr,
uint16_t clear,
uint16_t set)
{
mmio_write_16(addr, (mmio_read_16(addr) & ~clear) | set);
}
static inline void mmio_write_32(uintptr_t addr, uint32_t value)
{
*(volatile uint32_t*)addr = value;

View File

@ -0,0 +1,16 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __ARMADA_COMMON_H__
#define __ARMADA_COMMON_H__
#include <io_addr_dec.h>
#include <stdint.h>
int marvell_get_io_dec_win_conf(struct dec_win_config **win, uint32_t *size);
#endif /* __ARMADA_COMMON_H__ */

View File

@ -0,0 +1,77 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __BOARD_MARVELL_DEF_H__
#define __BOARD_MARVELL_DEF_H__
/*
* Required platform porting definitions common to all ARM
* development platforms
*/
/* Size of cacheable stacks */
#if IMAGE_BL1
#if TRUSTED_BOARD_BOOT
# define PLATFORM_STACK_SIZE 0x1000
#else
# define PLATFORM_STACK_SIZE 0x440
#endif
#elif IMAGE_BL2
# if TRUSTED_BOARD_BOOT
# define PLATFORM_STACK_SIZE 0x1000
# else
# define PLATFORM_STACK_SIZE 0x400
# endif
#elif IMAGE_BL31
# define PLATFORM_STACK_SIZE 0x400
#elif IMAGE_BL32
# define PLATFORM_STACK_SIZE 0x440
#endif
/*
* PLAT_MARVELL_MMAP_ENTRIES depends on the number of entries in the
* plat_arm_mmap array defined for each BL stage.
*/
#if IMAGE_BLE
# define PLAT_MARVELL_MMAP_ENTRIES 3
#endif
#if IMAGE_BL1
# if TRUSTED_BOARD_BOOT
# define PLAT_MARVELL_MMAP_ENTRIES 7
# else
# define PLAT_MARVELL_MMAP_ENTRIES 6
# endif /* TRUSTED_BOARD_BOOT */
#endif
#if IMAGE_BL2
# define PLAT_MARVELL_MMAP_ENTRIES 8
#endif
#if IMAGE_BL31
#define PLAT_MARVELL_MMAP_ENTRIES 5
#endif
/*
* Platform specific page table and MMU setup constants
*/
#if IMAGE_BL1
#define MAX_XLAT_TABLES 4
#elif IMAGE_BLE
# define MAX_XLAT_TABLES 4
#elif IMAGE_BL2
# define MAX_XLAT_TABLES 4
#elif IMAGE_BL31
# define MAX_XLAT_TABLES 4
#elif IMAGE_BL32
# define MAX_XLAT_TABLES 4
#endif
#define MAX_IO_DEVICES 3
#define MAX_IO_HANDLES 4
#define PLAT_MARVELL_TRUSTED_SRAM_SIZE 0x80000 /* 512 KB */
#endif /* __BOARD_MARVELL_DEF_H__ */

View File

@ -0,0 +1,177 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __MARVELL_DEF_H__
#define __MARVELL_DEF_H__
#include <arch.h>
#include <common_def.h>
#include <platform_def.h>
#include <tbbr_img_def.h>
#include <xlat_tables.h>
/****************************************************************************
* Definitions common to all MARVELL standard platforms
****************************************************************************
*/
/* Special value used to verify platform parameters from BL2 to BL31 */
#define MARVELL_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL
#define PLAT_MARVELL_NORTHB_COUNT 1
#define PLAT_MARVELL_CLUSTER_COUNT 1
#define MARVELL_CACHE_WRITEBACK_SHIFT 6
/*
* Macros mapping the MPIDR Affinity levels to MARVELL Platform Power levels.
* The power levels have a 1:1 mapping with the MPIDR affinity levels.
*/
#define MARVELL_PWR_LVL0 MPIDR_AFFLVL0
#define MARVELL_PWR_LVL1 MPIDR_AFFLVL1
#define MARVELL_PWR_LVL2 MPIDR_AFFLVL2
/*
* Macros for local power states in Marvell platforms encoded by State-ID field
* within the power-state parameter.
*/
/* Local power state for power domains in Run state. */
#define MARVELL_LOCAL_STATE_RUN 0
/* Local power state for retention. Valid only for CPU power domains */
#define MARVELL_LOCAL_STATE_RET 1
/* Local power state for OFF/power-down.
* Valid for CPU and cluster power domains
*/
#define MARVELL_LOCAL_STATE_OFF 2
/* The first 4KB of Trusted SRAM are used as shared memory */
#define MARVELL_TRUSTED_SRAM_BASE PLAT_MARVELL_ATF_BASE
#define MARVELL_SHARED_RAM_BASE MARVELL_TRUSTED_SRAM_BASE
#define MARVELL_SHARED_RAM_SIZE 0x00001000 /* 4 KB */
/* The remaining Trusted SRAM is used to load the BL images */
#define MARVELL_BL_RAM_BASE (MARVELL_SHARED_RAM_BASE + \
MARVELL_SHARED_RAM_SIZE)
#define MARVELL_BL_RAM_SIZE (PLAT_MARVELL_TRUSTED_SRAM_SIZE - \
MARVELL_SHARED_RAM_SIZE)
#define MARVELL_DRAM_BASE ULL(0x0)
#define MARVELL_DRAM_SIZE ULL(0x20000000)
#define MARVELL_DRAM_END (MARVELL_DRAM_BASE + \
MARVELL_DRAM_SIZE - 1)
#define MARVELL_IRQ_SEC_PHY_TIMER 29
#define MARVELL_IRQ_SEC_SGI_0 8
#define MARVELL_IRQ_SEC_SGI_1 9
#define MARVELL_IRQ_SEC_SGI_2 10
#define MARVELL_IRQ_SEC_SGI_3 11
#define MARVELL_IRQ_SEC_SGI_4 12
#define MARVELL_IRQ_SEC_SGI_5 13
#define MARVELL_IRQ_SEC_SGI_6 14
#define MARVELL_IRQ_SEC_SGI_7 15
#define MARVELL_MAP_SHARED_RAM MAP_REGION_FLAT( \
MARVELL_SHARED_RAM_BASE, \
MARVELL_SHARED_RAM_SIZE, \
MT_MEMORY | MT_RW | MT_SECURE)
#define MARVELL_MAP_DRAM MAP_REGION_FLAT( \
MARVELL_DRAM_BASE, \
MARVELL_DRAM_SIZE, \
MT_MEMORY | MT_RW | MT_NS)
/*
* The number of regions like RO(code), coherent and data required by
* different BL stages which need to be mapped in the MMU.
*/
#if USE_COHERENT_MEM
#define MARVELL_BL_REGIONS 3
#else
#define MARVELL_BL_REGIONS 2
#endif
#define MAX_MMAP_REGIONS (PLAT_MARVELL_MMAP_ENTRIES + \
MARVELL_BL_REGIONS)
#define MARVELL_CONSOLE_BAUDRATE 115200
/****************************************************************************
* Required platform porting definitions common to all MARVELL std. platforms
****************************************************************************
*/
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32)
/*
* This macro defines the deepest retention state possible. A higher state
* id will represent an invalid or a power down state.
*/
#define PLAT_MAX_RET_STATE MARVELL_LOCAL_STATE_RET
/*
* This macro defines the deepest power down states possible. Any state ID
* higher than this is invalid.
*/
#define PLAT_MAX_OFF_STATE MARVELL_LOCAL_STATE_OFF
#define PLATFORM_CORE_COUNT PLAT_MARVELL_CLUSTER_CORE_COUNT
/*
* Some data must be aligned on the biggest cache line size in the platform.
* This is known only to the platform as it might have a combination of
* integrated and external caches.
*/
#define CACHE_WRITEBACK_GRANULE (1 << MARVELL_CACHE_WRITEBACK_SHIFT)
/*****************************************************************************
* BL1 specific defines.
* BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of
* addresses.
*****************************************************************************
*/
#define BL1_RO_BASE PLAT_MARVELL_TRUSTED_ROM_BASE
#define BL1_RO_LIMIT (PLAT_MARVELL_TRUSTED_ROM_BASE \
+ PLAT_MARVELL_TRUSTED_ROM_SIZE)
/*
* Put BL1 RW at the top of the Trusted SRAM.
*/
#define BL1_RW_BASE (MARVELL_BL_RAM_BASE + \
MARVELL_BL_RAM_SIZE - \
PLAT_MARVELL_MAX_BL1_RW_SIZE)
#define BL1_RW_LIMIT (MARVELL_BL_RAM_BASE + MARVELL_BL_RAM_SIZE)
/*****************************************************************************
* BL2 specific defines.
*****************************************************************************
*/
/*
* Put BL2 just below BL31.
*/
#define BL2_BASE (BL31_BASE - PLAT_MARVELL_MAX_BL2_SIZE)
#define BL2_LIMIT BL31_BASE
/*****************************************************************************
* BL31 specific defines.
*****************************************************************************
*/
/*
* Put BL31 at the top of the Trusted SRAM.
*/
#define BL31_BASE (MARVELL_BL_RAM_BASE + \
MARVELL_BL_RAM_SIZE - \
PLAT_MARVEL_MAX_BL31_SIZE)
#define BL31_PROGBITS_LIMIT BL1_RW_BASE
#define BL31_LIMIT (MARVELL_BL_RAM_BASE + \
MARVELL_BL_RAM_SIZE)
#endif /* __MARVELL_DEF_H__ */

View File

@ -0,0 +1,96 @@
/*
* Copyright (C) 2016 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __PLAT_MARVELL_H__
#define __PLAT_MARVELL_H__
#include <bl_common.h>
#include <cassert.h>
#include <cpu_data.h>
#include <stdint.h>
#include <xlat_tables.h>
/*
* Extern declarations common to Marvell standard platforms
*/
extern const mmap_region_t plat_marvell_mmap[];
#define MARVELL_CASSERT_MMAP \
CASSERT((ARRAY_SIZE(plat_marvell_mmap) + MARVELL_BL_REGIONS) \
<= MAX_MMAP_REGIONS, \
assert_max_mmap_regions)
/*
* Utility functions common to Marvell standard platforms
*/
void marvell_setup_page_tables(uintptr_t total_base,
size_t total_size,
uintptr_t code_start,
uintptr_t code_limit,
uintptr_t rodata_start,
uintptr_t rodata_limit
#if USE_COHERENT_MEM
, uintptr_t coh_start,
uintptr_t coh_limit
#endif
);
/* IO storage utility functions */
void marvell_io_setup(void);
/* Systimer utility function */
void marvell_configure_sys_timer(void);
/* Topology utility function */
int marvell_check_mpidr(u_register_t mpidr);
/* BL1 utility functions */
void marvell_bl1_early_platform_setup(void);
void marvell_bl1_platform_setup(void);
void marvell_bl1_plat_arch_setup(void);
/* BL2 utility functions */
void marvell_bl2_early_platform_setup(meminfo_t *mem_layout);
void marvell_bl2_platform_setup(void);
void marvell_bl2_plat_arch_setup(void);
uint32_t marvell_get_spsr_for_bl32_entry(void);
uint32_t marvell_get_spsr_for_bl33_entry(void);
/* BL31 utility functions */
void marvell_bl31_early_platform_setup(void *from_bl2,
uintptr_t soc_fw_config,
uintptr_t hw_config,
void *plat_params_from_bl2);
void marvell_bl31_platform_setup(void);
void marvell_bl31_plat_runtime_setup(void);
void marvell_bl31_plat_arch_setup(void);
/* FIP TOC validity check */
int marvell_io_is_toc_valid(void);
/*
* PSCI functionality
*/
void marvell_psci_arch_init(int idx);
void plat_marvell_system_reset(void);
/*
* Optional functions required in Marvell standard platforms
*/
void plat_marvell_io_setup(void);
int plat_marvell_get_alt_image_source(
unsigned int image_id,
uintptr_t *dev_handle,
uintptr_t *image_spec);
unsigned int plat_marvell_calc_core_pos(u_register_t mpidr);
void plat_marvell_interconnect_init(void);
void plat_marvell_interconnect_enter_coherency(void);
const mmap_region_t *plat_marvell_get_mmap(void);
#endif /* __PLAT_MARVELL_H__ */

View File

@ -5,8 +5,8 @@
* https://spdx.org/licenses
*/
#ifndef __A8K_COMMON_H__
#define __A8K_COMMON_H__
#ifndef __ARMADA_COMMON_H__
#define __ARMADA_COMMON_H__
#include <amb_adec.h>
#include <io_win.h>

View File

@ -45,7 +45,7 @@ spacer:
* Clobbers: x0 - x10, sp
* ---------------------------------------------
*/
.macro arm_print_gic_regs
.macro marvell_print_gic_regs
/* Check for GICv3 system register access */
mrs x7, id_aa64pfr0_el1
ubfx x7, x7, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_WIDTH

View File

@ -32,7 +32,8 @@
#define ROUND_UP_TO_POW_OF_2(number) (1 << \
(32 - __builtin_clz((number) - 1)))
#define _1MB_ (1024ULL*1024ULL)
#define _1GB_ (_1MB_*1024ULL)
#define _1MB_ (1024ULL * 1024ULL)
#define _1GB_ (_1MB_ * 1024ULL)
#define _2GB_ (2 * _1GB_)
#endif /* MVEBU_H */

View File

@ -89,7 +89,7 @@ Marvell platform ports and SoC drivers
:F: docs/plat/marvell/
:F: plat/marvell/
:F: drivers/marvell/
:F: tools/doimage/
:F: tools/marvell/
NVidia platform ports
---------------------

View File

@ -0,0 +1,36 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <a3700_pm.h>
#include <plat_marvell.h>
/* This struct provides the PM wake up src configuration */
static struct pm_wake_up_src_config wake_up_src_cfg = {
.wake_up_src_num = 3,
.wake_up_src[0] = {
.wake_up_src_type = WAKE_UP_SRC_GPIO,
.wake_up_data = {
.gpio_data.bank_num = 0, /* North Bridge */
.gpio_data.gpio_num = 14
}
},
.wake_up_src[1] = {
.wake_up_src_type = WAKE_UP_SRC_GPIO,
.wake_up_data = {
.gpio_data.bank_num = 1, /* South Bridge */
.gpio_data.gpio_num = 2
}
},
.wake_up_src[2] = {
.wake_up_src_type = WAKE_UP_SRC_UART1,
}
};
struct pm_wake_up_src_config *mv_wake_up_src_config_get(void)
{
return &wake_up_src_cfg;
}

View File

@ -0,0 +1,13 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __MVEBU_DEF_H__
#define __MVEBU_DEF_H__
#include <a3700_plat_def.h>
#endif /* __MVEBU_DEF_H__ */

View File

@ -0,0 +1,69 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <armada_common.h>
#include <dram_win.h>
#include <io_addr_dec.h>
#include <mmio.h>
#include <marvell_plat_priv.h>
#include <plat_marvell.h>
/* This routine does MPP initialization */
static void marvell_bl31_mpp_init(void)
{
mmio_clrbits_32(MVEBU_NB_GPIO_SEL_REG, 1 << MVEBU_GPIO_TW1_GPIO_EN_OFF);
/* Set hidden GPIO setting for SPI.
* In the north_bridge_pin_out_en_high register 13804,
* bit 28 is the one which enables the CS and CLK pins to be
* outputs, so it needs to be set to 1.
* The initial value of this bit is 1, but during UART boot mode
* initialization this bit is cleared and the SPI CS and CLK pins
* are used for image download; so after downloading,
* we should set this bit to 1 again to enable the SPI CS and CLK pins.
* In any case this bit should be 1 in all modes,
* so do not check the boot mode here and always set this bit to 1.
*/
mmio_setbits_32(MVEBU_NB_GPIO_OUTPUT_EN_HIGH_REG,
1 << MVEBU_GPIO_NB_SPI_PIN_MODE_OFF);
}
/* This function overrides the function of the same name in marvell_bl31_setup.c */
void bl31_plat_arch_setup(void)
{
struct dec_win_config *io_dec_map;
uint32_t dec_win_num;
struct dram_win_map dram_wins_map;
marvell_bl31_plat_arch_setup();
/* MPP init */
marvell_bl31_mpp_init();
/* initialize the timer for delay functionality */
plat_delay_timer_init();
/* CPU address decoder windows initialization. */
cpu_wins_init();
/* fetch CPU-DRAM window mapping information by reading
* CPU-DRAM decode windows (only the enabled ones)
*/
dram_win_map_build(&dram_wins_map);
/* Get IO address decoder windows */
if (marvell_get_io_dec_win_conf(&io_dec_map, &dec_win_num)) {
printf("No IO address decoder windows configurations found!\n");
return;
}
/* IO address decoder init */
if (init_io_addr_dec(&dram_wins_map, io_dec_map, dec_win_num)) {
printf("IO address decoder windows initialization failed!\n");
return;
}
}

View File

@ -0,0 +1,10 @@
#
# Copyright (C) 2018 Marvell International Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
# https://spdx.org/licenses
#
include plat/marvell/a3700/common/a3700_common.mk
include plat/marvell/common/marvell_common.mk

View File

@ -0,0 +1,176 @@
#
# Copyright (C) 2018 Marvell International Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
# https://spdx.org/licenses
#
MARVELL_PLAT_BASE := plat/marvell
MARVELL_PLAT_INCLUDE_BASE := include/plat/marvell
PLAT_FAMILY := a3700
PLAT_FAMILY_BASE := $(MARVELL_PLAT_BASE)/$(PLAT_FAMILY)
PLAT_INCLUDE_BASE := $(MARVELL_PLAT_INCLUDE_BASE)/$(PLAT_FAMILY)
PLAT_COMMON_BASE := $(PLAT_FAMILY_BASE)/common
MARVELL_DRV_BASE := drivers/marvell
MARVELL_COMMON_BASE := $(MARVELL_PLAT_BASE)/common
include $(MARVELL_PLAT_BASE)/marvell.mk
#*********** A3700 *************
DOIMAGEPATH := $(WTP)
DOIMAGETOOL := $(DOIMAGEPATH)/wtptp/linux/tbb_linux
ifeq ($(MARVELL_SECURE_BOOT),1)
DOIMAGE_CFG := $(DOIMAGEPATH)/atf-tim.txt
IMAGESPATH := $(DOIMAGEPATH)/tim/trusted
TIMNCFG := $(DOIMAGEPATH)/atf-timN.txt
TIMNSIG := $(IMAGESPATH)/timnsign.txt
TIM2IMGARGS := -i $(DOIMAGE_CFG) -n $(TIMNCFG)
TIMN_IMAGE := $$(grep "Image Filename:" -m 1 $(TIMNCFG) | cut -c 17-)
else #MARVELL_SECURE_BOOT
DOIMAGE_CFG := $(DOIMAGEPATH)/atf-ntim.txt
IMAGESPATH := $(DOIMAGEPATH)/tim/untrusted
TIM2IMGARGS := -i $(DOIMAGE_CFG)
endif #MARVELL_SECURE_BOOT
TIMBUILD := $(DOIMAGEPATH)/script/buildtim.sh
TIM2IMG := $(DOIMAGEPATH)/script/tim2img.pl
# WTMI_IMG is used to specify the customized RTOS image running on the
# Service CPU (CM3 processor). By default, it points to a
# bare-metal fuse-programming binary in A3700_utils.
WTMI_IMG := $(DOIMAGEPATH)/wtmi/fuse/build/fuse.bin
# WTMI_SYSINIT_IMG is used for early system initialization,
# such as AVS settings, clock-tree setup and dynamic DDR PHY training.
# After the initialization is done, this image is removed
# from memory and the CM3 continues with the RTOS image or other application.
WTMI_SYSINIT_IMG := $(DOIMAGEPATH)/wtmi/sys_init/build/sys_init.bin
# WTMI_MULTI_IMG is composed of CM3 RTOS image (WTMI_IMG)
# and sys-init image (WTMI_SYSINIT_IMG).
WTMI_MULTI_IMG := $(DOIMAGEPATH)/wtmi/build/wtmi.bin
WTMI_ENC_IMG := $(DOIMAGEPATH)/wtmi/build/wtmi-enc.bin
BUILD_UART := uart-images
SRCPATH := $(dir $(BL33))
CLOCKSPRESET ?= CPU_800_DDR_800
DDR_TOPOLOGY ?= 0
BOOTDEV ?= SPINOR
PARTNUM ?= 0
TIM_IMAGE := $$(grep "Image Filename:" -m 1 $(DOIMAGE_CFG) | cut -c 17-)
TIMBLDARGS := $(MARVELL_SECURE_BOOT) $(BOOTDEV) $(IMAGESPATH) $(DOIMAGEPATH) $(CLOCKSPRESET) \
$(DDR_TOPOLOGY) $(PARTNUM) $(DEBUG) $(DOIMAGE_CFG) $(TIMNCFG) $(TIMNSIG) 1
TIMBLDUARTARGS := $(MARVELL_SECURE_BOOT) UART $(IMAGESPATH) $(DOIMAGEPATH) $(CLOCKSPRESET) \
$(DDR_TOPOLOGY) 0 0 $(DOIMAGE_CFG) $(TIMNCFG) $(TIMNSIG) 0
DOIMAGE_FLAGS := -r $(DOIMAGE_CFG) -v -D
# GICV3
$(eval $(call add_define,CONFIG_GICV3))
# CCI-400
$(eval $(call add_define,USE_CCI))
MARVELL_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
drivers/arm/gic/v3/gicv3_main.c \
drivers/arm/gic/v3/gicv3_helpers.c \
drivers/arm/gic/v3/arm_gicv3_common.c \
plat/common/plat_gicv3.c \
drivers/arm/gic/v3/gic500.c
ATF_INCLUDES := -Iinclude/common/tbbr \
-Iinclude/drivers
PLAT_INCLUDES := -I$(PLAT_FAMILY_BASE)/$(PLAT) \
-I$(PLAT_COMMON_BASE)/include \
-I$(PLAT_INCLUDE_BASE)/common \
-I$(MARVELL_DRV_BASE)/uart \
-I$(MARVELL_DRV_BASE) \
-I$/drivers/arm/gic/common/ \
$(ATF_INCLUDES)
PLAT_BL_COMMON_SOURCES := $(PLAT_COMMON_BASE)/aarch64/a3700_common.c \
drivers/console/aarch64/console.S \
$(MARVELL_COMMON_BASE)/marvell_cci.c \
$(MARVELL_DRV_BASE)/uart/a3700_console.S
BL1_SOURCES += $(PLAT_COMMON_BASE)/aarch64/plat_helpers.S \
lib/cpus/aarch64/cortex_a53.S
BL31_PORTING_SOURCES := $(PLAT_FAMILY_BASE)/$(PLAT)/board/pm_src.c
MARVELL_DRV := $(MARVELL_DRV_BASE)/comphy/phy-comphy-3700.c
BL31_SOURCES += lib/cpus/aarch64/cortex_a53.S \
$(PLAT_COMMON_BASE)/aarch64/plat_helpers.S \
$(PLAT_COMMON_BASE)/plat_pm.c \
$(PLAT_COMMON_BASE)/dram_win.c \
$(PLAT_COMMON_BASE)/io_addr_dec.c \
$(PLAT_COMMON_BASE)/marvell_plat_config.c \
$(PLAT_FAMILY_BASE)/$(PLAT)/plat_bl31_setup.c \
$(MARVELL_COMMON_BASE)/marvell_ddr_info.c \
$(MARVELL_COMMON_BASE)/marvell_gicv3.c \
$(MARVELL_GIC_SOURCES) \
drivers/arm/cci/cci.c \
$(BL31_PORTING_SOURCES) \
$(PLAT_COMMON_BASE)/a3700_sip_svc.c \
$(MARVELL_DRV)
mrvl_flash: ${BUILD_PLAT}/${FIP_NAME} ${DOIMAGETOOL}
$(shell truncate -s %128K ${BUILD_PLAT}/bl1.bin)
$(shell cat ${BUILD_PLAT}/bl1.bin ${BUILD_PLAT}/${FIP_NAME} > ${BUILD_PLAT}/${BOOT_IMAGE})
$(shell truncate -s %4 ${BUILD_PLAT}/${BOOT_IMAGE})
$(shell truncate -s %4 $(WTMI_IMG))
@echo
@echo "Building uart images"
$(TIMBUILD) $(TIMBLDUARTARGS)
@sed -i 's|WTMI_IMG|$(WTMI_MULTI_IMG)|1' $(DOIMAGE_CFG)
@sed -i 's|BOOT_IMAGE|$(BUILD_PLAT)/$(BOOT_IMAGE)|1' $(DOIMAGE_CFG)
ifeq ($(MARVELL_SECURE_BOOT),1)
@sed -i 's|WTMI_IMG|$(WTMI_MULTI_IMG)|1' $(TIMNCFG)
@sed -i 's|BOOT_IMAGE|$(BUILD_PLAT)/$(BOOT_IMAGE)|1' $(TIMNCFG)
endif
$(DOIMAGETOOL) $(DOIMAGE_FLAGS)
@if [ -e "$(TIMNCFG)" ]; then $(DOIMAGETOOL) -r $(TIMNCFG); fi
@rm -rf $(BUILD_PLAT)/$(BUILD_UART)*
@mkdir $(BUILD_PLAT)/$(BUILD_UART)
@mv -t $(BUILD_PLAT)/$(BUILD_UART) $(TIM_IMAGE) $(DOIMAGE_CFG) $(TIMN_IMAGE) $(TIMNCFG)
@find . -name "*_h.*" |xargs cp -ut $(BUILD_PLAT)/$(BUILD_UART)
@mv $(subst .bin,_h.bin,$(WTMI_MULTI_IMG)) $(BUILD_PLAT)/$(BUILD_UART)/wtmi_h.bin
@tar czf $(BUILD_PLAT)/$(BUILD_UART).tgz -C $(BUILD_PLAT) ./$(BUILD_UART)
@echo
@echo "Building flash image"
$(TIMBUILD) $(TIMBLDARGS)
sed -i 's|WTMI_IMG|$(WTMI_MULTI_IMG)|1' $(DOIMAGE_CFG)
sed -i 's|BOOT_IMAGE|$(BUILD_PLAT)/$(BOOT_IMAGE)|1' $(DOIMAGE_CFG)
ifeq ($(MARVELL_SECURE_BOOT),1)
@sed -i 's|WTMI_IMG|$(WTMI_MULTI_IMG)|1' $(TIMNCFG)
@sed -i 's|BOOT_IMAGE|$(BUILD_PLAT)/$(BOOT_IMAGE)|1' $(TIMNCFG)
@echo -e "\n\t=======================================================\n";
@echo -e "\t Secure boot. Encrypting wtmi and boot-image \n";
@echo -e "\t=======================================================\n";
@truncate -s %16 $(WTMI_MULTI_IMG)
@openssl enc -aes-256-cbc -e -in $(WTMI_MULTI_IMG) \
-out $(WTMI_ENC_IMG) \
-K `cat $(IMAGESPATH)/aes-256.txt` -k 0 -nosalt \
-iv `cat $(IMAGESPATH)/iv.txt` -p
@truncate -s %16 $(BUILD_PLAT)/$(BOOT_IMAGE);
@openssl enc -aes-256-cbc -e -in $(BUILD_PLAT)/$(BOOT_IMAGE) \
-out $(BUILD_PLAT)/$(BOOT_ENC_IMAGE) \
-K `cat $(IMAGESPATH)/aes-256.txt` -k 0 -nosalt \
-iv `cat $(IMAGESPATH)/iv.txt` -p
endif
$(DOIMAGETOOL) $(DOIMAGE_FLAGS)
@if [ -e "$(TIMNCFG)" ]; then $(DOIMAGETOOL) -r $(TIMNCFG); fi
@if [ "$(MARVELL_SECURE_BOOT)" = "1" ]; then sed -i 's|$(WTMI_MULTI_IMG)|$(WTMI_ENC_IMG)|1;s|$(BOOT_IMAGE)|$(BOOT_ENC_IMAGE)|1;' $(TIMNCFG); fi
$(TIM2IMG) $(TIM2IMGARGS) -o $(BUILD_PLAT)/$(FLASH_IMAGE)
@mv -t $(BUILD_PLAT) $(TIM_IMAGE) $(DOIMAGE_CFG) $(TIMN_IMAGE) $(TIMNCFG) $(WTMI_IMG) $(WTMI_SYSINIT_IMG) $(WTMI_MULTI_IMG)
@if [ "$(MARVELL_SECURE_BOOT)" = "1" ]; then mv -t $(BUILD_PLAT) $(WTMI_ENC_IMG) OtpHash.txt; fi
@find . -name "*.txt" | grep -E "CSK[[:alnum:]]_KeyHash.txt|Tim_msg.txt|TIMHash.txt" | xargs rm -f

View File

@ -0,0 +1,82 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <debug.h>
#include <marvell_plat_priv.h>
#include <plat_marvell.h>
#include <runtime_svc.h>
#include <smccc.h>
#include "comphy/phy-comphy-3700.h"
/* COMPHY related FIDs */
#define MV_SIP_COMPHY_POWER_ON 0x82000001
#define MV_SIP_COMPHY_POWER_OFF 0x82000002
#define MV_SIP_COMPHY_PLL_LOCK 0x82000003
/* Miscellaneous FIDs */
#define MV_SIP_DRAM_SIZE 0x82000010
/* This macro is used to identify COMPHY related calls from SMC function ID */
#define is_comphy_fid(fid) \
((fid) >= MV_SIP_COMPHY_POWER_ON && (fid) <= MV_SIP_COMPHY_PLL_LOCK)
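/*
 * Illustrative sketch only (not part of this file): a normal world client
 * could reach these FIDs through a standard SMCCC call. The helper name
 * (arm_smccc_smc) and the "lane"/"mode" values are assumptions used purely
 * for the example:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_smc(MV_SIP_COMPHY_POWER_ON, lane, mode, 0, 0, 0, 0, 0, &res);
 *	if (res.a0 != 0)
 *		return -EIO;
 */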
uintptr_t mrvl_sip_smc_handler(uint32_t smc_fid,
u_register_t x1,
u_register_t x2,
u_register_t x3,
u_register_t x4,
void *cookie,
void *handle,
u_register_t flags)
{
u_register_t ret;
VERBOSE("%s: got SMC (0x%x) x1 0x%lx, x2 0x%lx\n",
__func__, smc_fid, x1, x2);
if (is_comphy_fid(smc_fid)) {
if (x1 >= MAX_LANE_NR) {
ERROR("%s: Wrong smc (0x%x) lane nr: %lx\n",
__func__, smc_fid, x1);
SMC_RET1(handle, SMC_UNK);
}
}
switch (smc_fid) {
/* COMPHY related FIDs */
case MV_SIP_COMPHY_POWER_ON:
/* x1: comphy_index, x2: comphy_mode */
ret = mvebu_3700_comphy_power_on(x1, x2);
SMC_RET1(handle, ret);
case MV_SIP_COMPHY_POWER_OFF:
/* x1: comphy_index, x2: comphy_mode */
ret = mvebu_3700_comphy_power_off(x1, x2);
SMC_RET1(handle, ret);
case MV_SIP_COMPHY_PLL_LOCK:
/* x1: comphy_index, x2: comphy_mode */
ret = mvebu_3700_comphy_is_pll_locked(x1, x2);
SMC_RET1(handle, ret);
/* Miscellaneous FIDs */
case MV_SIP_DRAM_SIZE:
/* x1: ap_base_addr */
ret = mvebu_get_dram_size(MVEBU_REGS_BASE);
SMC_RET1(handle, ret);
default:
ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
SMC_RET1(handle, SMC_UNK);
}
}
/* Define a runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
marvell_sip_svc,
OEN_SIP_START,
OEN_SIP_END,
SMC_TYPE_FAST,
NULL,
mrvl_sip_smc_handler
);

View File

@ -0,0 +1,53 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <plat_marvell.h>
/* MMU entry for internal (register) space access */
#define MAP_DEVICE0 MAP_REGION_FLAT(DEVICE0_BASE, \
DEVICE0_SIZE, \
MT_DEVICE | MT_RW | MT_SECURE)
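/*
 * Note: DEVICE0_BASE/DEVICE0_SIZE are defined in the platform headers and
 * cover the 256MB internal register space at MVEBU_REGS_BASE, which every
 * BL stage below maps as secure device memory.
 */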
/*
* Table of regions for various BL stages to map using the MMU.
*/
#if IMAGE_BL1
const mmap_region_t plat_marvell_mmap[] = {
MARVELL_MAP_SHARED_RAM,
MAP_DEVICE0,
{0}
};
#endif
#if IMAGE_BL2
const mmap_region_t plat_marvell_mmap[] = {
MARVELL_MAP_SHARED_RAM,
MAP_DEVICE0,
MARVELL_MAP_DRAM,
{0}
};
#endif
#if IMAGE_BL2U
const mmap_region_t plat_marvell_mmap[] = {
MAP_DEVICE0,
{0}
};
#endif
#if IMAGE_BL31
const mmap_region_t plat_marvell_mmap[] = {
MARVELL_MAP_SHARED_RAM,
MAP_DEVICE0,
MARVELL_MAP_DRAM,
{0}
};
#endif
#if IMAGE_BL32
const mmap_region_t plat_marvell_mmap[] = {
MAP_DEVICE0,
{0}
};
#endif
MARVELL_CASSERT_MMAP;

View File

@ -0,0 +1,68 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <asm_macros.S>
#include <platform_def.h>
.globl plat_secondary_cold_boot_setup
.globl plat_get_my_entrypoint
.globl plat_is_my_cpu_primary
/* -----------------------------------------------------
* void plat_secondary_cold_boot_setup (void);
*
* This function performs any platform specific actions
* needed for a secondary cpu after a cold reset. Right
* now this is a stub function.
* -----------------------------------------------------
*/
func plat_secondary_cold_boot_setup
mov x0, #0
ret
endfunc plat_secondary_cold_boot_setup
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between cold and warm boot
* For a cold boot, return 0.
* For a warm boot, read the mailbox and return the address it contains.
* A magic number is placed before the entrypoint to avoid mistakes caused
* by an uninitialized mailbox data area.
* ---------------------------------------------------------------------
*/
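/*
 * Mailbox layout assumed by this routine (at PLAT_MARVELL_MAILBOX_BASE):
 *   offset 0x0 - PLAT_MARVELL_MAILBOX_MAGIC_NUM ("mrvl")
 *   offset 0x8 - warm boot entrypoint address
 */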
func plat_get_my_entrypoint
/* Read first word and compare it with magic num */
mov_imm x0, PLAT_MARVELL_MAILBOX_BASE
ldr x1, [x0]
mov_imm x2, PLAT_MARVELL_MAILBOX_MAGIC_NUM
cmp x1, x2
/* If the magic number does not match, return 0, i.e. cold boot */
beq entrypoint
mov x0, #0
ret
entrypoint:
/* Second word contains the jump address */
add x0, x0, #8
ldr x0, [x0]
ret
endfunc plat_get_my_entrypoint
/* -----------------------------------------------------
* unsigned int plat_is_my_cpu_primary (void);
*
* Find out whether the current cpu is the primary
* cpu.
* -----------------------------------------------------
*/
func plat_is_my_cpu_primary
mrs x0, mpidr_el1
and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
cmp x0, #MVEBU_PRIMARY_CPU
cset w0, eq
ret
endfunc plat_is_my_cpu_primary

View File

@ -0,0 +1,276 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <dram_win.h>
#include <marvell_plat_priv.h>
#include <mmio.h>
#include <mvebu.h>
#include <plat_marvell.h>
#include <string.h>
/* Armada 3700 has 5 configurable windows */
#define MV_CPU_WIN_NUM 5
#define CPU_WIN_DISABLED 0
#define CPU_WIN_ENABLED 1
/*
* There are 2 different cpu decode window configuration cases:
* - DRAM size is not over 2GB;
* - DRAM size is 4GB.
*/
enum cpu_win_config_num {
CPU_WIN_CONFIG_DRAM_NOT_OVER_2GB = 0,
CPU_WIN_CONFIG_DRAM_4GB,
CPU_WIN_CONFIG_MAX
};
enum cpu_win_target {
CPU_WIN_TARGET_DRAM = 0,
CPU_WIN_TARGET_INTERNAL_REG,
CPU_WIN_TARGET_PCIE,
CPU_WIN_TARGET_PCIE_OVER_MCI,
CPU_WIN_TARGET_BOOT_ROM,
CPU_WIN_TARGET_MCI_EXTERNAL,
CPU_WIN_TARGET_RWTM_RAM = 7,
CPU_WIN_TARGET_CCI400_REG
};
struct cpu_win_configuration {
uint32_t enabled;
enum cpu_win_target target;
uint64_t base_addr;
uint64_t size;
uint64_t remap_addr;
};
struct cpu_win_configuration mv_cpu_wins[CPU_WIN_CONFIG_MAX][MV_CPU_WIN_NUM] = {
/*
* When the total DRAM size is not over 2GB:
* DDR window 0 is already configured in the TIM header; its size may not be
* 512MB but the actual DRAM size, so there is no need to configure it again.
* The other CPU windows keep their default settings.
*/
{
/* enabled
* target
* base
* size
* remap
*/
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_DRAM,
0x0,
0x08000000,
0x0},
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_MCI_EXTERNAL,
0xe0000000,
0x08000000,
0xe0000000},
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_PCIE,
0xe8000000,
0x08000000,
0xe8000000},
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_RWTM_RAM,
0xf0000000,
0x00020000,
0x1fff0000},
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_PCIE_OVER_MCI,
0x80000000,
0x10000000,
0x80000000},
},
/*
* If the total DRAM size is more than 2GB, the only case currently is 4GB
* DRAM; the following CPU window configuration is used:
* - The Internal Regs, CCI-400, Boot ROM and PCIe windows keep their
*   defaults;
* - 4 CPU decode windows are used for DRAM, covering 3.375GB of DRAM;
*   DDR window 0 is already configured in the TIM header with a 2GB size,
*   so there is no need to configure it again here.
*
* 0xFFFFFFFF ---> |-----------------------|
* | Boot ROM | 64KB
* 0xFFF00000 ---> +-----------------------+
* : :
* 0xF0000000 ---> |-----------------------|
* | PCIE | 128 MB
* 0xE8000000 ---> |-----------------------|
* | DDR window 3 | 128 MB
* 0xE0000000 ---> +-----------------------+
* : :
* 0xD8010000 ---> |-----------------------|
* | CCI Regs | 64 KB
* 0xD8000000 ---> +-----------------------+
* : :
* : :
* 0xD2000000 ---> +-----------------------+
* | Internal Regs | 32MB
* 0xD0000000 ---> |-----------------------|
* | DDR window 2 | 256 MB
* 0xC0000000 ---> |-----------------------|
* | |
* | DDR window 1 | 1 GB
* | |
* 0x80000000 ---> |-----------------------|
* | |
* | |
* | DDR window 0 | 2 GB
* | |
* | |
* 0x00000000 ---> +-----------------------+
*/
{
/* enabled
* target
* base
* size
* remap
*/
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_DRAM,
0x0,
0x80000000,
0x0},
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_DRAM,
0x80000000,
0x40000000,
0x80000000},
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_DRAM,
0xc0000000,
0x10000000,
0xc0000000},
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_DRAM,
0xe0000000,
0x08000000,
0xe0000000},
{CPU_WIN_ENABLED,
CPU_WIN_TARGET_PCIE,
0xe8000000,
0x08000000,
0xe8000000},
},
};
/*
* dram_win_map_build
*
* This function builds cpu dram windows mapping
* which includes base address and window size by
* reading cpu dram decode windows registers.
*
* @input: N/A
*
* @output:
* - win_map: cpu dram windows mapping
*
* @return: N/A
*/
void dram_win_map_build(struct dram_win_map *win_map)
{
int32_t win_id;
struct dram_win *win;
uint32_t base_reg, ctrl_reg, size_reg, enabled, target;
memset(win_map, 0, sizeof(struct dram_win_map));
for (win_id = 0; win_id < DRAM_WIN_MAP_NUM_MAX; win_id++) {
ctrl_reg = mmio_read_32(CPU_DEC_WIN_CTRL_REG(win_id));
target = (ctrl_reg & CPU_DEC_CR_WIN_TARGET_MASK) >>
CPU_DEC_CR_WIN_TARGET_OFFS;
enabled = ctrl_reg & CPU_DEC_CR_WIN_ENABLE;
/* Ignore disabled and non-DRAM windows */
if ((enabled == 0) || (target != DRAM_CPU_DEC_TARGET_NUM))
continue;
win = win_map->dram_windows + win_map->dram_win_num;
base_reg = mmio_read_32(CPU_DEC_WIN_BASE_REG(win_id));
size_reg = mmio_read_32(CPU_DEC_WIN_SIZE_REG(win_id));
/* Base reg [15:0] corresponds to transaction address [39:16] */
win->base_addr = (base_reg & CPU_DEC_BR_BASE_MASK) >>
CPU_DEC_BR_BASE_OFFS;
win->base_addr *= CPU_DEC_CR_WIN_SIZE_ALIGNMENT;
/*
* Size reg [15:0] is programmed from LSB to MSB as a sequence
* of 1s followed by a sequence of 0s and the number of 1s
* specifies the size of the window in 64 KB granularity,
* for example, a value of 00FFh specifies 256 x 64 KB = 16 MB
*/
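/*
 * A run of N ones starting from bit 0 has the value 2^N - 1, so adding 1
 * to the field and multiplying by the 64 KB alignment below yields the
 * window size directly.
 */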
win->win_size = (size_reg & CPU_DEC_CR_WIN_SIZE_MASK) >>
CPU_DEC_CR_WIN_SIZE_OFFS;
win->win_size = (win->win_size + 1) *
CPU_DEC_CR_WIN_SIZE_ALIGNMENT;
win_map->dram_win_num++;
}
}
static void cpu_win_set(uint32_t win_id, struct cpu_win_configuration *win_cfg)
{
uint32_t base_reg, ctrl_reg, size_reg, remap_reg;
/* Disable window */
ctrl_reg = mmio_read_32(CPU_DEC_WIN_CTRL_REG(win_id));
ctrl_reg &= ~CPU_DEC_CR_WIN_ENABLE;
mmio_write_32(CPU_DEC_WIN_CTRL_REG(win_id), ctrl_reg);
/* For a disabled window, only disable it. */
if (!win_cfg->enabled)
return;
/* Set Base Register */
base_reg = (uint32_t)(win_cfg->base_addr /
CPU_DEC_CR_WIN_SIZE_ALIGNMENT);
base_reg <<= CPU_DEC_BR_BASE_OFFS;
base_reg &= CPU_DEC_BR_BASE_MASK;
mmio_write_32(CPU_DEC_WIN_BASE_REG(win_id), base_reg);
/* Set Remap Register with the same value
* as the <Base> field in Base Register
*/
remap_reg = (uint32_t)(win_cfg->remap_addr /
CPU_DEC_CR_WIN_SIZE_ALIGNMENT);
remap_reg <<= CPU_DEC_RLR_REMAP_LOW_OFFS;
remap_reg &= CPU_DEC_RLR_REMAP_LOW_MASK;
mmio_write_32(CPU_DEC_REMAP_LOW_REG(win_id), remap_reg);
/* Set Size Register */
size_reg = (win_cfg->size / CPU_DEC_CR_WIN_SIZE_ALIGNMENT) - 1;
size_reg <<= CPU_DEC_CR_WIN_SIZE_OFFS;
size_reg &= CPU_DEC_CR_WIN_SIZE_MASK;
mmio_write_32(CPU_DEC_WIN_SIZE_REG(win_id), size_reg);
/* Set Control Register - set target id and enable window */
ctrl_reg &= ~CPU_DEC_CR_WIN_TARGET_MASK;
ctrl_reg |= (win_cfg->target << CPU_DEC_CR_WIN_TARGET_OFFS);
ctrl_reg |= CPU_DEC_CR_WIN_ENABLE;
mmio_write_32(CPU_DEC_WIN_CTRL_REG(win_id), ctrl_reg);
}
void cpu_wins_init(void)
{
uint32_t cfg_idx, win_id;
if (mvebu_get_dram_size(MVEBU_REGS_BASE) <= _2GB_)
cfg_idx = CPU_WIN_CONFIG_DRAM_NOT_OVER_2GB;
else
cfg_idx = CPU_WIN_CONFIG_DRAM_4GB;
/* Window 0 is configured always for DRAM in tim header
* already, no need to configure it again here
*/
for (win_id = 1; win_id < MV_CPU_WIN_NUM; win_id++)
cpu_win_set(win_id, &mv_cpu_wins[cfg_idx][win_id]);
}

View File

@ -0,0 +1,122 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __A3700_PLAT_DEF_H__
#define __A3700_PLAT_DEF_H__
#include <marvell_def.h>
#define MVEBU_MAX_CPUS_PER_CLUSTER 2
#define MVEBU_PRIMARY_CPU 0x0
/*
* The counter on A3700 is always fed from reference 25M clock (XTAL).
* However, the minimal CPU counter prescaler is 2, so the counter
* frequency is divided by 2, which gives 12.5MHz.
*/
#define COUNTER_FREQUENCY 12500000
#define MVEBU_REGS_BASE 0xD0000000
/*****************************************************************************
* MVEBU memory map related constants
*****************************************************************************
*/
/* Aggregate of all devices in the first GB */
#define DEVICE0_BASE MVEBU_REGS_BASE
#define DEVICE0_SIZE 0x10000000
/*****************************************************************************
* GIC-500 & interrupt handling related constants
*****************************************************************************
*/
/* Base MVEBU compatible GIC memory map */
#define MVEBU_GICD_BASE 0x1D00000
#define MVEBU_GICR_BASE 0x1D40000
#define MVEBU_GICC_BASE 0x1D80000
/* CCI-400 */
#define MVEBU_CCI_BASE 0x8000000
/*****************************************************************************
* North and south bridge register base
*****************************************************************************
*/
#define MVEBU_NB_REGS_BASE (MVEBU_REGS_BASE + 0x13000)
#define MVEBU_SB_REGS_BASE (MVEBU_REGS_BASE + 0x18000)
/*****************************************************************************
* GPIO registers related constants
*****************************************************************************
*/
/* North and south bridge GPIO register base address */
#define MVEBU_NB_GPIO_REG_BASE (MVEBU_NB_REGS_BASE + 0x800)
#define MVEBU_NB_GPIO_IRQ_REG_BASE (MVEBU_NB_REGS_BASE + 0xC00)
#define MVEBU_SB_GPIO_REG_BASE (MVEBU_SB_REGS_BASE + 0x800)
#define MVEBU_SB_GPIO_IRQ_REG_BASE (MVEBU_SB_REGS_BASE + 0xC00)
#define MVEBU_NB_SB_IRQ_REG_BASE (MVEBU_REGS_BASE + 0x8A00)
/* North Bridge GPIO selection register */
#define MVEBU_NB_GPIO_SEL_REG (MVEBU_NB_GPIO_REG_BASE + 0x30)
#define MVEBU_NB_GPIO_OUTPUT_EN_HIGH_REG (MVEBU_NB_GPIO_REG_BASE + 0x04)
/* I2C1 GPIO Enable bit offset */
#define MVEBU_GPIO_TW1_GPIO_EN_OFF (10)
/* SPI pins mode bit offset */
#define MVEBU_GPIO_NB_SPI_PIN_MODE_OFF (28)
/*****************************************************************************
* DRAM registers related constants
*****************************************************************************
*/
#define MVEBU_DRAM_REG_BASE (MVEBU_REGS_BASE)
/*****************************************************************************
* SB wake-up registers related constants
*****************************************************************************
*/
#define MVEBU_SB_WAKEUP_REG_BASE (MVEBU_REGS_BASE + 0x19000)
/*****************************************************************************
* PMSU registers related constants
*****************************************************************************
*/
#define MVEBU_PMSU_REG_BASE (MVEBU_REGS_BASE + 0x14000)
/*****************************************************************************
* North Bridge Step-Down Registers
*****************************************************************************
*/
#define MVEBU_NB_STEP_DOWN_REG_BASE (MVEBU_REGS_BASE + 0x12800)
/*****************************************************************************
* DRAM CS memory map register base
*****************************************************************************
*/
#define MVEBU_CS_MMAP_REG_BASE (MVEBU_REGS_BASE + 0x200)
/*****************************************************************************
* CPU decoder window registers related constants
*****************************************************************************
*/
#define MVEBU_CPU_DEC_WIN_REG_BASE (MVEBU_REGS_BASE + 0xCF00)
/*****************************************************************************
* AVS registers related constants
*****************************************************************************
*/
#define MVEBU_AVS_REG_BASE (MVEBU_REGS_BASE + 0x11500)
/*****************************************************************************
* COMPHY registers related constants
*****************************************************************************
*/
#define MVEBU_COMPHY_REG_BASE (MVEBU_REGS_BASE + 0x18300)
#endif /* __A3700_PLAT_DEF_H__ */

View File

@ -0,0 +1,52 @@
/*
* Copyright (C) 2016 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __A3700_PM_H__
#define __A3700_PM_H__
#include <stdint.h>
/* supported wake up sources */
enum pm_wake_up_src_type {
WAKE_UP_SRC_GPIO,
/* FOLLOWING SRC NOT SUPPORTED YET */
WAKE_UP_SRC_TIMER,
WAKE_UP_SRC_UART0,
WAKE_UP_SRC_UART1,
WAKE_UP_SRC_MAX,
};
struct pm_gpio_data {
/*
* bank 0: North bridge GPIO
* bank 1: South bridge GPIO
*/
uint32_t bank_num;
uint32_t gpio_num;
};
union pm_wake_up_src_data {
struct pm_gpio_data gpio_data;
/* delay in seconds */
uint32_t timer_delay;
};
struct pm_wake_up_src {
enum pm_wake_up_src_type wake_up_src_type;
union pm_wake_up_src_data wake_up_data;
};
struct pm_wake_up_src_config {
uint32_t wake_up_src_num;
struct pm_wake_up_src wake_up_src[WAKE_UP_SRC_MAX];
};
struct pm_wake_up_src_config *mv_wake_up_src_config_get(void);
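/*
 * A minimal sketch of a board implementation (normally provided by the
 * board porting file pm_src.c); the GPIO bank/number below are purely
 * illustrative:
 *
 *	static struct pm_wake_up_src_config wake_up_src_cfg = {
 *		.wake_up_src_num = 1,
 *		.wake_up_src[0] = {
 *			.wake_up_src_type = WAKE_UP_SRC_GPIO,
 *			.wake_up_data = {
 *				.gpio_data = {
 *					.bank_num = 0,
 *					.gpio_num = 14,
 *				},
 *			},
 *		},
 *	};
 *
 *	struct pm_wake_up_src_config *mv_wake_up_src_config_get(void)
 *	{
 *		return &wake_up_src_cfg;
 *	}
 */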
#endif /* __A3700_PM_H__ */

View File

@ -0,0 +1,14 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef _DDR_INFO_H_
#define _DDR_INFO_H_
#define DRAM_MAX_IFACE 1
#define DRAM_CH0_MMAP_LOW_OFFSET 0x200
#endif /* _DDR_INFO_H_ */

View File

@ -0,0 +1,18 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef _DRAM_WIN_H_
#define _DRAM_WIN_H_
#include <bl_common.h>
#include <io_addr_dec.h>
void dram_win_map_build(struct dram_win_map *win_map);
void cpu_wins_init(void);
#endif /* _DRAM_WIN_H_ */

View File

@ -0,0 +1,67 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef _IO_ADDR_DEC_H_
#define _IO_ADDR_DEC_H_
#include <stdint.h>
/* There are 5 configurable cpu decoder windows. */
#define DRAM_WIN_MAP_NUM_MAX 5
/* Target number for dram in cpu decoder windows. */
#define DRAM_CPU_DEC_TARGET_NUM 0
/*
* Not all configurable decode windows can be used for DRAM; some units have
* to reserve one decode window for another unit they communicate with.
* For example, the DMA engine has 3 configurable windows, but only two can
* be used for DRAM while the last one has to be for PCIe, so for DMA
* max_dram_win is 2.
*/
struct dec_win_config {
uint32_t dec_reg_base; /* IO address decoder register base address */
uint32_t win_attr; /* IO address decoder windows attributes */
/* How many configurable dram decoder windows that this unit has; */
uint32_t max_dram_win;
/* The decoder windows number including remapping that this unit has */
uint32_t max_remap;
/* The offset between continuous decode windows
* within the same unit, typically 0x10
*/
uint32_t win_offset;
};
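/*
 * Example entry (taken from the Armada 3700 board table): the USB unit is
 * described as { 0xc000, 0x3d, 2, 0, 0x08 }, i.e. decode registers at
 * offset 0xc000, window attribute 0x3d, two DRAM-capable windows, no remap
 * windows and a 0x08 stride between consecutive windows.
 */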
struct dram_win {
uintptr_t base_addr;
uintptr_t win_size;
};
struct dram_win_map {
int dram_win_num;
struct dram_win dram_windows[DRAM_WIN_MAP_NUM_MAX];
};
/*
* init_io_addr_dec
*
* This function initializes io address decoder windows by
* cpu dram window mapping information
*
* @input: N/A
* - dram_wins_map: cpu dram windows mapping
* - io_dec_config: io address decoder windows configuration
* - io_unit_num: io address decoder unit number
* @output: N/A
*
* @return: 0 on success and others on failure
*/
int init_io_addr_dec(struct dram_win_map *dram_wins_map,
struct dec_win_config *io_dec_config,
uint32_t io_unit_num);
#endif /* _IO_ADDR_DEC_H_ */

View File

@ -0,0 +1,26 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __PLAT_MACROS_S__
#define __PLAT_MACROS_S__
#include <marvell_macros.S>
/* ---------------------------------------------
* The below macro prints out relevant GIC and
* CCI registers whenever an unhandled
* exception is taken in BL31.
* ---------------------------------------------
*/
.macro plat_crash_print_regs
mov_imm x17, MVEBU_GICC_BASE
mov_imm x16, MVEBU_GICD_BASE
marvell_print_gic_regs
print_cci_regs
.endm
#endif /* __PLAT_MACROS_S__ */

View File

@ -0,0 +1,231 @@
/*
* Copyright (C) 2016 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __PLATFORM_DEF_H__
#define __PLATFORM_DEF_H__
#include <board_marvell_def.h>
#include <mvebu_def.h>
#ifndef __ASSEMBLY__
#include <stdio.h>
#endif /* __ASSEMBLY__ */
/*
* Most platform porting definitions provided by included headers
*/
/*
* DRAM Memory layout:
* +-----------------------+
* : :
* : Linux :
* 0x04X00000-->+-----------------------+
* | BL3-3(u-boot) |>>}>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
* |-----------------------| } |
* | BL3-[0,1, 2] | }---------------------------------> |
* |-----------------------| } || |
* | BL2 | }->FIP (loaded by || |
* |-----------------------| } BootROM to DRAM) || |
* | FIP_TOC | } || |
* 0x04120000-->|-----------------------| || |
* | BL1 (RO) | || |
* 0x04100000-->+-----------------------+ || |
* : : || |
* : Trusted SRAM section : \/ |
* 0x04040000-->+-----------------------+ Replaced by BL2 +----------------+ |
* | BL1 (RW) | <<<<<<<<<<<<<<<< | BL3-1 NOBITS | |
* 0x04037000-->|-----------------------| <<<<<<<<<<<<<<<< |----------------| |
* | | <<<<<<<<<<<<<<<< | BL3-1 PROGBITS | |
* 0x04023000-->|-----------------------| +----------------+ |
* | BL2 | |
* |-----------------------| |
* | | |
* 0x04001000-->|-----------------------| |
* | Shared | |
* 0x04000000-->+-----------------------+ |
* : : |
* : Linux : |
* : : |
* |-----------------------| |
* | | U-Boot(BL3-3) Loaded by BL2 |
* | U-Boot | <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
* 0x00000000-->+-----------------------+
*
* Trusted SRAM section 0x4000000..0x4200000:
* ----------------------------------------
* SRAM_BASE = 0x4001000
* BL2_BASE = 0x4006000
* BL2_LIMIT = BL31_BASE
* BL31_BASE = 0x4023000 = (64MB + 256KB - 0x1D000)
* BL31_PROGBITS_LIMIT = BL1_RW_BASE
* BL1_RW_BASE = 0x4037000 = (64MB + 256KB - 0x9000)
* BL1_RW_LIMIT = BL31_LIMIT = 0x4040000
*
*
* PLAT_MARVELL_FIP_BASE = 0x4120000
*/
#define PLAT_MARVELL_ATF_BASE 0x4000000
#define PLAT_MARVELL_ATF_LOAD_ADDR \
(PLAT_MARVELL_ATF_BASE + 0x100000)
#define PLAT_MARVELL_FIP_BASE \
(PLAT_MARVELL_ATF_LOAD_ADDR + 0x20000)
#define PLAT_MARVELL_FIP_MAX_SIZE 0x4000000
#define PLAT_MARVELL_CLUSTER_CORE_COUNT 2
/* DRAM[2MB..66MB] is used as Trusted ROM */
#define PLAT_MARVELL_TRUSTED_ROM_BASE PLAT_MARVELL_ATF_LOAD_ADDR
/* 64 MB TODO: reduce this to minimum needed according to fip image size*/
#define PLAT_MARVELL_TRUSTED_ROM_SIZE 0x04000000
/* Reserve 16M for SCP (Secure PayLoad) Trusted DRAM */
#define PLAT_MARVELL_TRUSTED_DRAM_BASE 0x04400000
#define PLAT_MARVELL_TRUSTED_DRAM_SIZE 0x01000000 /* 16 MB */
/*
* PLAT_MARVELL_MAX_BL1_RW_SIZE is calculated using the current BL1 RW debug size
* plus a little space for growth.
*/
#define PLAT_MARVELL_MAX_BL1_RW_SIZE 0xA000
/*
* PLAT_MARVELL_MAX_BL2_SIZE is calculated using the current BL2 debug size plus a
* little space for growth.
*/
#define PLAT_MARVELL_MAX_BL2_SIZE 0xF000
/*
* PLAT_MARVEL_MAX_BL31_SIZE is calculated using the current BL31 debug size plus a
* little space for growth.
*/
#define PLAT_MARVEL_MAX_BL31_SIZE 0x5D000
#define PLAT_MARVELL_CPU_ENTRY_ADDR BL1_RO_BASE
/* GIC related definitions */
#define PLAT_MARVELL_GICD_BASE (MVEBU_REGS_BASE + MVEBU_GICD_BASE)
#define PLAT_MARVELL_GICR_BASE (MVEBU_REGS_BASE + MVEBU_GICR_BASE)
#define PLAT_MARVELL_GICC_BASE (MVEBU_REGS_BASE + MVEBU_GICC_BASE)
#define PLAT_MARVELL_G0_IRQ_PROPS(grp) \
INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_LEVEL), \
INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_LEVEL)
#define PLAT_MARVELL_G1S_IRQ_PROPS(grp) \
INTR_PROP_DESC(MARVELL_IRQ_SEC_PHY_TIMER, \
GIC_HIGHEST_SEC_PRIORITY, grp, GIC_INTR_CFG_LEVEL), \
INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_LEVEL), \
INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_LEVEL), \
INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_LEVEL), \
INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_LEVEL), \
INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_LEVEL), \
INTR_PROP_DESC(MARVELL_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_LEVEL)
#define PLAT_MARVELL_SHARED_RAM_CACHED 1
/* CCI related constants */
#define PLAT_MARVELL_CCI_BASE (MVEBU_REGS_BASE + MVEBU_CCI_BASE)
#define PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX 3
#define PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX 4
/*
* Load address of BL3-3 for this platform port
*/
#define PLAT_MARVELL_NS_IMAGE_OFFSET 0x0
/* System Reference Clock*/
#define PLAT_REF_CLK_IN_HZ COUNTER_FREQUENCY
/*
* PL011 related constants
*/
#define PLAT_MARVELL_BOOT_UART_BASE (MVEBU_REGS_BASE + 0x12000)
#define PLAT_MARVELL_BOOT_UART_CLK_IN_HZ 25804800
#define PLAT_MARVELL_CRASH_UART_BASE PLAT_MARVELL_BOOT_UART_BASE
#define PLAT_MARVELL_CRASH_UART_CLK_IN_HZ PLAT_MARVELL_BOOT_UART_CLK_IN_HZ
#define PLAT_MARVELL_BL31_RUN_UART_BASE PLAT_MARVELL_BOOT_UART_BASE
#define PLAT_MARVELL_BL31_RUN_UART_CLK_IN_HZ PLAT_MARVELL_BOOT_UART_CLK_IN_HZ
/* Required platform porting definitions */
#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL1
/* System timer related constants */
#define PLAT_MARVELL_NSTIMER_FRAME_ID 1
/* Mailbox base address */
#define PLAT_MARVELL_MAILBOX_BASE \
(MARVELL_TRUSTED_SRAM_BASE + 0x400)
#define PLAT_MARVELL_MAILBOX_SIZE 0x100
#define PLAT_MARVELL_MAILBOX_MAGIC_NUM 0x6D72766C /* mrvl */
/* DRAM CS memory map registers related constants */
#define MVEBU_CS_MMAP_LOW(cs_num) \
(MVEBU_CS_MMAP_REG_BASE + (cs_num) * 0x8)
#define MVEBU_CS_MMAP_ENABLE 0x1
#define MVEBU_CS_MMAP_AREA_LEN_OFFS 16
#define MVEBU_CS_MMAP_AREA_LEN_MASK \
(0x1f << MVEBU_CS_MMAP_AREA_LEN_OFFS)
#define MVEBU_CS_MMAP_START_ADDR_LOW_OFFS 23
#define MVEBU_CS_MMAP_START_ADDR_LOW_MASK \
(0x1ff << MVEBU_CS_MMAP_START_ADDR_LOW_OFFS)
#define MVEBU_CS_MMAP_HIGH(cs_num) \
(MVEBU_CS_MMAP_REG_BASE + 0x4 + (cs_num) * 0x8)
/* DRAM max CS number */
#define MVEBU_MAX_CS_MMAP_NUM (2)
/* CPU decoder window related constants */
#define CPU_DEC_WIN_CTRL_REG(win_num) \
(MVEBU_CPU_DEC_WIN_REG_BASE + (win_num) * 0x10)
#define CPU_DEC_CR_WIN_ENABLE 0x1
#define CPU_DEC_CR_WIN_TARGET_OFFS 4
#define CPU_DEC_CR_WIN_TARGET_MASK \
(0xf << CPU_DEC_CR_WIN_TARGET_OFFS)
#define CPU_DEC_WIN_SIZE_REG(win_num) \
(MVEBU_CPU_DEC_WIN_REG_BASE + 0x4 + (win_num) * 0x10)
#define CPU_DEC_CR_WIN_SIZE_OFFS 0
#define CPU_DEC_CR_WIN_SIZE_MASK \
(0xffff << CPU_DEC_CR_WIN_SIZE_OFFS)
#define CPU_DEC_CR_WIN_SIZE_ALIGNMENT 0x10000
#define CPU_DEC_WIN_BASE_REG(win_num) \
(MVEBU_CPU_DEC_WIN_REG_BASE + 0x8 + (win_num) * 0x10)
#define CPU_DEC_BR_BASE_OFFS 0
#define CPU_DEC_BR_BASE_MASK \
(0xffff << CPU_DEC_BR_BASE_OFFS)
#define CPU_DEC_REMAP_LOW_REG(win_num) \
(MVEBU_CPU_DEC_WIN_REG_BASE + 0xC + (win_num) * 0x10)
#define CPU_DEC_RLR_REMAP_LOW_OFFS 0
#define CPU_DEC_RLR_REMAP_LOW_MASK \
(0xffff << CPU_DEC_RLR_REMAP_LOW_OFFS)
/* Security related definitions */
#define IRQ_SEC_OS_TICK_INT MARVELL_IRQ_SEC_PHY_TIMER
#define TRUSTED_DRAM_BASE PLAT_MARVELL_TRUSTED_DRAM_BASE
#define TRUSTED_DRAM_SIZE PLAT_MARVELL_TRUSTED_DRAM_SIZE
#ifdef BL32
#define BL32_BASE TRUSTED_DRAM_BASE
#define BL32_LIMIT (TRUSTED_DRAM_BASE + TRUSTED_DRAM_SIZE)
#endif
#endif /* __PLATFORM_DEF_H__ */

View File

@ -0,0 +1,177 @@
/*
* Copyright (C) 2016 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <debug.h>
#include <io_addr_dec.h>
#include <mmio.h>
#include <plat_marvell.h>
#define MVEBU_DEC_WIN_CTRL_REG(base, win, off) (MVEBU_REGS_BASE + (base) + \
(win) * (off))
#define MVEBU_DEC_WIN_BASE_REG(base, win, off) (MVEBU_REGS_BASE + (base) + \
(win) * (off) + 0x4)
#define MVEBU_DEC_WIN_REMAP_REG(base, win, off) (MVEBU_REGS_BASE + (base) + \
(win) * (off) + 0x8)
#define MVEBU_DEC_WIN_CTRL_SIZE_OFF (16)
#define MVEBU_DEC_WIN_ENABLE (0x1)
#define MVEBU_DEC_WIN_CTRL_ATTR_OFF (8)
#define MVEBU_DEC_WIN_CTRL_TARGET_OFF (4)
#define MVEBU_DEC_WIN_CTRL_EN_OFF (0)
#define MVEBU_DEC_WIN_BASE_OFF (16)
#define MVEBU_WIN_BASE_SIZE_ALIGNMENT (0x10000)
/* There are up to 14 IO units which need address decoding in Armada-3700 */
#define IO_UNIT_NUM_MAX (14)
#define MVEBU_MAX_ADDRSS_4GB (0x100000000ULL)
static void set_io_addr_dec_win(int win_id, uintptr_t base_addr,
uintptr_t win_size,
struct dec_win_config *dec_win)
{
uint32_t ctrl = 0;
uint32_t base = 0;
/* set size */
ctrl = ((win_size / MVEBU_WIN_BASE_SIZE_ALIGNMENT) - 1) <<
MVEBU_DEC_WIN_CTRL_SIZE_OFF;
/* set attr according to IO decode window */
ctrl |= dec_win->win_attr << MVEBU_DEC_WIN_CTRL_ATTR_OFF;
/* set target */
ctrl |= DRAM_CPU_DEC_TARGET_NUM << MVEBU_DEC_WIN_CTRL_TARGET_OFF;
/* set base */
base = (base_addr / MVEBU_WIN_BASE_SIZE_ALIGNMENT) <<
MVEBU_DEC_WIN_BASE_OFF;
/* set base address*/
mmio_write_32(MVEBU_DEC_WIN_BASE_REG(dec_win->dec_reg_base,
win_id, dec_win->win_offset),
base);
/* set remap window; some units do not have a remap window */
if (win_id < dec_win->max_remap)
mmio_write_32(MVEBU_DEC_WIN_REMAP_REG(dec_win->dec_reg_base,
win_id, dec_win->win_offset), base);
/* set control register */
mmio_write_32(MVEBU_DEC_WIN_CTRL_REG(dec_win->dec_reg_base,
win_id, dec_win->win_offset), ctrl);
/* enable the address decode window at last to make it effective */
ctrl |= MVEBU_DEC_WIN_ENABLE << MVEBU_DEC_WIN_CTRL_EN_OFF;
mmio_write_32(MVEBU_DEC_WIN_CTRL_REG(dec_win->dec_reg_base,
win_id, dec_win->win_offset), ctrl);
INFO("set_io_addr_dec %d result: ctrl(0x%x) base(0x%x)",
win_id, mmio_read_32(MVEBU_DEC_WIN_CTRL_REG(dec_win->dec_reg_base,
win_id, dec_win->win_offset)),
mmio_read_32(MVEBU_DEC_WIN_BASE_REG(dec_win->dec_reg_base,
win_id, dec_win->win_offset)));
if (win_id < dec_win->max_remap)
INFO(" remap(%x)\n",
mmio_read_32(MVEBU_DEC_WIN_REMAP_REG(dec_win->dec_reg_base,
win_id, dec_win->win_offset)));
else
INFO("\n");
}
/* Set io decode window */
static int set_io_addr_dec(struct dram_win_map *win_map,
struct dec_win_config *dec_win)
{
struct dram_win *win;
int id;
/* disable all windows first */
for (id = 0; id < dec_win->max_dram_win; id++)
mmio_write_32(MVEBU_DEC_WIN_CTRL_REG(dec_win->dec_reg_base, id,
dec_win->win_offset), 0);
/* Configure IO decode windows for DRAM, inheriting DRAM size,
* base and target from the CPU-DRAM decode windows and the rest
* from the hard-coded IO decode window settings array.
*/
if (win_map->dram_win_num > dec_win->max_dram_win) {
/*
* If cpu dram windows number exceeds the io decode windows
* max number, then fill the first io decode window
* with base(0) and size(4GB).
*/
set_io_addr_dec_win(0, 0, MVEBU_MAX_ADDRSS_4GB, dec_win);
return 0;
}
for (id = 0; id < win_map->dram_win_num; id++) {
win = &win_map->dram_windows[id];
set_io_addr_dec_win(id, win->base_addr, win->win_size, dec_win);
}
return 0;
}
/*
* init_io_addr_dec
*
* This function initializes io address decoder windows by
* cpu dram window mapping information
*
* @input: N/A
* - dram_wins_map: cpu dram windows mapping
* - io_dec_config: io address decoder windows configuration
* - io_unit_num: io address decoder unit number
* @output: N/A
*
* @return: 0 on success and others on failure
*/
int init_io_addr_dec(struct dram_win_map *dram_wins_map,
struct dec_win_config *io_dec_config, uint32_t io_unit_num)
{
int32_t index;
struct dec_win_config *io_dec_win;
int32_t ret;
INFO("Initializing IO address decode windows\n");
if (io_dec_config == NULL || io_unit_num == 0) {
ERROR("No IO address decoder windows configurations!\n");
return -1;
}
if (io_unit_num > IO_UNIT_NUM_MAX) {
ERROR("IO address decoder windows number %d is over max %d\n",
io_unit_num, IO_UNIT_NUM_MAX);
return -1;
}
if (dram_wins_map == NULL) {
ERROR("No cpu dram decoder windows map!\n");
return -1;
}
for (index = 0; index < dram_wins_map->dram_win_num; index++)
INFO("DRAM mapping %d base(0x%lx) size(0x%lx)\n",
index, dram_wins_map->dram_windows[index].base_addr,
dram_wins_map->dram_windows[index].win_size);
/* Set address decode window for each IO */
for (index = 0; index < io_unit_num; index++) {
io_dec_win = io_dec_config + index;
ret = set_io_addr_dec(dram_wins_map, io_dec_win);
if (ret) {
ERROR("Failed to set IO address decode\n");
return -1;
}
INFO("Set IO decode window successfully, base(0x%x)",
io_dec_win->dec_reg_base);
INFO(" win_attr(%x) max_dram_win(%d) max_remap(%d)",
io_dec_win->win_attr, io_dec_win->max_dram_win,
io_dec_win->max_remap);
INFO(" win_offset(%d)\n", io_dec_win->win_offset);
}
return 0;
}

View File

@ -0,0 +1,33 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <bl_common.h>
#include <io_addr_dec.h>
#include <mvebu_def.h>
struct dec_win_config io_dec_win_conf[] = {
/* dec_reg_base win_attr max_dram_win max_remap win_offset */
{0xc000, 0x3d, 2, 0, 0x08}, /* USB */
{0xc100, 0x3d, 3, 0, 0x10}, /* USB3 */
{0xc200, 0x3d, 2, 0, 0x10}, /* DMA */
{0xc300, 0x3d, 2, 0, 0x10}, /* NETA0 */
{0xc400, 0x3d, 2, 0, 0x10}, /* NETA1 */
{0xc500, 0x3d, 2, 0, 0x10}, /* PCIe */
{0xc800, 0x3d, 3, 0, 0x10}, /* SATA */
{0xca00, 0x3d, 3, 0, 0x08}, /* SD */
{0xcb00, 0x3d, 3, 0, 0x10}, /* eMMC */
{0xce00, 0x3d, 2, 0, 0x08}, /* EIP97 */
};
int marvell_get_io_dec_win_conf(struct dec_win_config **win, uint32_t *size)
{
*win = io_dec_win_conf;
*size = sizeof(io_dec_win_conf)/sizeof(struct dec_win_config);
return 0;
}

View File

@ -0,0 +1,815 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <a3700_pm.h>
#include <arch_helpers.h>
#include <armada_common.h>
#include <debug.h>
#include <dram_win.h>
#include <io_addr_dec.h>
#include <mmio.h>
#include <mvebu.h>
#include <mvebu_def.h>
#include <marvell_plat_priv.h>
#include <platform.h>
#include <plat_marvell.h>
#include <psci.h>
#ifdef USE_CCI
#include <cci.h>
#endif
/* Warm reset register */
#define MVEBU_WARM_RESET_REG (MVEBU_NB_REGS_BASE + 0x840)
#define MVEBU_WARM_RESET_MAGIC 0x1D1E
/* North Bridge GPIO1 SEL register */
#define MVEBU_NB_GPIO1_SEL_REG (MVEBU_NB_REGS_BASE + 0x830)
#define MVEBU_NB_GPIO1_UART1_SEL BIT(19)
#define MVEBU_NB_GPIO1_GPIO_25_26_EN BIT(17)
#define MVEBU_NB_GPIO1_GPIO_19_EN BIT(14)
#define MVEBU_NB_GPIO1_GPIO_18_EN BIT(13)
/* CPU 1 reset register */
#define MVEBU_CPU_1_RESET_VECTOR (MVEBU_REGS_BASE + 0x14044)
#define MVEBU_CPU_1_RESET_REG (MVEBU_REGS_BASE + 0xD00C)
#define MVEBU_CPU_1_RESET_BIT 31
/* IRQ register */
#define MVEBU_NB_IRQ_STATUS_1_REG (MVEBU_NB_SB_IRQ_REG_BASE)
#define MVEBU_NB_IRQ_STATUS_2_REG (MVEBU_NB_SB_IRQ_REG_BASE + \
0x10)
#define MVEBU_NB_IRQ_MASK_2_REG (MVEBU_NB_SB_IRQ_REG_BASE + \
0x18)
#define MVEBU_SB_IRQ_STATUS_1_REG (MVEBU_NB_SB_IRQ_REG_BASE + \
0x40)
#define MVEBU_SB_IRQ_STATUS_2_REG (MVEBU_NB_SB_IRQ_REG_BASE + \
0x50)
#define MVEBU_NB_GPIO_IRQ_MASK_1_REG (MVEBU_NB_SB_IRQ_REG_BASE + \
0xC8)
#define MVEBU_NB_GPIO_IRQ_MASK_2_REG (MVEBU_NB_SB_IRQ_REG_BASE + \
0xD8)
#define MVEBU_SB_GPIO_IRQ_MASK_REG (MVEBU_NB_SB_IRQ_REG_BASE + \
0xE8)
#define MVEBU_NB_GPIO_IRQ_EN_LOW_REG (MVEBU_NB_GPIO_IRQ_REG_BASE)
#define MVEBU_NB_GPIO_IRQ_EN_HIGH_REG (MVEBU_NB_GPIO_IRQ_REG_BASE + \
0x04)
#define MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG (MVEBU_NB_GPIO_IRQ_REG_BASE + \
0x10)
#define MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG (MVEBU_NB_GPIO_IRQ_REG_BASE + \
0x14)
#define MVEBU_NB_GPIO_IRQ_WK_LOW_REG (MVEBU_NB_GPIO_IRQ_REG_BASE + \
0x18)
#define MVEBU_NB_GPIO_IRQ_WK_HIGH_REG (MVEBU_NB_GPIO_IRQ_REG_BASE + \
0x1C)
#define MVEBU_SB_GPIO_IRQ_EN_REG (MVEBU_SB_GPIO_IRQ_REG_BASE)
#define MVEBU_SB_GPIO_IRQ_STATUS_REG (MVEBU_SB_GPIO_IRQ_REG_BASE + \
0x10)
#define MVEBU_SB_GPIO_IRQ_WK_REG (MVEBU_SB_GPIO_IRQ_REG_BASE + \
0x18)
/* PMU registers */
#define MVEBU_PM_NB_PWR_CTRL_REG (MVEBU_PMSU_REG_BASE)
#define MVEBU_PM_PWR_DN_CNT_SEL BIT(28)
#define MVEBU_PM_SB_PWR_DWN BIT(4)
#define MVEBU_PM_INTERFACE_IDLE BIT(0)
#define MVEBU_PM_NB_CPU_PWR_CTRL_REG (MVEBU_PMSU_REG_BASE + 0x4)
#define MVEBU_PM_L2_FLUSH_EN BIT(22)
#define MVEBU_PM_NB_PWR_OPTION_REG (MVEBU_PMSU_REG_BASE + 0x8)
#define MVEBU_PM_DDR_SR_EN BIT(29)
#define MVEBU_PM_DDR_CLK_DIS_EN BIT(28)
#define MVEBU_PM_WARM_RESET_EN BIT(27)
#define MVEBU_PM_DDRPHY_PWRDWN_EN BIT(23)
#define MVEBU_PM_DDRPHY_PAD_PWRDWN_EN BIT(22)
#define MVEBU_PM_OSC_OFF_EN BIT(21)
#define MVEBU_PM_TBG_OFF_EN BIT(20)
#define MVEBU_PM_CPU_VDDV_OFF_EN BIT(19)
#define MVEBU_PM_AVS_DISABLE_MODE BIT(14)
#define MVEBU_PM_AVS_VDD2_MODE BIT(13)
#define MVEBU_PM_AVS_HOLD_MODE BIT(12)
#define MVEBU_PM_L2_SRAM_LKG_PD_EN BIT(8)
#define MVEBU_PM_EIP_SRAM_LKG_PD_EN BIT(7)
#define MVEBU_PM_DDRMC_SRAM_LKG_PD_EN BIT(6)
#define MVEBU_PM_MCI_SRAM_LKG_PD_EN BIT(5)
#define MVEBU_PM_MMC_SRAM_LKG_PD_EN BIT(4)
#define MVEBU_PM_SATA_SRAM_LKG_PD_EN BIT(3)
#define MVEBU_PM_DMA_SRAM_LKG_PD_EN BIT(2)
#define MVEBU_PM_SEC_SRAM_LKG_PD_EN BIT(1)
#define MVEBU_PM_CPU_SRAM_LKG_PD_EN BIT(0)
#define MVEBU_PM_NB_SRAM_LKG_PD_EN (MVEBU_PM_L2_SRAM_LKG_PD_EN |\
MVEBU_PM_EIP_SRAM_LKG_PD_EN | MVEBU_PM_DDRMC_SRAM_LKG_PD_EN |\
MVEBU_PM_MCI_SRAM_LKG_PD_EN | MVEBU_PM_MMC_SRAM_LKG_PD_EN |\
MVEBU_PM_SATA_SRAM_LKG_PD_EN | MVEBU_PM_DMA_SRAM_LKG_PD_EN |\
MVEBU_PM_SEC_SRAM_LKG_PD_EN | MVEBU_PM_CPU_SRAM_LKG_PD_EN)
#define MVEBU_PM_NB_PWR_DEBUG_REG (MVEBU_PMSU_REG_BASE + 0xC)
#define MVEBU_PM_NB_FORCE_CLK_ON BIT(30)
#define MVEBU_PM_IGNORE_CM3_SLEEP BIT(21)
#define MVEBU_PM_IGNORE_CM3_DEEP BIT(20)
#define MVEBU_PM_NB_WAKE_UP_EN_REG (MVEBU_PMSU_REG_BASE + 0x2C)
#define MVEBU_PM_SB_WKP_NB_EN BIT(31)
#define MVEBU_PM_NB_GPIO_WKP_EN BIT(27)
#define MVEBU_PM_SOC_TIMER_WKP_EN BIT(26)
#define MVEBU_PM_UART_WKP_EN BIT(25)
#define MVEBU_PM_UART2_WKP_EN BIT(19)
#define MVEBU_PM_CPU_TIMER_WKP_EN BIT(17)
#define MVEBU_PM_NB_WKP_EN BIT(16)
#define MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN BIT(13)
#define MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN BIT(12)
#define MVEBU_PM_CPU_0_PWR_CTRL_REG (MVEBU_PMSU_REG_BASE + 0x34)
#define MVEBU_PM_CPU_1_PWR_CTRL_REG (MVEBU_PMSU_REG_BASE + 0x38)
#define MVEBU_PM_CORE_SOC_PD BIT(2)
#define MVEBU_PM_CORE_PROC_PD BIT(1)
#define MVEBU_PM_CORE_PD BIT(0)
#define MVEBU_PM_CORE_1_RETURN_ADDR_REG (MVEBU_PMSU_REG_BASE + 0x44)
#define MVEBU_PM_CPU_VDD_OFF_INFO_1_REG (MVEBU_PMSU_REG_BASE + 0x48)
#define MVEBU_PM_CPU_VDD_OFF_INFO_2_REG (MVEBU_PMSU_REG_BASE + 0x4C)
#define MVEBU_PM_LOW_POWER_STATE BIT(0)
#define MVEBU_PM_CPU_WAKE_UP_CONF_REG (MVEBU_PMSU_REG_BASE + 0x54)
#define MVEBU_PM_CORE1_WAKEUP BIT(13)
#define MVEBU_PM_CORE0_WAKEUP BIT(12)
#define MVEBU_PM_WAIT_DDR_RDY_VALUE (0x15)
#define MVEBU_PM_SB_CPU_PWR_CTRL_REG (MVEBU_SB_WAKEUP_REG_BASE)
#define MVEBU_PM_SB_PM_START BIT(0)
#define MVEBU_PM_SB_PWR_OPTION_REG (MVEBU_SB_WAKEUP_REG_BASE + 0x4)
#define MVEBU_PM_SDIO_PHY_PDWN_EN BIT(17)
#define MVEBU_PM_SB_VDDV_OFF_EN BIT(16)
#define MVEBU_PM_EBM_SRAM_LKG_PD_EN BIT(11)
#define MVEBU_PM_PCIE_SRAM_LKG_PD_EN BIT(10)
#define MVEBU_PM_GBE1_TX_SRAM_LKG_PD_EN BIT(9)
#define MVEBU_PM_GBE1_RX_SRAM_LKG_PD_EN BIT(8)
#define MVEBU_PM_GBE1_MIB_SRAM_LKG_PD_EN BIT(7)
#define MVEBU_PM_GBE0_TX_SRAM_LKG_PD_EN BIT(6)
#define MVEBU_PM_GBE0_RX_SRAM_LKG_PD_EN BIT(5)
#define MVEBU_PM_GBE0_MIB_SRAM_LKG_PD_EN BIT(4)
#define MVEBU_PM_SDIO_SRAM_LKG_PD_EN BIT(3)
#define MVEBU_PM_USB2_SRAM_LKG_PD_EN BIT(2)
#define MVEBU_PM_USB3_H_SRAM_LKG_PD_EN BIT(1)
#define MVEBU_PM_SB_SRAM_LKG_PD_EN (MVEBU_PM_EBM_SRAM_LKG_PD_EN |\
MVEBU_PM_PCIE_SRAM_LKG_PD_EN | MVEBU_PM_GBE1_TX_SRAM_LKG_PD_EN |\
MVEBU_PM_GBE1_RX_SRAM_LKG_PD_EN | MVEBU_PM_GBE1_MIB_SRAM_LKG_PD_EN |\
MVEBU_PM_GBE0_TX_SRAM_LKG_PD_EN | MVEBU_PM_GBE0_RX_SRAM_LKG_PD_EN |\
MVEBU_PM_GBE0_MIB_SRAM_LKG_PD_EN | MVEBU_PM_SDIO_SRAM_LKG_PD_EN |\
MVEBU_PM_USB2_SRAM_LKG_PD_EN | MVEBU_PM_USB3_H_SRAM_LKG_PD_EN)
#define MVEBU_PM_SB_WK_EN_REG (MVEBU_SB_WAKEUP_REG_BASE + 0x10)
#define MVEBU_PM_SB_GPIO_WKP_EN BIT(24)
#define MVEBU_PM_SB_WKP_EN BIT(20)
/* DRAM registers */
#define MVEBU_DRAM_STATS_CH0_REG (MVEBU_DRAM_REG_BASE + 0x4)
#define MVEBU_DRAM_WCP_EMPTY BIT(19)
#define MVEBU_DRAM_CMD_0_REG (MVEBU_DRAM_REG_BASE + 0x20)
#define MVEBU_DRAM_CH0_CMD0 BIT(28)
#define MVEBU_DRAM_CS_CMD0 BIT(24)
#define MVEBU_DRAM_WCB_DRAIN_REQ BIT(1)
#define MVEBU_DRAM_PWR_CTRL_REG (MVEBU_DRAM_REG_BASE + 0x54)
#define MVEBU_DRAM_PHY_CLK_GATING_EN BIT(1)
#define MVEBU_DRAM_PHY_AUTO_AC_OFF_EN BIT(0)
/* AVS registers */
#define MVEBU_AVS_CTRL_2_REG (MVEBU_AVS_REG_BASE + 0x8)
#define MVEBU_LOW_VDD_MODE_EN BIT(6)
/* Clock registers */
#define MVEBU_NB_CLOCK_SEL_REG (MVEBU_NB_REGS_BASE + 0x10)
#define MVEBU_A53_CPU_CLK_SEL BIT(15)
/* North Bridge Step-Down Registers */
#define MVEBU_NB_STEP_DOWN_INT_EN_REG MVEBU_NB_STEP_DOWN_REG_BASE
#define MVEBU_NB_GPIO_INT_WAKE_WCPU_CLK BIT(8)
#define MVEBU_NB_GPIO_18 18
#define MVEBU_NB_GPIO_19 19
#define MVEBU_NB_GPIO_25 25
#define MVEBU_NB_GPIO_26 26
typedef int (*wake_up_src_func)(union pm_wake_up_src_data *);
struct wake_up_src_func_map {
enum pm_wake_up_src_type type;
wake_up_src_func func;
};
void marvell_psci_arch_init(int die_index)
{
}
static void a3700_pm_ack_irq(void)
{
uint32_t reg;
reg = mmio_read_32(MVEBU_NB_IRQ_STATUS_1_REG);
if (reg)
mmio_write_32(MVEBU_NB_IRQ_STATUS_1_REG, reg);
reg = mmio_read_32(MVEBU_NB_IRQ_STATUS_2_REG);
if (reg)
mmio_write_32(MVEBU_NB_IRQ_STATUS_2_REG, reg);
reg = mmio_read_32(MVEBU_SB_IRQ_STATUS_1_REG);
if (reg)
mmio_write_32(MVEBU_SB_IRQ_STATUS_1_REG, reg);
reg = mmio_read_32(MVEBU_SB_IRQ_STATUS_2_REG);
if (reg)
mmio_write_32(MVEBU_SB_IRQ_STATUS_2_REG, reg);
reg = mmio_read_32(MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG);
if (reg)
mmio_write_32(MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG, reg);
reg = mmio_read_32(MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG);
if (reg)
mmio_write_32(MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG, reg);
reg = mmio_read_32(MVEBU_SB_GPIO_IRQ_STATUS_REG);
if (reg)
mmio_write_32(MVEBU_SB_GPIO_IRQ_STATUS_REG, reg);
}
/*****************************************************************************
* A3700 handler called to check the validity of the power state
* parameter.
*****************************************************************************
*/
int a3700_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
ERROR("%s needs to be implemented\n", __func__);
panic();
}
/*****************************************************************************
* A3700 handler called when a CPU is about to enter standby.
*****************************************************************************
*/
void a3700_cpu_standby(plat_local_state_t cpu_state)
{
ERROR("%s needs to be implemented\n", __func__);
panic();
}
/*****************************************************************************
* A3700 handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
*****************************************************************************
*/
int a3700_pwr_domain_on(u_register_t mpidr)
{
/* Set barrier */
dsbsy();
/* Set the cpu start address to BL1 entry point */
mmio_write_32(MVEBU_CPU_1_RESET_VECTOR,
PLAT_MARVELL_CPU_ENTRY_ADDR >> 2);
/* Get the cpu out of reset */
mmio_clrbits_32(MVEBU_CPU_1_RESET_REG, BIT(MVEBU_CPU_1_RESET_BIT));
mmio_setbits_32(MVEBU_CPU_1_RESET_REG, BIT(MVEBU_CPU_1_RESET_BIT));
return 0;
}
/*****************************************************************************
* A3700 handler called to validate the entry point.
*****************************************************************************
*/
int a3700_validate_ns_entrypoint(uintptr_t entrypoint)
{
return PSCI_E_SUCCESS;
}
/*****************************************************************************
* A3700 handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
*****************************************************************************
*/
void a3700_pwr_domain_off(const psci_power_state_t *target_state)
{
uint32_t cpu_idx = plat_my_core_pos();
/* Prevent interrupts from spuriously waking up this cpu */
plat_marvell_gic_cpuif_disable();
/*
* Enable Core VDD OFF, core is supposed to be powered
* off by PMU when WFI command is issued.
*/
mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG + 4 * cpu_idx,
MVEBU_PM_CORE_PD);
/* Core can not be powered down with pending IRQ,
* acknowledge all the pending IRQ
*/
a3700_pm_ack_irq();
}
static void a3700_set_gen_pwr_off_option(void)
{
/* Enable L2 flush -> processor state-machine option */
mmio_setbits_32(MVEBU_PM_NB_CPU_PWR_CTRL_REG, MVEBU_PM_L2_FLUSH_EN);
/*
* North bridge cannot be VDD off (always ON).
* The NB state machine supports low power mode by its state machine.
* This bit MUST be set for north bridge power down, e.g.,
* OSC input cutoff (not tested), SRAM power down, PMIC, etc.
* It is not related to CPU VDD OFF!!
*/
mmio_clrbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_CPU_VDDV_OFF_EN);
/*
* MUST: Switch CPU/AXI clock to OSC
* NB state machine clock is always connected to OSC (slow clock).
* But Core0/1/processor state machine's clock are connected to AXI
* clock. Now, AXI clock takes the TBG as clock source.
* If using the AXI clock, the Core0/1/processor state machines may run much
* faster than the NB state machine, which will cause problems if the cores
* are released before the north bridge gets ready.
*/
mmio_clrbits_32(MVEBU_NB_CLOCK_SEL_REG, MVEBU_A53_CPU_CLK_SEL);
/*
* These register bits will trigger north bridge
* power-down state machine regardless CM3 status.
*/
mmio_setbits_32(MVEBU_PM_NB_PWR_DEBUG_REG, MVEBU_PM_IGNORE_CM3_SLEEP);
mmio_setbits_32(MVEBU_PM_NB_PWR_DEBUG_REG, MVEBU_PM_IGNORE_CM3_DEEP);
/*
* SRAM => controlled by north bridge state machine.
* Core VDD OFF is not related to CPU SRAM power down.
*/
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_NB_SRAM_LKG_PD_EN);
/*
* Idle AXI interface in order to get L2_WFI
* L2 WFI is only asserted after both CORE-0 and CORE-1 WFI are asserted.
* (Only when both core 0 and core 1 are in WFI will L2 WFI be issued.)
* Once L2 WFI asserted, this bit is used for signalling assertion
* to AXI IO masters.
*/
mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_INTERFACE_IDLE);
/* Enable core0 and core1 VDD_OFF */
mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_PD);
mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_PD);
/* Enable North bridge power down -
* Both Cores MUST enable this bit to power down north bridge!
*/
mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_SOC_PD);
mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_SOC_PD);
/* CA53 (processor domain) power down */
mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_PROC_PD);
mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_PROC_PD);
}
static void a3700_en_ddr_self_refresh(void)
{
/*
* Both counts are 16 bits and configurable. By default, osc_stb_cnt
* is 0xFFF in its lower 12 bits, so the powerdown count is smaller
* than the osc count.
* This count is used for exiting DDR SR mode on a wakeup event.
* The powerdown count also has an impact on the following
* state changes: idle -> count-down -> ... (power-down, vdd off, etc.)
* Here, make the stable counter shorter:
* use the power down count value instead of osc_stb_cnt to speed up
* the DDR self refresh exit.
*/
mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_PWR_DN_CNT_SEL);
/*
* Enable DDR SR mode => controlled by north bridge state machine
* Therefore, we must powerdown north bridge to trigger the DDR SR
* mode switching.
*/
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDR_SR_EN);
/* Disable DDR clock, otherwise DDR will not enter into SR mode. */
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDR_CLK_DIS_EN);
/* Power down DDR PHY (PAD) */
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDRPHY_PWRDWN_EN);
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG,
MVEBU_PM_DDRPHY_PAD_PWRDWN_EN);
/* Set wait time for DDR ready in ROM code */
mmio_write_32(MVEBU_PM_CPU_VDD_OFF_INFO_1_REG,
MVEBU_PM_WAIT_DDR_RDY_VALUE);
/* DDR flush write buffer - mandatory */
mmio_write_32(MVEBU_DRAM_CMD_0_REG, MVEBU_DRAM_CH0_CMD0 |
MVEBU_DRAM_CS_CMD0 | MVEBU_DRAM_WCB_DRAIN_REQ);
while ((mmio_read_32(MVEBU_DRAM_STATS_CH0_REG) &
MVEBU_DRAM_WCP_EMPTY) != MVEBU_DRAM_WCP_EMPTY)
;
/* Trigger PHY reset after ddr out of self refresh =>
* supply reset pulse for DDR phy after wake up
*/
mmio_setbits_32(MVEBU_DRAM_PWR_CTRL_REG, MVEBU_DRAM_PHY_CLK_GATING_EN |
MVEBU_DRAM_PHY_AUTO_AC_OFF_EN);
}
static void a3700_pwr_dn_avs(void)
{
/*
* AVS power down - controlled by the north bridge state machine.
* Enable AVS power down by clearing the AVS disable bit.
*/
mmio_clrbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_DISABLE_MODE);
/*
* Bits [12:13] should be set to power down AVS:
* 1. Enable AVS VDD2 mode.
* 2. After powering down AVS, the AVS output voltage must be held.
* 3. A lower VDD can be chosen for AVS power down.
*/
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_VDD2_MODE);
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_HOLD_MODE);
/* Enable low VDD mode, AVS will set CPU to lowest core VDD 747mV */
mmio_setbits_32(MVEBU_AVS_CTRL_2_REG, MVEBU_LOW_VDD_MODE_EN);
}
static void a3700_pwr_dn_tbg(void)
{
/* Power down TBG */
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_TBG_OFF_EN);
}
static void a3700_pwr_dn_sb(void)
{
/* Enable south bridge power down option */
mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_SB_PWR_DWN);
/* Enable SDIO_PHY_PWRDWN */
mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SDIO_PHY_PDWN_EN);
/* Enable SRAM LRM on SB */
mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SB_SRAM_LKG_PD_EN);
/* Enable SB Power Off */
mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SB_VDDV_OFF_EN);
/* Kick off South Bridge Power Off */
mmio_setbits_32(MVEBU_PM_SB_CPU_PWR_CTRL_REG, MVEBU_PM_SB_PM_START);
}
static void a3700_set_pwr_off_option(void)
{
/* Set general power off option */
a3700_set_gen_pwr_off_option();
/* Enable DDR self refresh in low power mode */
a3700_en_ddr_self_refresh();
/* Power down AVS */
a3700_pwr_dn_avs();
/* Power down TBG */
a3700_pwr_dn_tbg();
/* Power down the south bridge; note that the south bridge
* settings should be configured beforehand
*/
a3700_pwr_dn_sb();
}
static void a3700_set_wake_up_option(void)
{
/*
* Enable the wakeup event for NB SOC => north-bridge
* state-machine enablement on wake-up event
*/
mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_NB_WKP_EN);
/* Enable both core0 and core1 wakeup on demand */
mmio_setbits_32(MVEBU_PM_CPU_WAKE_UP_CONF_REG,
MVEBU_PM_CORE1_WAKEUP | MVEBU_PM_CORE0_WAKEUP);
/* Enable warm reset in low power mode */
mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_WARM_RESET_EN);
}
static void a3700_pm_en_nb_gpio(uint32_t gpio)
{
/* For GPIO1 interrupt -- North bridge only */
if (gpio >= 32) {
/* GPIO int mask */
mmio_clrbits_32(MVEBU_NB_GPIO_IRQ_MASK_2_REG, BIT(gpio - 32));
/* NB_CPU_WAKE-up ENABLE GPIO int */
mmio_setbits_32(MVEBU_NB_GPIO_IRQ_EN_HIGH_REG, BIT(gpio - 32));
} else {
/* GPIO int mask */
mmio_clrbits_32(MVEBU_NB_GPIO_IRQ_MASK_1_REG, BIT(gpio));
/* NB_CPU_WAKE-up ENABLE GPIO int */
mmio_setbits_32(MVEBU_NB_GPIO_IRQ_EN_LOW_REG, BIT(gpio));
}
mmio_setbits_32(MVEBU_NB_STEP_DOWN_INT_EN_REG,
MVEBU_NB_GPIO_INT_WAKE_WCPU_CLK);
/* Enable using GPIO as wakeup event
* (actually not only for north bridge)
*/
mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_NB_GPIO_WKP_EN |
MVEBU_PM_NB_WKP_EN | MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN |
MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN);
}
static void a3700_pm_en_sb_gpio(uint32_t gpio)
{
/* Enable using GPIO as wakeup event */
mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_SB_WKP_NB_EN |
MVEBU_PM_NB_WKP_EN | MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN |
MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN);
/* SB GPIO Wake UP | South Bridge Wake Up Enable */
mmio_setbits_32(MVEBU_PM_SB_WK_EN_REG, MVEBU_PM_SB_GPIO_WKP_EN |
MVEBU_PM_SB_WKP_EN);
/* GPIO int mask */
mmio_clrbits_32(MVEBU_SB_GPIO_IRQ_MASK_REG, BIT(gpio));
/* NB_CPU_WAKE-up ENABLE GPIO int */
mmio_setbits_32(MVEBU_SB_GPIO_IRQ_EN_REG, BIT(gpio));
}
int a3700_pm_src_gpio(union pm_wake_up_src_data *src_data)
{
if (src_data->gpio_data.bank_num == 0)
/* North Bridge GPIO */
a3700_pm_en_nb_gpio(src_data->gpio_data.gpio_num);
else
a3700_pm_en_sb_gpio(src_data->gpio_data.gpio_num);
return 0;
}
int a3700_pm_src_uart1(union pm_wake_up_src_data *src_data)
{
/* Clear Uart1 select */
mmio_clrbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_UART1_SEL);
/* set pin 19 gpio usage*/
mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_19_EN);
/* Enable gpio wake-up*/
a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_19);
/* set pin 18 gpio usage*/
mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_18_EN);
/* Enable gpio wake-up*/
a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_18);
return 0;
}
int a3700_pm_src_uart0(union pm_wake_up_src_data *src_data)
{
/* set pin 25/26 gpio usage*/
mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_25_26_EN);
/* Enable gpio wake-up*/
a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_25);
/* Enable gpio wake-up*/
a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_26);
return 0;
}
struct wake_up_src_func_map src_func_table[WAKE_UP_SRC_MAX] = {
{WAKE_UP_SRC_GPIO, a3700_pm_src_gpio},
{WAKE_UP_SRC_UART1, a3700_pm_src_uart1},
{WAKE_UP_SRC_UART0, a3700_pm_src_uart0},
/* FOLLOWING SRC NOT SUPPORTED YET */
{WAKE_UP_SRC_TIMER, NULL}
};
static wake_up_src_func a3700_get_wake_up_src_func(
enum pm_wake_up_src_type type)
{
uint32_t loop;
for (loop = 0; loop < WAKE_UP_SRC_MAX; loop++) {
if (src_func_table[loop].type == type)
return src_func_table[loop].func;
}
return NULL;
}
static void a3700_set_wake_up_source(void)
{
struct pm_wake_up_src_config *wake_up_src;
uint32_t loop;
wake_up_src_func src_func = NULL;
wake_up_src = mv_wake_up_src_config_get();
for (loop = 0; loop < wake_up_src->wake_up_src_num; loop++) {
src_func = a3700_get_wake_up_src_func(
wake_up_src->wake_up_src[loop].wake_up_src_type);
if (src_func)
src_func(
&(wake_up_src->wake_up_src[loop].wake_up_data));
}
}
static void a3700_pm_save_lp_flag(void)
{
/* Save the flag for entering the low power mode */
mmio_setbits_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG,
MVEBU_PM_LOW_POWER_STATE);
}
static void a3700_pm_clear_lp_flag(void)
{
/* Clear the flag for entering the low power mode */
mmio_clrbits_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG,
MVEBU_PM_LOW_POWER_STATE);
}
static uint32_t a3700_pm_get_lp_flag(void)
{
/* Get the flag for entering the low power mode */
return mmio_read_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG) &
MVEBU_PM_LOW_POWER_STATE;
}
/*****************************************************************************
* A3700 handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
*****************************************************************************
*/
void a3700_pwr_domain_suspend(const psci_power_state_t *target_state)
{
/* Prevent interrupts from spuriously waking up this cpu */
plat_marvell_gic_cpuif_disable();
/* Save IRQ states */
plat_marvell_gic_irq_save();
/* Set wake up options */
a3700_set_wake_up_option();
/* Set wake up sources */
a3700_set_wake_up_source();
/* SoC can not be powered down with pending IRQ,
* acknowledge all the pending IRQ
*/
a3700_pm_ack_irq();
/* Set power off options */
a3700_set_pwr_off_option();
/* Save the flag for entering the low power mode */
a3700_pm_save_lp_flag();
isb();
}
/*****************************************************************************
* A3700 handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
*****************************************************************************
*/
void a3700_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
/* arch specific configuration */
marvell_psci_arch_init(0);
/* Per-CPU interrupt initialization */
plat_marvell_gic_pcpu_init();
plat_marvell_gic_cpuif_enable();
/* Restore the per-cpu IRQ state */
if (a3700_pm_get_lp_flag())
plat_marvell_gic_irq_pcpu_restore();
}
/*****************************************************************************
* A3700 handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
*****************************************************************************
*/
void a3700_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
struct dec_win_config *io_dec_map;
uint32_t dec_win_num;
struct dram_win_map dram_wins_map;
/* arch specific configuration */
marvell_psci_arch_init(0);
/* Interrupt initialization */
plat_marvell_gic_init();
/* Restore IRQ states */
plat_marvell_gic_irq_restore();
/*
* Initialize CCI for this cluster after resume from suspend state.
* No need for locks as no other CPU is active.
*/
plat_marvell_interconnect_init();
/*
* Enable CCI coherency for the primary CPU's cluster.
* Platform specific PSCI code will enable coherency for other
* clusters.
*/
plat_marvell_interconnect_enter_coherency();
/* CPU address decoder windows initialization. */
cpu_wins_init();
/* Fetch the CPU-DRAM window mapping information by reading
* the CPU-DRAM decode windows (only the enabled ones)
*/
dram_win_map_build(&dram_wins_map);
/* Get IO address decoder windows */
if (marvell_get_io_dec_win_conf(&io_dec_map, &dec_win_num)) {
printf("No IO address decoder windows configurations found!\n");
return;
}
/* IO address decoder init */
if (init_io_addr_dec(&dram_wins_map, io_dec_map, dec_win_num)) {
printf("IO address decoder windows initialization failed!\n");
return;
}
/* Clear low power mode flag */
a3700_pm_clear_lp_flag();
}
/*****************************************************************************
* This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND
* call to get the `power_state` parameter. This allows the platform to encode
* the appropriate State-ID field within the `power_state` parameter which can
* be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
*****************************************************************************
*/
void a3700_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
/* All power domain levels are requested to be in the OFF state */
for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
}
/*****************************************************************************
* A3700 handlers to shutdown/reboot the system
*****************************************************************************
*/
static void __dead2 a3700_system_off(void)
{
ERROR("%s needs to be implemented\n", __func__);
panic();
}
/*****************************************************************************
* A3700 handlers to reset the system
*****************************************************************************
*/
static void __dead2 a3700_system_reset(void)
{
/* Clear the mailbox magic number so that the next boot acts like a cold boot */
mmio_write_32(PLAT_MARVELL_MAILBOX_BASE, 0x0);
dsbsy();
/* Flush the data cache if the mailbox shared RAM is cached */
#if PLAT_MARVELL_SHARED_RAM_CACHED
flush_dcache_range((uintptr_t)PLAT_MARVELL_MAILBOX_BASE,
2 * sizeof(uint64_t));
#endif
/* Trigger the warm reset */
mmio_write_32(MVEBU_WARM_RESET_REG, MVEBU_WARM_RESET_MAGIC);
/* Shouldn't get to this point */
panic();
}
/*****************************************************************************
* Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
* platform layer will take care of registering the handlers with PSCI.
*****************************************************************************
*/
const plat_psci_ops_t plat_arm_psci_pm_ops = {
.cpu_standby = a3700_cpu_standby,
.pwr_domain_on = a3700_pwr_domain_on,
.pwr_domain_off = a3700_pwr_domain_off,
.pwr_domain_suspend = a3700_pwr_domain_suspend,
.pwr_domain_on_finish = a3700_pwr_domain_on_finish,
.pwr_domain_suspend_finish = a3700_pwr_domain_suspend_finish,
.get_sys_suspend_power_state = a3700_get_sys_suspend_power_state,
.system_off = a3700_system_off,
.system_reset = a3700_system_reset,
.validate_power_state = a3700_validate_power_state,
.validate_ns_entrypoint = a3700_validate_ns_entrypoint
};
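/*
* Minimal sketch (an assumption, simplified) of how this ops table reaches
* the generic PSCI layer: the common Marvell code implements
* plat_setup_psci_ops(), records the secure warm-boot entry point and hands
* back plat_arm_psci_pm_ops. The mailbox helper shown is illustrative only.
*
*	int plat_setup_psci_ops(uintptr_t sec_entrypoint,
*				const plat_psci_ops_t **psci_ops)
*	{
*		// Remember where a woken CPU must re-enter the secure world
*		set_secure_entrypoint(sec_entrypoint);	// hypothetical helper
*
*		*psci_ops = &plat_arm_psci_pm_ops;
*		return 0;
*	}
*/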

View File

@ -7,6 +7,9 @@
PCI_EP_SUPPORT := 0
CP_NUM := 1
$(eval $(call add_define,CP_NUM))
DOIMAGE_SEC := tools/doimage/secure/sec_img_7K.cfg
MARVELL_MOCHI_DRV := drivers/marvell/mochi/apn806_setup.c

View File

@ -7,6 +7,9 @@
PCI_EP_SUPPORT := 0
CP_NUM := 1
$(eval $(call add_define,CP_NUM))
DOIMAGE_SEC := tools/doimage/secure/sec_img_7K.cfg
MARVELL_MOCHI_DRV := drivers/marvell/mochi/apn806_setup.c

View File

@ -0,0 +1,167 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#ifndef __PHY_PORTING_LAYER_H
#define __PHY_PORTING_LAYER_H
#define MAX_LANE_NR 6
static const struct xfi_params
xfi_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = {
/* AP0 */
{
/* CP 0 */
{
{ 0 }, /* Comphy0 */
{ 0 }, /* Comphy1 */
{ .g1_ffe_res_sel = 0x3, .g1_ffe_cap_sel = 0xf,
.align90 = 0x5f,
.g1_dfe_res = 0x2, .g1_amp = 0x1c, .g1_emph = 0xe,
.g1_emph_en = 0x1, .g1_tx_amp_adj = 0x1,
.g1_tx_emph_en = 0x1, .g1_tx_emph = 0x0,
.g1_rx_selmuff = 0x1, .g1_rx_selmufi = 0x0,
.g1_rx_selmupf = 0x2, .g1_rx_selmupi = 0x2,
.valid = 0x1 }, /* Comphy2 */
{ 0 }, /* Comphy3 */
{ 0 }, /* Comphy4 */
{ 0 }, /* Comphy5 */
},
/* CP 1 */
{
{ 0 }, /* Comphy0 */
{ 0 }, /* Comphy1 */
{ .g1_ffe_res_sel = 0x3, .g1_ffe_cap_sel = 0xf,
.align90 = 0x5f,
.g1_dfe_res = 0x2, .g1_amp = 0x1c, .g1_emph = 0xe,
.g1_emph_en = 0x1, .g1_tx_amp_adj = 0x1,
.g1_tx_emph_en = 0x1, .g1_tx_emph = 0x0,
.g1_rx_selmuff = 0x1, .g1_rx_selmufi = 0x0,
.g1_rx_selmupf = 0x2, .g1_rx_selmupi = 0x2,
.valid = 0x1 }, /* Comphy2 */
{ 0 }, /* Comphy3 */
{ 0 }, /* Comphy4 */
{ 0 }, /* Comphy5 */
},
},
};
static const struct sata_params
sata_static_values_tab[AP_NUM][CP_NUM][MAX_LANE_NR] = {
/* AP0 */
{
/* CP 0 */
{
{ 0 }, /* Comphy0 */
{ .g1_amp = 0x8, .g2_amp = 0xa, .g3_amp = 0x1e,
.g1_emph = 0x1, .g2_emph = 0x2, .g3_emph = 0xe,
.g1_emph_en = 0x1, .g2_emph_en = 0x1,
.g3_emph_en = 0x1,
.g1_tx_amp_adj = 0x1, .g2_tx_amp_adj = 0x1,
.g3_tx_amp_adj = 0x1,
.g1_tx_emph_en = 0x0, .g2_tx_emph_en = 0x0,
.g3_tx_emph_en = 0x0,
.g1_tx_emph = 0x1, .g2_tx_emph = 0x1,
.g3_tx_emph = 0x1,
.g3_dfe_res = 0x1, .g3_ffe_res_sel = 0x4,
.g3_ffe_cap_sel = 0xf,
.align90 = 0x61,
.g1_rx_selmuff = 0x3, .g2_rx_selmuff = 0x3,
.g3_rx_selmuff = 0x3,
.g1_rx_selmufi = 0x0, .g2_rx_selmufi = 0x0,
.g3_rx_selmufi = 0x3,
.g1_rx_selmupf = 0x1, .g2_rx_selmupf = 0x1,
.g3_rx_selmupf = 0x2,
.g1_rx_selmupi = 0x0, .g2_rx_selmupi = 0x0,
.g3_rx_selmupi = 0x2,
.valid = 0x1
}, /* Comphy1 */
{ 0 }, /* Comphy2 */
{ .g1_amp = 0x8, .g2_amp = 0xa, .g3_amp = 0x1e,
.g1_emph = 0x1, .g2_emph = 0x2, .g3_emph = 0xe,
.g1_emph_en = 0x1, .g2_emph_en = 0x1,
.g3_emph_en = 0x1,
.g1_tx_amp_adj = 0x1, .g2_tx_amp_adj = 0x1,
.g3_tx_amp_adj = 0x1,
.g1_tx_emph_en = 0x0, .g2_tx_emph_en = 0x0,
.g3_tx_emph_en = 0x0,
.g1_tx_emph = 0x1, .g2_tx_emph = 0x1,
.g3_tx_emph = 0x1,
.g3_dfe_res = 0x1, .g3_ffe_res_sel = 0x4,
.g3_ffe_cap_sel = 0xf,
.align90 = 0x61,
.g1_rx_selmuff = 0x3, .g2_rx_selmuff = 0x3,
.g3_rx_selmuff = 0x3,
.g1_rx_selmufi = 0x0, .g2_rx_selmufi = 0x0,
.g3_rx_selmufi = 0x3,
.g1_rx_selmupf = 0x1, .g2_rx_selmupf = 0x1,
.g3_rx_selmupf = 0x2,
.g1_rx_selmupi = 0x0, .g2_rx_selmupi = 0x0,
.g3_rx_selmupi = 0x2,
.valid = 0x1
}, /* Comphy3 */
{ 0 }, /* Comphy4 */
{ 0 }, /* Comphy5 */
},
/* CP 1 */
{
{ 0 }, /* Comphy0 */
{ .g1_amp = 0x8, .g2_amp = 0xa, .g3_amp = 0x1e,
.g1_emph = 0x1, .g2_emph = 0x2, .g3_emph = 0xe,
.g1_emph_en = 0x1, .g2_emph_en = 0x1,
.g3_emph_en = 0x1,
.g1_tx_amp_adj = 0x1, .g2_tx_amp_adj = 0x1,
.g3_tx_amp_adj = 0x1,
.g1_tx_emph_en = 0x0, .g2_tx_emph_en = 0x0,
.g3_tx_emph_en = 0x0,
.g1_tx_emph = 0x1, .g2_tx_emph = 0x1,
.g3_tx_emph = 0x1,
.g3_dfe_res = 0x1, .g3_ffe_res_sel = 0x4,
.g3_ffe_cap_sel = 0xf,
.align90 = 0x61,
.g1_rx_selmuff = 0x3, .g2_rx_selmuff = 0x3,
.g3_rx_selmuff = 0x3,
.g1_rx_selmufi = 0x0, .g2_rx_selmufi = 0x0,
.g3_rx_selmufi = 0x3,
.g1_rx_selmupf = 0x1, .g2_rx_selmupf = 0x1,
.g3_rx_selmupf = 0x2,
.g1_rx_selmupi = 0x0, .g2_rx_selmupi = 0x0,
.g3_rx_selmupi = 0x2,
.valid = 0x1
}, /* Comphy1 */
{ 0 }, /* Comphy2 */
{ .g1_amp = 0x8, .g2_amp = 0xa, .g3_amp = 0x1e,
.g1_emph = 0x1, .g2_emph = 0x2, .g3_emph = 0xe,
.g1_emph_en = 0x1, .g2_emph_en = 0x1,
.g3_emph_en = 0x1,
.g1_tx_amp_adj = 0x1, .g2_tx_amp_adj = 0x1,
.g3_tx_amp_adj = 0x1,
.g1_tx_emph_en = 0x0, .g2_tx_emph_en = 0x0,
.g3_tx_emph_en = 0x0,
.g1_tx_emph = 0x1, .g2_tx_emph = 0x1,
.g3_tx_emph = 0x1,
.g3_dfe_res = 0x1, .g3_ffe_res_sel = 0x4,
.g3_ffe_cap_sel = 0xf,
.align90 = 0x61,
.g1_rx_selmuff = 0x3, .g2_rx_selmuff = 0x3,
.g3_rx_selmuff = 0x3,
.g1_rx_selmufi = 0x0, .g2_rx_selmufi = 0x0,
.g3_rx_selmufi = 0x3,
.g1_rx_selmupf = 0x1, .g2_rx_selmupf = 0x1,
.g3_rx_selmupf = 0x2,
.g1_rx_selmupi = 0x0, .g2_rx_selmupi = 0x0,
.g3_rx_selmupi = 0x2,
.valid = 0x1
}, /* Comphy3 */
{ 0 }, /* Comphy4 */
{ 0 }, /* Comphy5 */
},
},
};
#endif /* __PHY_PORTING_LAYER_H */
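/*
* Usage sketch (an assumption, simplified): the COMPHY driver indexes these
* tables by AP, CP and lane number and applies an entry only when .valid is
* set, otherwise it falls back to the defaults from
* drivers/marvell/comphy/phy-default-porting-layer.h. Names other than the
* tables themselves are illustrative.
*
*	const struct xfi_params *xfi =
*		&xfi_static_values_tab[ap_nr][cp_nr][lane];
*
*	if (xfi->valid) {
*		// program g1_amp, g1_emph, align90, ... into the serdes lane
*	} else {
*		// keep the default static values and warn the user
*	}
*/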

View File

@ -7,6 +7,9 @@
PCI_EP_SUPPORT := 0
CP_NUM := 2
$(eval $(call add_define,CP_NUM))
DOIMAGE_SEC := tools/doimage/secure/sec_img_8K.cfg
MARVELL_MOCHI_DRV := drivers/marvell/mochi/apn806_setup.c
@ -14,3 +17,4 @@ MARVELL_MOCHI_DRV := drivers/marvell/mochi/apn806_setup.c
include plat/marvell/a8k/common/a8k_common.mk
include plat/marvell/common/marvell_common.mk
PLAT_INCLUDES += -Iplat/marvell/a8k/a80x0/board

View File

@ -7,6 +7,9 @@
PCI_EP_SUPPORT := 0
CP_NUM := 2
$(eval $(call add_define,CP_NUM))
DOIMAGE_SEC := tools/doimage/secure/sec_img_8K.cfg
MARVELL_MOCHI_DRV := drivers/marvell/mochi/apn806_setup.c

View File

@ -4,7 +4,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# https://spdx.org/licenses
include tools/doimage/doimage.mk
include tools/marvell/doimage/doimage.mk
PLAT_FAMILY := a8k
PLAT_FAMILY_BASE := plat/marvell/$(PLAT_FAMILY)
@ -25,7 +25,11 @@ $(eval $(call add_define,BL31_CACHE_DISABLE))
$(eval $(call add_define,PCI_EP_SUPPORT))
$(eval $(call assert_boolean,PCI_EP_SUPPORT))
DOIMAGEPATH ?= tools/doimage
AP_NUM := 1
$(eval $(call add_define,AP_NUM))
DOIMAGEPATH ?= tools/marvell/doimage
DOIMAGETOOL ?= ${DOIMAGEPATH}/doimage
ROM_BIN_EXT ?= $(BUILD_PLAT)/ble.bin

View File

@ -28,7 +28,10 @@
#define MVEBU_REGS_BASE 0xF0000000
#define MVEBU_REGS_BASE_MASK 0xF0000000
#define MVEBU_REGS_BASE_AP(ap) MVEBU_REGS_BASE
#define MVEBU_CP_REGS_BASE(cp_index) (0xF2000000 + (cp_index) * 0x2000000)
#define MVEBU_AP_IO_BASE(ap) 0xF2000000
#define MVEBU_CP_OFFSET 0x2000000
#define MVEBU_CP_REGS_BASE(cp_index) (MVEBU_AP_IO_BASE(0) + \
(cp_index) * MVEBU_CP_OFFSET)
#define MVEBU_RFU_BASE (MVEBU_REGS_BASE + 0x6F0000)
#define MVEBU_IO_WIN_BASE(ap_index) (MVEBU_RFU_BASE)
#define MVEBU_IO_WIN_GCR_OFFSET (0x70)

View File

@ -0,0 +1,208 @@
/*
* Copyright (C) 2018 Marvell International Ltd.
*
* SPDX-License-Identifier: BSD-3-Clause
* https://spdx.org/licenses
*/
#include <debug.h>
#include <gicv3.h>
#include <interrupt_props.h>
#include <marvell_def.h>
#include <plat_marvell.h>
#include <platform.h>
#include <platform_def.h>
/******************************************************************************
* The following functions are defined as weak to allow a platform to override
* the way the GICv3 driver is initialised and used.
******************************************************************************
*/
#pragma weak plat_marvell_gic_driver_init
#pragma weak plat_marvell_gic_init
#pragma weak plat_marvell_gic_cpuif_enable
#pragma weak plat_marvell_gic_cpuif_disable
#pragma weak plat_marvell_gic_pcpu_init
/* The GICv3 driver only needs to be initialized in EL3 */
static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];
static const interrupt_prop_t marvell_interrupt_props[] = {
PLAT_MARVELL_G1S_IRQ_PROPS(INTR_GROUP1S),
PLAT_MARVELL_G0_IRQ_PROPS(INTR_GROUP0)
};
/*
* We save and restore the GICv3 context on system suspend. Allocate the
* data in the designated EL3 Secure carve-out memory
*/
static gicv3_redist_ctx_t rdist_ctx __section("arm_el3_tzc_dram");
static gicv3_dist_ctx_t dist_ctx __section("arm_el3_tzc_dram");
/*
* MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
* to core position.
*
* Calculating core position is dependent on MPIDR_EL1.MT bit. However, affinity
* values read from GICR_TYPER don't have an MT field. To reuse the same
* translation used for CPUs, we insert the MT bit read from the PE's MPIDR into
* the value read from GICR_TYPER.
*
* Assumptions:
*
* - All CPUs implemented in the system have MPIDR_EL1.MT bit set;
* - No CPUs implemented in the system use affinity level 3.
*/
static unsigned int marvell_gicv3_mpidr_hash(u_register_t mpidr)
{
mpidr |= (read_mpidr_el1() & MPIDR_MT_MASK);
return plat_marvell_calc_core_pos(mpidr);
}
const gicv3_driver_data_t marvell_gic_data = {
.gicd_base = PLAT_MARVELL_GICD_BASE,
.gicr_base = PLAT_MARVELL_GICR_BASE,
.interrupt_props = marvell_interrupt_props,
.interrupt_props_num = ARRAY_SIZE(marvell_interrupt_props),
.rdistif_num = PLATFORM_CORE_COUNT,
.rdistif_base_addrs = rdistif_base_addrs,
.mpidr_to_core_pos = marvell_gicv3_mpidr_hash
};
void plat_marvell_gic_driver_init(void)
{
/*
* The GICv3 driver is initialized in EL3 and does not need
* to be initialized again in S-EL1. This is because S-EL1
* can use GIC system registers to manage interrupts and does
* not need GIC interface base addresses to be configured.
*/
#if IMAGE_BL31
gicv3_driver_init(&marvell_gic_data);
#endif
}
/******************************************************************************
* Marvell common helper to initialize the GIC. Only invoked by BL31
******************************************************************************
*/
void plat_marvell_gic_init(void)
{
/* Initialize the GIC-600 Multi Chip feature only if more than one
* north bridge is present - otherwise multi chip initialization
* is not needed
*/
#if (PLAT_MARVELL_NORTHB_COUNT > 1)
if (gic600_multi_chip_init())
ERROR("GIC-600 Multi Chip initialization failed\n");
#endif
gicv3_distif_init();
gicv3_rdistif_init(plat_my_core_pos());
gicv3_cpuif_enable(plat_my_core_pos());
}
/******************************************************************************
* Marvell common helper to enable the GIC CPU interface
******************************************************************************
*/
void plat_marvell_gic_cpuif_enable(void)
{
gicv3_cpuif_enable(plat_my_core_pos());
}
/******************************************************************************
* Marvell common helper to disable the GIC CPU interface
******************************************************************************
*/
void plat_marvell_gic_cpuif_disable(void)
{
gicv3_cpuif_disable(plat_my_core_pos());
}
/******************************************************************************
* Marvell common helper to initialize the per-cpu redistributor interface in GICv3
******************************************************************************
*/
void plat_marvell_gic_pcpu_init(void)
{
gicv3_rdistif_init(plat_my_core_pos());
}
/******************************************************************************
* Marvell common helper to save SPI irq states in GICv3
******************************************************************************
*/
void plat_marvell_gic_irq_save(void)
{
/*
* If an ITS is available, save its context before
* the Redistributor using:
* gicv3_its_save_disable(gits_base, &its_ctx[i])
* Additionally, an implementation-defined sequence may
* be required to save the whole ITS state.
*/
/*
* Save the GIC Redistributors and ITS contexts before the
* Distributor context. As we only handle SYSTEM SUSPEND API,
* we only need to save the context of the CPU that is issuing
* the SYSTEM SUSPEND call, i.e. the current CPU.
*/
gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
/* Save the GIC Distributor context */
gicv3_distif_save(&dist_ctx);
/*
* From here, all the components of the GIC can be safely powered down
* as long as there is an alternate way to handle wakeup interrupt
* sources.
*/
}
/******************************************************************************
* Marvell common helper to restore SPI irq states in GICv3
******************************************************************************
*/
void plat_marvell_gic_irq_restore(void)
{
/* Restore the GIC Distributor context */
gicv3_distif_init_restore(&dist_ctx);
/*
* Restore the GIC Redistributor and ITS contexts after the
* Distributor context. As we only handle SYSTEM SUSPEND API,
* we only need to restore the context of the CPU that issued
* the SYSTEM SUSPEND call.
*/
gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
/*
* If an ITS is available, restore its context after
* the Redistributor using:
* gicv3_its_restore(gits_base, &its_ctx[i])
* An implementation-defined sequence may be required to
* restore the whole ITS state. The ITS must also be
* re-enabled after this sequence has been executed.
*/
}
/******************************************************************************
* Marvell common helper to save per-cpu PPI irq states in GICv3
******************************************************************************
*/
void plat_marvell_gic_irq_pcpu_save(void)
{
gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
}
/******************************************************************************
* Marvell common helper to restore per-cpu PPI irq states in GICv3
******************************************************************************
*/
void plat_marvell_gic_irq_pcpu_restore(void)
{
gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
}
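/*
* Usage note: the Armada 3700 suspend path in this change uses these
* helpers in pairs - plat_marvell_gic_irq_save() before powering down, then
* plat_marvell_gic_irq_restore() (plus plat_marvell_gic_irq_pcpu_restore()
* for the per-CPU state) after wake-up, once the distributor has been
* re-initialised.
*/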

View File

@ -49,6 +49,6 @@ mrvl_clean:
${DOIMAGETOOL}: mrvl_clean
@$(DOIMAGE_LIBS_CHECK)
${Q}${MAKE} --no-print-directory -C ${DOIMAGEPATH} WTMI_IMG=$(WTMI_IMG)
${Q}${MAKE} --no-print-directory -C ${DOIMAGEPATH} VERSION=$(SUBVERSION) WTMI_IMG=$(WTMI_IMG)