Merge pull request #1843 from DavidPu/xlat_tables_v2_non_recursion

Remove recursion from xlat_tables_v2 library
Soby Mathew 2019-03-13 15:26:51 +00:00 committed by GitHub
commit d0759c2c44
2 changed files with 342 additions and 154 deletions
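
Editorial note: the change replaces the recursive table walkers with iterative ones that keep an explicit per-level cursor stack. The following is a minimal, self-contained sketch of that pattern only; the toy_table_t type, walk_iterative() and the descriptor encoding are invented for illustration and are not the library's code.

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVEL 3U            /* deepest level a walk can reach */
#define ENTRIES   4U            /* entries per toy table */

/* Toy table: an entry is either a leaf value or, when child[] is set,
 * a reference to a table one level down. */
typedef struct toy_table {
    uint64_t entry[ENTRIES];
    struct toy_table *child[ENTRIES];
} toy_table_t;

/* One saved cursor per level: enough state to resume in the parent once
 * the subtable below it has been fully visited. */
struct cursor {
    toy_table_t *table;
    unsigned int idx;
};

static void walk_iterative(toy_table_t *root)
{
    struct cursor saved[MAX_LEVEL + 1U];
    toy_table_t *this_table = root;
    unsigned int this_level = 0U;
    unsigned int idx = 0U;

    while (this_table != NULL) {
        if (idx >= ENTRIES) {
            /* Finished this table: pop back to the parent, or stop
             * if we are already at the top level. */
            if (this_level == 0U) {
                this_table = NULL;
                break;
            }
            this_level--;
            this_table = saved[this_level].table;
            idx = saved[this_level].idx + 1U;
        } else if ((this_table->child[idx] != NULL) &&
                   (this_level < MAX_LEVEL)) {
            /* Save the current position, then descend one level. */
            saved[this_level].table = this_table;
            saved[this_level].idx = idx;
            this_table = this_table->child[idx];
            this_level++;
            idx = 0U;
        } else {
            /* Leaf entry: visit it and move on. */
            printf("level %u idx %u -> 0x%llx\n", this_level, idx,
                   (unsigned long long)this_table->entry[idx]);
            idx++;
        }
    }
}

int main(void)
{
    toy_table_t leaf = { .entry = { 1U, 2U, 3U, 4U } };
    toy_table_t root = { .entry = { 0U }, .child = { [1] = &leaf } };

    walk_iterative(&root);
    return 0;
}

The real walkers in the diff below additionally track the VA covered by each entry and the number of valid entries at the starting level; that bookkeeping is omitted from the sketch.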


@@ -325,8 +325,9 @@ static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
return action;
}
/*
* Recursive function that writes to the translation tables and unmaps the
* Function that writes to the translation tables and unmaps the
* specified region.
*/
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
@@ -337,70 +338,137 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
{
assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
uint64_t *subtable;
uint64_t desc;
/*
 * Data structure to track a DESC_TABLE entry before iterating into the
 * subtable of the next translation level. It is used to restore the
 * previous level once the subtable iteration has finished.
 */
struct desc_table_unmap {
uint64_t *table_base;
uintptr_t table_idx_va;
unsigned int idx;
} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
{NULL, 0U, XLAT_TABLE_ENTRIES}, };
unsigned int this_level = level;
uint64_t *this_base = table_base;
unsigned int max_entries = table_entries;
size_t level_size = XLAT_BLOCK_SIZE(this_level);
unsigned int table_idx;
uintptr_t table_idx_va;
uintptr_t table_idx_end_va; /* End VA of this entry */
uintptr_t region_end_va = mm->base_va + mm->size - 1U;
unsigned int table_idx;
table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
while (table_idx < table_entries) {
while (this_base != NULL) {
table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
uint64_t desc;
uint64_t desc_type;
uintptr_t table_idx_end_va; /* End VA of this entry */
action_t action;
desc = table_base[table_idx];
uint64_t desc_type = desc & DESC_MASK;
/* Finished iterating over the current xlat level. */
if (table_idx >= max_entries) {
if (this_level > ctx->base_level) {
xlat_table_dec_regions_count(ctx, this_base);
}
action_t action = xlat_tables_unmap_region_action(mm,
table_idx_va, table_idx_end_va, level,
desc_type);
if (this_level > level) {
uint64_t *subtable;
/* Back from the subtable iteration; restore the
 * previous DESC_TABLE entry.
 */
this_level--;
this_base = desc_tables[this_level].table_base;
table_idx = desc_tables[this_level].idx;
table_idx_va =
desc_tables[this_level].table_idx_va;
level_size = XLAT_BLOCK_SIZE(this_level);
if (this_level == level) {
max_entries = table_entries;
} else {
max_entries = XLAT_TABLE_ENTRIES;
}
desc = this_base[table_idx];
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
/*
* If the subtable is now empty, remove its reference.
*/
if (xlat_table_is_empty(ctx, subtable)) {
this_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va,
ctx->xlat_regime);
}
table_idx++;
table_idx_va += level_size;
} else {
/* Reached the end of the top level; exit. */
this_base = NULL;
break;
}
}
/* If the end of the region has been reached, stop iterating
 * over entries in the current xlat level.
 */
if (region_end_va <= table_idx_va) {
table_idx = max_entries;
continue;
}
table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(this_level) - 1U;
desc = this_base[table_idx];
desc_type = desc & DESC_MASK;
action = xlat_tables_unmap_region_action(mm, table_idx_va,
table_idx_end_va,
this_level,
desc_type);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] = INVALID_DESC;
this_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
table_idx++;
table_idx_va += level_size;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
uint64_t *subtable;
uintptr_t base_va;
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
/* Recurse to write into subtable */
xlat_tables_unmap_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
xlat_clean_dcache_range((uintptr_t)subtable,
XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
/*
* If the subtable is now empty, remove its reference.
*/
if (xlat_table_is_empty(ctx, subtable)) {
table_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va,
ctx->xlat_regime);
}
desc_tables[this_level].table_base = this_base;
desc_tables[this_level].table_idx_va = table_idx_va;
base_va = table_idx_va;
desc_tables[this_level].idx = table_idx;
this_base = subtable;
this_level++;
max_entries = XLAT_TABLE_ENTRIES;
level_size = XLAT_BLOCK_SIZE(this_level);
table_idx_va = xlat_tables_find_start_va(mm,
base_va, this_level);
table_idx = xlat_tables_va_to_index(base_va,
table_idx_va, this_level);
} else {
assert(action == ACTION_NONE);
table_idx++;
table_idx_va += level_size;
}
table_idx++;
table_idx_va += XLAT_BLOCK_SIZE(level);
/* If reached the end of the region, exit */
if (region_end_va <= table_idx_va)
break;
}
if (level > ctx->base_level)
xlat_table_dec_regions_count(ctx, table_base);
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
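
In the unmap walker above, the work that previously happened after the recursive call returned now happens on the pop path: the parent entry is re-read and, if the subtable it points to no longer maps anything, the entry is invalidated and the TLB entry for that VA is flushed. Below is a minimal sketch of that idea only; it uses a scan-based emptiness check as a stand-in, whereas the library's xlat_table_is_empty() consults a per-table region counter.

#include <stdbool.h>
#include <stdint.h>

#define ENTRIES      512U
#define INVALID_DESC 0ULL

/* Simplified emptiness check: scan the table for any valid descriptor.
 * (Assumption for this sketch; the library keeps a region count instead.) */
static bool table_is_empty(const uint64_t *table)
{
    for (unsigned int i = 0U; i < ENTRIES; i++) {
        if (table[i] != INVALID_DESC) {
            return false;
        }
    }
    return true;
}

/* Pop path: called once every entry of 'subtable' has been visited.
 * 'parent_entry' is the descriptor in the parent table that points to it. */
static void unmap_pop(uint64_t *parent_entry, const uint64_t *subtable)
{
    if (table_is_empty(subtable)) {
        /* Nothing is mapped below this entry any more: unlink the
         * subtable. A real implementation would also invalidate the
         * TLB for the VA range this entry used to translate. */
        *parent_entry = INVALID_DESC;
    }
}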
@@ -537,105 +605,169 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
}
/*
* Recursive function that writes to the translation tables and maps the
* Function that writes to the translation tables and maps the
* specified region. On success, it returns the VA of the last byte that was
* successfully mapped. On error, it returns the VA of the next entry that
* should have been mapped.
*/
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
uintptr_t table_base_va,
const uintptr_t table_base_va,
uint64_t *const table_base,
unsigned int table_entries,
unsigned int level)
{
assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
/*
 * Data structure to track a DESC_TABLE entry before iterating into the
 * subtable of the next translation level. It is used to restore the
 * previous level once the subtable iteration has finished.
 */
struct desc_table_map {
uint64_t *table_base;
uintptr_t table_idx_va;
unsigned int idx;
} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
{NULL, 0U, XLAT_TABLE_ENTRIES}, };
unsigned int this_level = level;
uint64_t *this_base = table_base;
unsigned int max_entries = table_entries;
size_t level_size = XLAT_BLOCK_SIZE(this_level);
uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_idx_va;
unsigned long long table_idx_pa;
uint64_t *subtable;
uint64_t desc;
unsigned int table_idx;
table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
#if PLAT_XLAT_TABLES_DYNAMIC
if (level > ctx->base_level)
xlat_table_inc_regions_count(ctx, table_base);
while (this_base != NULL) {
uint64_t desc;
uint64_t desc_type;
unsigned long long table_idx_pa;
action_t action;
/* Finished iterating over the current xlat level. */
if (table_idx >= max_entries) {
if (this_level <= level) {
this_base = NULL;
break;
} else {
/* Back from the subtable iteration; restore the
 * previous DESC_TABLE entry.
 */
this_level--;
level_size = XLAT_BLOCK_SIZE(this_level);
this_base = desc_tables[this_level].table_base;
table_idx = desc_tables[this_level].idx;
if (this_level == level) {
max_entries = table_entries;
} else {
max_entries = XLAT_TABLE_ENTRIES;
}
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
uintptr_t subtable;
desc = this_base[table_idx];
subtable = (uintptr_t)(desc & TABLE_ADDR_MASK);
xlat_clean_dcache_range(subtable,
XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
while (table_idx < table_entries) {
table_idx++;
table_idx_va =
desc_tables[this_level].table_idx_va +
level_size;
}
}
desc = table_base[table_idx];
desc = this_base[table_idx];
desc_type = desc & DESC_MASK;
table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
action_t action = xlat_tables_map_region_action(mm,
(uint32_t)(desc & DESC_MASK), table_idx_pa,
table_idx_va, level);
/* If the end of the region has been reached, simply exit: all
 * BLOCK entries have already been written and all required
 * subtables created.
 */
if (mm_end_va <= table_idx_va) {
this_base = NULL;
break;
}
action = xlat_tables_map_region_action(mm, desc_type,
table_idx_pa, table_idx_va, this_level);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] =
xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
level);
this_base[table_idx] = xlat_desc(ctx, mm->attr,
table_idx_pa, this_level);
table_idx++;
table_idx_va += level_size;
} else if (action == ACTION_CREATE_NEW_TABLE) {
uintptr_t end_va;
subtable = xlat_table_get_empty(ctx);
uintptr_t base_va;
uint64_t *subtable = xlat_table_get_empty(ctx);
if (subtable == NULL) {
/* Not enough free tables to map this region */
/* Not enough free tables to map this region. */
return table_idx_va;
}
/* Point to new subtable from this one. */
table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
this_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
/* Recurse to write into subtable */
end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
xlat_clean_dcache_range((uintptr_t)subtable,
XLAT_TABLE_ENTRIES * sizeof(uint64_t));
desc_tables[this_level].table_base = this_base;
desc_tables[this_level].table_idx_va = table_idx_va;
desc_tables[this_level].idx = table_idx;
base_va = table_idx_va;
this_level++;
this_base = subtable;
level_size = XLAT_BLOCK_SIZE(this_level);
table_idx_va = xlat_tables_find_start_va(mm, base_va,
this_level);
table_idx = xlat_tables_va_to_index(base_va,
table_idx_va, this_level);
max_entries = XLAT_TABLE_ENTRIES;
#if PLAT_XLAT_TABLES_DYNAMIC
if (this_level > ctx->base_level) {
xlat_table_inc_regions_count(ctx, subtable);
}
#endif
if (end_va !=
(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
uintptr_t end_va;
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
/* Recurse to write into subtable */
end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1U);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
xlat_clean_dcache_range((uintptr_t)subtable,
XLAT_TABLE_ENTRIES * sizeof(uint64_t));
uintptr_t base_va;
uint64_t *subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
desc_tables[this_level].table_base = this_base;
desc_tables[this_level].table_idx_va = table_idx_va;
desc_tables[this_level].idx = table_idx;
base_va = table_idx_va;
this_level++;
level_size = XLAT_BLOCK_SIZE(this_level);
table_idx_va = xlat_tables_find_start_va(mm, base_va,
this_level);
table_idx = xlat_tables_va_to_index(base_va,
table_idx_va, this_level);
this_base = subtable;
max_entries = XLAT_TABLE_ENTRIES;
#if PLAT_XLAT_TABLES_DYNAMIC
if (this_level > ctx->base_level) {
xlat_table_inc_regions_count(ctx, subtable);
}
#endif
if (end_va !=
(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
} else {
assert(action == ACTION_NONE);
table_idx++;
table_idx_va += level_size;
}
table_idx++;
table_idx_va += XLAT_BLOCK_SIZE(level);
/* If reached the end of the region, exit */
if (mm_end_va <= table_idx_va)
break;
}
return table_idx_va - 1U;
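
A related consequence in the map walker: with recursion, the just-filled subtable was cleaned to memory right after the recursive call returned; iteratively, that clean has to happen when the walk pops back to the parent level, because only then is the subtable complete. The sketch below shows that ordering only; flush_table_to_memory() and ADDR_MASK are stand-ins invented for the example, not the TF-A API.

#include <stdint.h>
#include <stddef.h>

#define ENTRIES   512U
#define ADDR_MASK (~0xFFFULL)           /* simplified table-address mask */

/* Stand-in for a cache clean by VA range; a real AArch64 port would issue
 * DC CVAC over the table so the hardware walker sees the final contents. */
static void flush_table_to_memory(const void *base, size_t size)
{
    (void)base;
    (void)size;
}

/* Pop path of the map walker: the subtable reached through the parent
 * entry is complete only now, so this is where it gets cleaned, rather
 * than at the point where it was first linked into the parent. */
static void map_pop(const uint64_t *parent_table, unsigned int parent_idx)
{
    uint64_t desc = parent_table[parent_idx];
    const uint64_t *subtable =
        (const uint64_t *)(uintptr_t)(desc & ADDR_MASK);

    flush_table_to_memory(subtable, ENTRIES * sizeof(uint64_t));
}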


@@ -109,7 +109,7 @@ static const char *invalid_descriptors_ommited =
"%s(%d invalid descriptors omitted)\n";
/*
* Recursive function that reads the translation tables passed as an argument
* Function that reads the translation tables passed as an argument
* and prints their status.
*/
static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
@@ -118,10 +118,23 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
uint64_t desc;
uintptr_t table_idx_va = table_base_va;
/*
 * Data structure to track a DESC_TABLE entry before iterating into the
 * subtable of the next translation level. It is restored after returning
 * from the subtable iteration.
 */
struct desc_table {
const uint64_t *table_base;
uintptr_t table_idx_va;
unsigned int idx;
} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
{NULL, 0U, XLAT_TABLE_ENTRIES}, };
unsigned int this_level = level;
const uint64_t *this_base = table_base;
unsigned int max_entries = table_entries;
size_t level_size = XLAT_BLOCK_SIZE(this_level);
unsigned int table_idx = 0U;
size_t level_size = XLAT_BLOCK_SIZE(level);
uintptr_t table_idx_va = table_base_va;
/*
* Keep track of how many invalid descriptors are counted in a row.
@@ -131,67 +144,110 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
*/
int invalid_row_count = 0;
while (table_idx < table_entries) {
desc = table_base[table_idx];
if ((desc & DESC_MASK) == INVALID_DESC) {
if (invalid_row_count == 0) {
printf("%sVA:0x%lx size:0x%zx\n",
level_spacers[level],
table_idx_va, level_size);
}
invalid_row_count++;
} else {
while (this_base != NULL) {
/* Finished iterating over the current xlat level. */
if (table_idx >= max_entries) {
if (invalid_row_count > 1) {
printf(invalid_descriptors_ommited,
level_spacers[level],
invalid_row_count - 1);
level_spacers[this_level],
invalid_row_count - 1);
}
invalid_row_count = 0;
/*
* Check if this is a table or a block. Tables are only
* allowed in levels other than 3, but DESC_PAGE has the
* same value as DESC_TABLE, so we need to check.
*/
if (((desc & DESC_MASK) == TABLE_DESC) &&
(level < XLAT_TABLE_LEVEL_MAX)) {
/*
* Do not print any PA for a table descriptor,
* as it doesn't directly map physical memory
* but instead points to the next translation
* table in the translation table walk.
*/
printf("%sVA:0x%lx size:0x%zx\n",
level_spacers[level],
table_idx_va, level_size);
uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
xlat_tables_print_internal(ctx, table_idx_va,
(uint64_t *)addr_inner,
XLAT_TABLE_ENTRIES, level + 1U);
/* No parent level left to iterate. */
if (this_level <= level) {
this_base = NULL;
table_idx = max_entries + 1;
} else {
printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
level_spacers[level], table_idx_va,
(uint64_t)(desc & TABLE_ADDR_MASK),
level_size);
xlat_desc_print(ctx, desc);
printf("\n");
/* Restore the previous DESC_TABLE entry and resume
 * iterating.
 */
this_level--;
level_size = XLAT_BLOCK_SIZE(this_level);
this_base = desc_tables[this_level].table_base;
table_idx = desc_tables[this_level].idx;
table_idx_va =
desc_tables[this_level].table_idx_va;
if (this_level == level) {
max_entries = table_entries;
} else {
max_entries = XLAT_TABLE_ENTRIES;
}
assert(this_base != NULL);
}
} else {
uint64_t desc = this_base[table_idx];
if ((desc & DESC_MASK) == INVALID_DESC) {
if (invalid_row_count == 0) {
printf("%sVA:0x%lx size:0x%zx\n",
level_spacers[this_level],
table_idx_va, level_size);
}
invalid_row_count++;
table_idx++;
table_idx_va += level_size;
} else {
if (invalid_row_count > 1) {
printf(invalid_descriptors_ommited,
level_spacers[this_level],
invalid_row_count - 1);
}
invalid_row_count = 0;
/*
* Check if this is a table or a block. Tables
* are only allowed in levels other than 3, but
* DESC_PAGE has the same value as DESC_TABLE,
* so we need to check.
*/
if (((desc & DESC_MASK) == TABLE_DESC) &&
(this_level < XLAT_TABLE_LEVEL_MAX)) {
uintptr_t addr_inner;
/*
* Do not print any PA for a table
* descriptor, as it doesn't directly
* map physical memory but instead
* points to the next translation
* table in the translation table walk.
*/
printf("%sVA:0x%lx size:0x%zx\n",
level_spacers[this_level],
table_idx_va, level_size);
addr_inner = desc & TABLE_ADDR_MASK;
/* save current xlat level */
desc_tables[this_level].table_base =
this_base;
desc_tables[this_level].idx =
table_idx + 1;
desc_tables[this_level].table_idx_va =
table_idx_va + level_size;
/* start iterating next level entries */
this_base = (uint64_t *)addr_inner;
max_entries = XLAT_TABLE_ENTRIES;
this_level++;
level_size =
XLAT_BLOCK_SIZE(this_level);
table_idx = 0U;
} else {
printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
level_spacers[this_level],
table_idx_va,
(uint64_t)(desc & TABLE_ADDR_MASK),
level_size);
xlat_desc_print(ctx, desc);
printf("\n");
table_idx++;
table_idx_va += level_size;
}
}
}
table_idx++;
table_idx_va += level_size;
}
if (invalid_row_count > 1) {
printf(invalid_descriptors_ommited,
level_spacers[level], invalid_row_count - 1);
}
}
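
The print walker saves its cursor in a slightly different form from the map/unmap walkers: it stores the position of the entry after the table descriptor (idx + 1 and the VA advanced by one block), so the pop path restores it verbatim instead of advancing it afterwards. Both forms are equivalent; the sketch below shows the save step only, with hypothetical names.

#include <stdint.h>
#include <stddef.h>

struct print_cursor {
    const uint64_t *table;
    uintptr_t va;
    unsigned int idx;
};

/* Save the parent position already advanced past the table descriptor,
 * so restoring it on the pop path needs no further adjustment. */
static struct print_cursor save_resume_point(const uint64_t *table,
                                             uintptr_t va, unsigned int idx,
                                             size_t level_size)
{
    struct print_cursor c;

    c.table = table;
    c.idx = idx + 1U;            /* entry after the table descriptor */
    c.va = va + level_size;      /* VA covered by that next entry */
    return c;
}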