I've just committed the attached patch to correct the I-cache and D-cache
line size calculation used in __aarch64_sync_cache_range() for AArch64.
/Marcus
2012-09-03  Marcus Shawcroft  <marcus.shawcr...@arm.com>

	* config/aarch64/sync-cache.c (__aarch64_sync_cache_range): Lift
	declarations to top of function.  Update comment.  Correct
	icache_lsize and dcache_lsize calculation.
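To illustrate the arithmetic: the CTR_EL0 line-size fields hold log2 of the
line size in 4-byte words, so the size in bytes is 4 << field rather than
1 << field.  A small stand-alone sketch of the decoding (the CTR_EL0 value
used here is made up for illustration, not read from hardware):

#include <stdio.h>

int
main (void)
{
  unsigned int cache_info = 0x84448004;  /* hypothetical CTR_EL0 value */
  unsigned int iminline = cache_info & 0xF;          /* log2 of words = 4 */
  unsigned int dminline = (cache_info >> 16) & 0xF;  /* log2 of words = 4 */

  /* Correct decoding: 4 << field gives the line size in bytes.  */
  printf ("icache line %u bytes, dcache line %u bytes\n",
	  4u << iminline, 4u << dminline);            /* prints 64 and 64 */

  /* The previous code computed 1 << field, which would report 16 here.  */
  return 0;
}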
diff --git a/libgcc/config/aarch64/sync-cache.c b/libgcc/config/aarch64/sync-cache.c
index 089439d..1636b94 100644
--- a/libgcc/config/aarch64/sync-cache.c
+++ b/libgcc/config/aarch64/sync-cache.c
@@ -22,20 +22,22 @@ void
 __aarch64_sync_cache_range (const void *base, const void *end)
 {
   unsigned int cache_info = 0;
+  unsigned int icache_lsize;
+  unsigned int dcache_lsize;
+  const char *address;
 
-  /* CTR_EL0 is the same as AArch32's CTR which contains log2 of the
-     icache size in [3:0], and log2 of the dcache line in [19:16].  */
+  /* CTR_EL0 [3:0] contains log2 of icache line size in words.
+     CTR_EL0 [19:16] contains log2 of dcache line size in words.  */
   asm volatile ("mrs\t%0, ctr_el0":"=r" (cache_info));
 
-  unsigned int icache_lsize = 1 << (cache_info & 0xF);
-  unsigned int dcache_lsize = 1 << ((cache_info >> 16) & 0xF);
+  icache_lsize = 4 << (cache_info & 0xF);
+  dcache_lsize = 4 << ((cache_info >> 16) & 0xF);
 
   /* Loop over the address range, clearing one cache line at once.
      Data cache must be flushed to unification first to make sure the
      instruction cache fetches the updated data.  'end' is exclusive,
      as per the GNU definition of __clear_cache.  */
-  const char *address;
 
   for (address = base; address < (const char *) end; address += dcache_lsize)
     asm volatile ("dc\tcvau, %0"
                   :
--
1.7.12.rc0.22.gcdd159b
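
For reference, a usage sketch (illustrative only, not part of the patch): as
far as I recall this routine backs GCC's __clear_cache/__builtin___clear_cache
on AArch64, which callers invoke after writing instructions into a buffer, and
'end' is exclusive as the comment in the patch notes.  The publish_code helper
below is hypothetical and assumes 'buf' is already mapped executable.

#include <stddef.h>
#include <string.h>

void
publish_code (char *buf, const void *insns, size_t size)
{
  /* Copy the newly generated instructions into the executable buffer.  */
  memcpy (buf, insns, size);

  /* Flush the D-cache to the point of unification and invalidate the
     I-cache for [buf, buf + size); the end pointer is exclusive.  */
  __builtin___clear_cache (buf, buf + size);
}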