Merge branch '2021-06-07-arm-cache-cp15-improvements' into next
To quote the series author, Patrick Delaunay:
On the STM32MP15x platform we can use OP-TEE, loaded in DDR in a region
protected by a firewall. This region is reserved in the device tree with
the "no-map" property, as defined in the binding file
doc/device-tree-bindings/reserved-memory/reserved-memory.txt.
Sometimes the platform boot fails in U-Boot on a Cortex-A7 access to
this region (it depends on the binary, and the issue can change with the
compiler version or with code alignment); the firewall then raises an error,
for example:
E/TC:0   tzc_it_handler:19 TZC permission failure
E/TC:0   dump_fail_filter:420 Permission violation on filter 0
E/TC:0   dump_fail_filter:425 Violation @0xde5c6bf0, non-secure privileged read,
         AXI ID 5c0
E/TC:0   Panic
After investigation, the forbidden access is a speculative request performed
by the Cortex-A7, because all of the DDR is mapped as MEMORY with the
CACHEABLE property.
The issue is solved only when the region reserved by OP-TEE is no longer
mapped in U-Boot, as is already done in the Linux kernel.
Tested on a DK2 board with OP-TEE 3.12 / TF-A 2.4:
with a hard-coded address for the OP-TEE reserved memory,
the error doesn't occur:
 void dram_bank_mmu_setup(int bank)
 {
 ...
 	for (i = start >> MMU_SECTION_SHIFT;
 	     i < (start >> MMU_SECTION_SHIFT) + (size >> MMU_SECTION_SHIFT);
 	     i++) {
 		option = DCACHE_DEFAULT_OPTION;
 		if (i >= 0xde0)
 			option = INVALID_ENTRY;
 		set_section_dcache(i, option);
 	}
 }
Just by changing the test from 0xde0 to 0xdf0, the OP-TEE memory protected
by the firewall is mapped cacheable and the error occurs.
I think this can be a general issue for the ARM architecture: the "no-map" tag
of reserved memory in the device tree should be respected by U-Boot when a
firewall is configured before U-Boot execution.
But I don't propose a generic solution in
arm/lib/cache-cp15.c:dram_bank_mmu_setup()
because the device tree parsing done in lmb_init_and_reserve() takes a
long time when it is executed without the data cache.
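Instead, the series handles the problem at platform level. A condensed sketch
of what the hunks below do (surrounding code and error handling omitted): the
device tree is parsed once in enable_caches(), while the early data cache is
still active, and dram_bank_mmu_setup() then leaves unmapped any section that
falls in an LMB region carrying the LMB_NOMAP flag:

 struct lmb lmb;

 void enable_caches(void)
 {
 	/* parse device tree when data cache is still activated */
 	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
 	...
 }

 void dram_bank_mmu_setup(int bank)
 {
 	...
 	for (i = start >> MMU_SECTION_SHIFT;
 	     i < (start >> MMU_SECTION_SHIFT) + (size >> MMU_SECTION_SHIFT);
 	     i++) {
 		option = DCACHE_DEFAULT_OPTION;
 		if (use_lmb && lmb_is_reserved_flags(&lmb, i << MMU_SECTION_SHIFT, LMB_NOMAP))
 			option = 0;	/* INVALID ENTRY in TLB */
 		set_section_dcache(i, option);
 	}
 }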
			
			
This commit is contained in: commit 24e1e8841c
@@ -12,6 +12,7 @@
 #include <env.h>
 #include <init.h>
 #include <log.h>
+#include <lmb.h>
 #include <misc.h>
 #include <net.h>
 #include <asm/io.h>
@@ -90,6 +91,8 @@
  */
 u8 early_tlb[PGTABLE_SIZE] __section(".data") __aligned(0x4000);
 
+struct lmb lmb;
+
 #if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
 #ifndef CONFIG_TFABOOT
 static void security_init(void)
@@ -221,6 +224,8 @@ void dram_bank_mmu_setup(int bank)
 	int	i;
 	phys_addr_t start;
 	phys_size_t size;
+	bool use_lmb = false;
+	enum dcache_option option;
 
 	if (IS_ENABLED(CONFIG_SPL_BUILD)) {
 		start = ALIGN_DOWN(STM32_SYSRAM_BASE, MMU_SECTION_SIZE);
@@ -229,6 +234,7 @@ void dram_bank_mmu_setup(int bank)
 		/* bd->bi_dram is available only after relocation */
 		start = bd->bi_dram[bank].start;
 		size =  bd->bi_dram[bank].size;
+		use_lmb = true;
 	} else {
 		/* mark cacheable and executable the beggining of the DDR */
 		start = STM32_DDR_BASE;
@@ -237,8 +243,12 @@ void dram_bank_mmu_setup(int bank)
 
 	for (i = start >> MMU_SECTION_SHIFT;
 	     i < (start >> MMU_SECTION_SHIFT) + (size >> MMU_SECTION_SHIFT);
-	     i++)
-		set_section_dcache(i, DCACHE_DEFAULT_OPTION);
+	     i++) {
+		option = DCACHE_DEFAULT_OPTION;
+		if (use_lmb && lmb_is_reserved_flags(&lmb, i << MMU_SECTION_SHIFT, LMB_NOMAP))
+			option = 0; /* INVALID ENTRY in TLB */
+		set_section_dcache(i, option);
+	}
 }
 /*
  * initialize the MMU and activate cache in SPL or in U-Boot pre-reloc stage
@@ -302,6 +312,9 @@ int arch_cpu_init(void)
 
 void enable_caches(void)
 {
+	/* parse device tree when data cache is still activated */
+	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
+
 	/* I-cache is already enabled in start.S: icache_enable() not needed */
 
 	/* deactivate the data cache, early enabled in arch_cpu_init() */
@@ -50,13 +50,16 @@ ulong board_get_usable_ram_top(ulong total_size)
 	lmb_init(&lmb);
 	lmb_add(&lmb, gd->ram_base, gd->ram_size);
 	boot_fdt_add_mem_rsv_regions(&lmb, (void *)gd->fdt_blob);
-	size = ALIGN(CONFIG_SYS_MALLOC_LEN + total_size, MMU_SECTION_SIZE),
+	/* add 8M for reserved memory for display, fdt, gd,... */
+	size = ALIGN(SZ_8M + CONFIG_SYS_MALLOC_LEN + total_size, MMU_SECTION_SIZE),
 	reg = lmb_alloc(&lmb, size, MMU_SECTION_SIZE);
 
 	if (!reg)
 		reg = gd->ram_top - size;
 
-	mmu_set_region_dcache_behaviour(reg, size, DCACHE_DEFAULT_OPTION);
+	/* before relocation, mark the U-Boot memory as cacheable by default */
+	if (!(gd->flags & GD_FLG_RELOC))
+		mmu_set_region_dcache_behaviour(reg, size, DCACHE_DEFAULT_OPTION);
 
 	return reg + size;
 }
@@ -75,18 +75,20 @@ static const image_header_t *image_get_fdt(ulong fdt_addr)
 #endif
 
 static void boot_fdt_reserve_region(struct lmb *lmb, uint64_t addr,
-				    uint64_t size)
+				    uint64_t size, enum lmb_flags flags)
 {
 	long ret;
 
-	ret = lmb_reserve(lmb, addr, size);
+	ret = lmb_reserve_flags(lmb, addr, size, flags);
 	if (ret >= 0) {
-		debug("   reserving fdt memory region: addr=%llx size=%llx\n",
-		      (unsigned long long)addr, (unsigned long long)size);
+		debug("   reserving fdt memory region: addr=%llx size=%llx flags=%x\n",
+		      (unsigned long long)addr,
+		      (unsigned long long)size, flags);
 	} else {
 		puts("ERROR: reserving fdt memory region failed ");
-		printf("(addr=%llx size=%llx)\n",
-		       (unsigned long long)addr, (unsigned long long)size);
+		printf("(addr=%llx size=%llx flags=%x)\n",
+		       (unsigned long long)addr,
+		       (unsigned long long)size, flags);
 	}
 }
 
@@ -106,6 +108,7 @@ void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
 	int i, total, ret;
 	int nodeoffset, subnode;
 	struct fdt_resource res;
+	enum lmb_flags flags;
 
 	if (fdt_check_header(fdt_blob) != 0)
 		return;
@@ -115,7 +118,7 @@ void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
 	for (i = 0; i < total; i++) {
 		if (fdt_get_mem_rsv(fdt_blob, i, &addr, &size) != 0)
 			continue;
-		boot_fdt_reserve_region(lmb, addr, size);
+		boot_fdt_reserve_region(lmb, addr, size, LMB_NONE);
 	}
 
 	/* process reserved-memory */
@@ -127,9 +130,13 @@ void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
 			ret = fdt_get_resource(fdt_blob, subnode, "reg", 0,
 					       &res);
 			if (!ret && fdtdec_get_is_enabled(fdt_blob, subnode)) {
+				flags = LMB_NONE;
+				if (fdtdec_get_bool(fdt_blob, subnode,
+						    "no-map"))
+					flags = LMB_NOMAP;
 				addr = res.start;
 				size = res.end - res.start + 1;
-				boot_fdt_reserve_region(lmb, addr, size);
+				boot_fdt_reserve_region(lmb, addr, size, flags);
 			}
 
 			subnode = fdt_next_subnode(fdt_blob, subnode);
@@ -12,6 +12,16 @@
  * Copyright (C) 2001 Peter Bergner, IBM Corp.
  */
 
+/**
+ * enum lmb_flags - definition of memory region attributes
+ * @LMB_NONE: no special request
+ * @LMB_NOMAP: don't add to mmu configuration
+ */
+enum lmb_flags {
+	LMB_NONE		= 0x0,
+	LMB_NOMAP		= 0x4,
+};
+
 /**
  * struct lmb_property - Description of one region.
  *
@@ -21,6 +31,7 @@
 struct lmb_property {
 	phys_addr_t base;
 	phys_size_t size;
+	enum lmb_flags flags;
 };
 
 /**
@@ -69,6 +80,17 @@ extern void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
 				       phys_size_t size, void *fdt_blob);
 extern long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size);
 extern long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size);
+/**
+ * lmb_reserve_flags - Reserve one region with a specific flags bitfield.
+ *
+ * @lmb		the logical memory block struct
+ * @base	base address of the memory region
+ * @size	size of the memory region
+ * @flags	flags for the memory region
+ * @return 0 if OK, > 0 for coalesced region or a negative error code.
+ */
+long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base,
+		       phys_size_t size, enum lmb_flags flags);
 extern phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align);
 extern phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
 			    phys_addr_t max_addr);
@@ -78,6 +100,15 @@ extern phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base,
 				  phys_size_t size);
 extern phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr);
 extern int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr);
+/**
+ * lmb_is_reserved_flags - test if tha address is in reserved region with a bitfield flag
+ *
+ * @lmb		the logical memory block struct
+ * @addr	address to be tested
+ * @flags	flags bitfied to be tested
+ * @return 0 if not reserved or reserved without the requested flag else 1
+ */
+int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags);
 extern long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size);
 
 extern void lmb_dump_all(struct lmb *lmb);
@@ -92,6 +123,13 @@ lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
 void board_lmb_reserve(struct lmb *lmb);
 void arch_lmb_reserve(struct lmb *lmb);
 
+/* Low level functions */
+
+static inline bool lmb_is_nomap(struct lmb_property *m)
+{
+	return m->flags & LMB_NOMAP;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_LMB_H */
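For reference, a minimal usage sketch of the new flags API (addresses and
sizes here are illustrative, not taken from the patch):

 	struct lmb lmb;

 	lmb_init(&lmb);
 	lmb_add(&lmb, 0xc0000000, SZ_1G);

 	/* e.g. a firewall-protected OP-TEE region marked "no-map" in the DT */
 	lmb_reserve_flags(&lmb, 0xde000000, SZ_32M, LMB_NOMAP);

 	/* returns 1: the address is in a reserved region carrying LMB_NOMAP */
 	lmb_is_reserved_flags(&lmb, 0xde000000, LMB_NOMAP);

 	/* also returns 1: LMB_NONE (0x0) matches any reserved region */
 	lmb_is_reserved_flags(&lmb, 0xde000000, LMB_NONE);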
							
								
								
									
lib/lmb.c (97 lines changed):
@@ -14,26 +14,30 @@
 
 #define LMB_ALLOC_ANYWHERE	0
 
+static void lmb_dump_region(struct lmb_region *rgn, char *name)
+{
+	unsigned long long base, size, end;
+	enum lmb_flags flags;
+	int i;
+
+	printf(" %s.cnt  = 0x%lx\n", name, rgn->cnt);
+
+	for (i = 0; i < rgn->cnt; i++) {
+		base = rgn->region[i].base;
+		size = rgn->region[i].size;
+		end = base + size - 1;
+		flags = rgn->region[i].flags;
+
+		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
+		       name, i, base, end, size, flags);
+	}
+}
+
 void lmb_dump_all_force(struct lmb *lmb)
 {
-	unsigned long i;
-
 	printf("lmb_dump_all:\n");
-	printf("    memory.cnt		   = 0x%lx\n", lmb->memory.cnt);
-	for (i = 0; i < lmb->memory.cnt; i++) {
-		printf("    memory.reg[0x%lx].base   = 0x%llx\n", i,
-		       (unsigned long long)lmb->memory.region[i].base);
-		printf("		   .size   = 0x%llx\n",
-		       (unsigned long long)lmb->memory.region[i].size);
-	}
-
-	printf("\n    reserved.cnt	   = 0x%lx\n", lmb->reserved.cnt);
-	for (i = 0; i < lmb->reserved.cnt; i++) {
-		printf("    reserved.reg[0x%lx].base = 0x%llx\n", i,
-		       (unsigned long long)lmb->reserved.region[i].base);
-		printf("		     .size = 0x%llx\n",
-		       (unsigned long long)lmb->reserved.region[i].size);
-	}
+	lmb_dump_region(&lmb->memory, "memory");
+	lmb_dump_region(&lmb->reserved, "reserved");
 }
 
 void lmb_dump_all(struct lmb *lmb)
@@ -81,6 +85,7 @@ static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 	for (i = r; i < rgn->cnt - 1; i++) {
 		rgn->region[i].base = rgn->region[i + 1].base;
 		rgn->region[i].size = rgn->region[i + 1].size;
+		rgn->region[i].flags = rgn->region[i + 1].flags;
 	}
 	rgn->cnt--;
 }
@@ -144,7 +149,8 @@ void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
 }
 
 /* This routine called with relocation disabled. */
-static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
+static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
+				 phys_size_t size, enum lmb_flags flags)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
@@ -152,6 +158,7 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 	if (rgn->cnt == 0) {
 		rgn->region[0].base = base;
 		rgn->region[0].size = size;
+		rgn->region[0].flags = flags;
 		rgn->cnt = 1;
 		return 0;
 	}
@@ -160,18 +167,27 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 	for (i = 0; i < rgn->cnt; i++) {
 		phys_addr_t rgnbase = rgn->region[i].base;
 		phys_size_t rgnsize = rgn->region[i].size;
+		phys_size_t rgnflags = rgn->region[i].flags;
 
-		if ((rgnbase == base) && (rgnsize == size))
-			/* Already have this region, so we're done */
-			return 0;
+		if (rgnbase == base && rgnsize == size) {
+			if (flags == rgnflags)
+				/* Already have this region, so we're done */
+				return 0;
+			else
+				return -1; /* regions with new flags */
+		}
 
 		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
 		if (adjacent > 0) {
+			if (flags != rgnflags)
+				break;
 			rgn->region[i].base -= size;
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
 		} else if (adjacent < 0) {
+			if (flags != rgnflags)
+				break;
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
@@ -182,8 +198,10 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 	}
 
 	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
-		lmb_coalesce_regions(rgn, i, i + 1);
-		coalesced++;
+		if (rgn->region[i].flags == rgn->region[i + 1].flags) {
+			lmb_coalesce_regions(rgn, i, i + 1);
+			coalesced++;
+		}
 	}
 
 	if (coalesced)
@@ -196,9 +214,11 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 		if (base < rgn->region[i].base) {
 			rgn->region[i + 1].base = rgn->region[i].base;
 			rgn->region[i + 1].size = rgn->region[i].size;
+			rgn->region[i + 1].flags = rgn->region[i].flags;
 		} else {
 			rgn->region[i + 1].base = base;
 			rgn->region[i + 1].size = size;
+			rgn->region[i + 1].flags = flags;
 			break;
 		}
 	}
@@ -206,6 +226,7 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 	if (base < rgn->region[0].base) {
 		rgn->region[0].base = base;
 		rgn->region[0].size = size;
+		rgn->region[0].flags = flags;
 	}
 
 	rgn->cnt++;
@@ -213,6 +234,12 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 	return 0;
 }
 
+static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
+			   phys_size_t size)
+{
+	return lmb_add_region_flags(rgn, base, size, LMB_NONE);
+}
+
 /* This routine may be called with relocation disabled. */
 long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 {
@@ -267,14 +294,21 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 	 * beginging of the hole and add the region after hole.
 	 */
 	rgn->region[i].size = base - rgn->region[i].base;
-	return lmb_add_region(rgn, end + 1, rgnend - end);
+	return lmb_add_region_flags(rgn, end + 1, rgnend - end,
+				    rgn->region[i].flags);
+}
+
+long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
+		       enum lmb_flags flags)
+{
+	struct lmb_region *_rgn = &(lmb->reserved);
+
+	return lmb_add_region_flags(_rgn, base, size, flags);
 }
 
 long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 {
-	struct lmb_region *_rgn = &(lmb->reserved);
-
-	return lmb_add_region(_rgn, base, size);
+	return lmb_reserve_flags(lmb, base, size, LMB_NONE);
 }
 
 static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
@@ -409,7 +443,7 @@ phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
 	return 0;
 }
 
-int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
+int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
 {
 	int i;
 
@@ -417,11 +451,16 @@ int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
 		phys_addr_t upper = lmb->reserved.region[i].base +
 			lmb->reserved.region[i].size - 1;
 		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
-			return 1;
+			return (lmb->reserved.region[i].flags & flags) == flags;
 	}
 	return 0;
 }
 
+int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
+{
+	return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
+}
+
 __weak void board_lmb_reserve(struct lmb *lmb)
 {
 	/* please define platform specific board_lmb_reserve() */
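The test added below exercises these rules; in short, adjacent reservations
are only coalesced when their flags match (values are again illustrative,
inside RAM already declared with lmb_add()):

 	lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP); /* new region -> 0 */
 	lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP); /* merged, same flags -> 1 */
 	lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NONE);  /* different flags, kept separate -> 0 */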
@@ -723,3 +723,92 @@ static int lib_test_lmb_max_regions(struct unit_test_state *uts)
 
 DM_TEST(lib_test_lmb_max_regions,
 	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
+
+static int lib_test_lmb_flags(struct unit_test_state *uts)
+{
+	const phys_addr_t ram = 0x40000000;
+	const phys_size_t ram_size = 0x20000000;
+	struct lmb lmb;
+	long ret;
+
+	lmb_init(&lmb);
+
+	ret = lmb_add(&lmb, ram, ram_size);
+	ut_asserteq(ret, 0);
+
+	/* reserve, same flag */
+	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
+	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+		   0, 0, 0, 0);
+
+	/* reserve again, same flag */
+	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
+	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+		   0, 0, 0, 0);
+
+	/* reserve again, new flag */
+	ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NONE);
+	ut_asserteq(ret, -1);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+		   0, 0, 0, 0);
+
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
+
+	/* merge after */
+	ret = lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NOMAP);
+	ut_asserteq(ret, 1);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x20000,
+		   0, 0, 0, 0);
+
+	/* merge before */
+	ret = lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP);
+	ut_asserteq(ret, 1);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x30000,
+		   0, 0, 0, 0);
+
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
+
+	ret = lmb_reserve_flags(&lmb, 0x40030000, 0x10000, LMB_NONE);
+	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
+		   0x40030000, 0x10000, 0, 0);
+
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
+
+	/* test that old API use LMB_NONE */
+	ret = lmb_reserve(&lmb, 0x40040000, 0x10000);
+	ut_asserteq(ret, 1);
+	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
+		   0x40030000, 0x20000, 0, 0);
+
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
+
+	ret = lmb_reserve_flags(&lmb, 0x40070000, 0x10000, LMB_NOMAP);
+	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
+		   0x40030000, 0x20000, 0x40070000, 0x10000);
+
+	ret = lmb_reserve_flags(&lmb, 0x40050000, 0x10000, LMB_NOMAP);
+	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 4, 0x40000000, 0x30000,
+		   0x40030000, 0x20000, 0x40050000, 0x10000);
+
+	/* merge with 2 adjacent regions */
+	ret = lmb_reserve_flags(&lmb, 0x40060000, 0x10000, LMB_NOMAP);
+	ut_asserteq(ret, 2);
+	ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
+		   0x40030000, 0x20000, 0x40050000, 0x30000);
+
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
+	ut_asserteq(lmb_is_nomap(&lmb.reserved.region[2]), 1);
+
+	return 0;
+}
+
+DM_TEST(lib_test_lmb_flags,
+	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);