Diffstat (limited to 'arch/mips/cavium-octeon/executive/cvmx-bootmem.c')
| -rw-r--r-- | arch/mips/cavium-octeon/executive/cvmx-bootmem.c | 123 |
1 files changed, 116 insertions, 7 deletions
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 4f5a08b37cc..504ed61a47c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-spinlock.h>
@@ -97,6 +98,33 @@ void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
         return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
 }
 
+void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
+                                     uint64_t max_addr, uint64_t align,
+                                     char *name)
+{
+        int64_t addr;
+
+        addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+                                                  align, name, 0);
+        if (addr >= 0)
+                return cvmx_phys_to_ptr(addr);
+        else
+                return NULL;
+}
+
+void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address,
+                                       char *name)
+{
+        return cvmx_bootmem_alloc_named_range(size, address, address + size,
+                                              0, name);
+}
+
+void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name)
+{
+        return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name);
+}
+EXPORT_SYMBOL(cvmx_bootmem_alloc_named);
+
 int cvmx_bootmem_free_named(char *name)
 {
         return cvmx_bootmem_phy_named_block_free(name, 0);
@@ -106,6 +134,7 @@ struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
 {
         return cvmx_bootmem_phy_named_block_find(name, 0);
 }
+EXPORT_SYMBOL(cvmx_bootmem_find_named_block);
 
 void cvmx_bootmem_lock(void)
 {
@@ -126,8 +155,8 @@ int cvmx_bootmem_init(void *mem_desc_ptr)
          *
          * Linux 64 bit: Set XKPHYS bit
          * Linux 32 bit: use mmap to create mapping, use virtual address
-         * CVMX 64 bit:  use physical address directly
-         * CVMX 32 bit:  use physical address directly
+         * CVMX 64 bit: use physical address directly
+         * CVMX 32 bit: use physical address directly
          *
          * Note that the CVMX environment assumes the use of 1-1 TLB
          * mappings so that the physical addresses can be used
@@ -224,7 +253,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
         * impossible requests up front. (NOP for address_min == 0)
         */
        if (alignment)
-               address_min = __ALIGN_MASK(address_min, (alignment - 1));
+               address_min = ALIGN(address_min, alignment);
 
        /*
        * Reject inconsistent args.  We have adjusted these, so this
@@ -262,7 +291,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
                * satisfy request.
                */
               usable_base =
-                  __ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
+                  ALIGN(max(address_min, ent_addr), alignment);
               usable_max = min(address_max, ent_addr + ent_size);
               /*
                * We should be able to allocate block at address
@@ -369,7 +398,7 @@ error_out:
 int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
 {
        uint64_t cur_addr;
-       uint64_t prev_addr = 0;  /* zero is invalid */
+       uint64_t prev_addr = 0; /* zero is invalid */
        int retval = 0;
 
 #ifdef DEBUG
@@ -395,7 +424,7 @@ int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
        if (cur_addr == 0 || phy_addr < cur_addr) {
               /* add at front of list - special case with changing head ptr */
               if (cur_addr && phy_addr + size > cur_addr)
-                      goto bootmem_free_done;  /* error, overlapping section */
+                      goto bootmem_free_done; /* error, overlapping section */
               else if (phy_addr + size == cur_addr) {
                      /* Add to front of existing first block */
                      cvmx_bootmem_phy_set_next(phy_addr,
@@ -582,5 +611,85 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
        }
 
        cvmx_bootmem_unlock();
-       return named_block_ptr != NULL;  /* 0 on failure, 1 on success */
+       return named_block_ptr != NULL; /* 0 on failure, 1 on success */
+}
+
+int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
+                                           uint64_t max_addr,
+                                           uint64_t alignment,
+                                           char *name,
+                                           uint32_t flags)
+{
+        int64_t addr_allocated;
+        struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;
+
+#ifdef DEBUG
+        cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
+                     "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
+                     (unsigned long long)size,
+                     (unsigned long long)min_addr,
+                     (unsigned long long)max_addr,
+                     (unsigned long long)alignment,
+                     name);
+#endif
+        if (cvmx_bootmem_desc->major_version != 3) {
+                cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+                             "%d.%d at addr: %p\n",
+                             (int)cvmx_bootmem_desc->major_version,
+                             (int)cvmx_bootmem_desc->minor_version,
+                             cvmx_bootmem_desc);
+                return -1;
+        }
+
+        /*
+         * Take lock here, as name lookup/block alloc/name add need to
+         * be atomic.
+         */
+        if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+                cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+
+        /* Get pointer to first available named block descriptor */
+        named_block_desc_ptr =
+                cvmx_bootmem_phy_named_block_find(NULL,
+                                                  flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+        /*
+         * Check to see if name already in use, return error if name
+         * not available or no more room for blocks.
+         */
+        if (cvmx_bootmem_phy_named_block_find(name,
+                                              flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
+                if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+                        cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+                return -1;
+        }
+
+
+        /*
+         * Round size up to mult of minimum alignment bytes We need
+         * the actual size allocated to allow for blocks to be
+         * coallesced when they are freed.  The alloc routine does the
+         * same rounding up on all allocations.
+         */
+        size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
+
+        addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
+                                                alignment,
+                                                flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
+        if (addr_allocated >= 0) {
+                named_block_desc_ptr->base_addr = addr_allocated;
+                named_block_desc_ptr->size = size;
+                strncpy(named_block_desc_ptr->name, name,
+                        cvmx_bootmem_desc->named_block_name_len);
+                named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
+        }
+
+        if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+                cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
+        return addr_allocated;
+}
+
+struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void)
+{
+        return cvmx_bootmem_desc;
 }
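
For context, the patch exports cvmx_bootmem_alloc_named() and cvmx_bootmem_find_named_block(), so loadable modules can reserve a bootmem block under a name and locate it again later. The following is a minimal, hypothetical usage sketch (not part of the patch); the module name, the block name "example_block", and the size/alignment values are illustrative assumptions.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>

static int __init named_block_demo_init(void)
{
        struct cvmx_bootmem_named_block_desc *desc;
        void *buf;

        /* Reserve a 64 KB block, 128-byte aligned, under an example name. */
        buf = cvmx_bootmem_alloc_named(0x10000, 128, "example_block");
        if (!buf)
                return -ENOMEM;

        /* Other code can later look up the same block by its name. */
        desc = cvmx_bootmem_find_named_block("example_block");
        if (desc)
                pr_info("example_block: base 0x%llx, size 0x%llx\n",
                        (unsigned long long)desc->base_addr,
                        (unsigned long long)desc->size);
        return 0;
}

static void __exit named_block_demo_exit(void)
{
        /* Named bootmem blocks persist; nothing is released here. */
}

module_init(named_block_demo_init);
module_exit(named_block_demo_exit);
MODULE_LICENSE("GPL");

Note that cvmx_bootmem_free_named() exists in this file but is not given an EXPORT_SYMBOL by this patch, so a module built against this tree could not free the block directly.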
