diff --git a/zephyr/lib/regions_mm.c b/zephyr/lib/regions_mm.c
index dbb1aa087b26..9a232cf03d09 100644
--- a/zephyr/lib/regions_mm.c
+++ b/zephyr/lib/regions_mm.c
@@ -263,9 +263,6 @@ void *vmh_alloc(struct vmh_heap *heap, uint32_t alloc_size)
 		if (!heap->physical_blocks_allocators[mem_block_iterator])
 			continue;
 
-		/* If we do not span alloc and block is smaller than alloc we try next mem_block */
-		if (block_size < alloc_size && !heap->allocating_continuously)
-			continue;
 		/* calculate block count needed to allocate for current
 		 * mem_block.
 		 */
@@ -273,6 +270,10 @@ void *vmh_alloc(struct vmh_heap *heap, uint32_t alloc_size)
 			1 << heap->physical_blocks_allocators[mem_block_iterator]->info.blk_sz_shift;
 		block_count = SOF_DIV_ROUND_UP((uint64_t)alloc_size, (uint64_t)block_size);
 
+		/* If we do not span alloc and block is smaller than alloc we try next mem_block */
+		if (block_size < alloc_size && !heap->allocating_continuously)
+			continue;
+
 		if (block_count >
 		    heap->physical_blocks_allocators[mem_block_iterator]->info.num_blocks)
			continue;
@@ -283,7 +284,7 @@ void *vmh_alloc(struct vmh_heap *heap, uint32_t alloc_size)
 			allocation_error_code = sys_mem_blocks_alloc_contiguous(
 				heap->physical_blocks_allocators[mem_block_iterator], block_count,
 				&ptr);
-		} else if (block_size > alloc_size) {
+		} else if (block_size >= alloc_size) {
 			allocation_error_code = sys_mem_blocks_alloc(
 				heap->physical_blocks_allocators[mem_block_iterator], block_count,
 				&ptr);
@@ -494,7 +495,7 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 
 	size_t mem_block_iter, i, size_to_free, block_size,
 		ptr_bit_array_offset, ptr_bit_array_position, physical_block_count,
-		check_offset, check_position, check_size;
+		check_offset, check_position, check_size, blocks_to_free;
 	uintptr_t phys_aligned_ptr, phys_aligned_alloc_end, phys_block_ptr;
 	bool ptr_range_found;
 
@@ -551,20 +552,14 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 			return -EINVAL;
 
 		if (bit_value) {
-			/* We know we have more than one block was allocated so
-			 * we need to find the size
-			 */
-			size_t bits_to_check =
-				heap->physical_blocks_allocators
-					[mem_block_iter]->info.num_blocks - ptr_bit_array_position;
-
 			/* Neeeeeeeds optimization - thinking how to do it properly
 			 * each set bit in order after another means one allocated block.
 			 * When we get to 0 in such range we know that is last allocated block.
 			 * Testing bundles looks promising - need to investigate.
 			 */
 			for (i = ptr_bit_array_position;
-			     i < bits_to_check;
+			     i < heap->physical_blocks_allocators
+					[mem_block_iter]->info.num_blocks;
 			     i++) {
 				sys_bitarray_test_bit(heap->allocation_sizes[mem_block_iter],
 						      i,
@@ -580,10 +575,13 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 				 */
 				size_to_free = block_size;
 			}
-
+			blocks_to_free = size_to_free / block_size;
 			retval = sys_mem_blocks_free_contiguous(
 				heap->physical_blocks_allocators[mem_block_iter], ptr,
-				size_to_free / block_size);
+				blocks_to_free);
+			if (!retval)
+				sys_bitarray_clear_region(heap->allocation_sizes[mem_block_iter],
+							  blocks_to_free, ptr_bit_array_position);
 		} else {
 			retval = sys_mem_blocks_free(heap->physical_blocks_allocators[mem_block_iter],
 						     1, &ptr);
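
Review note on the vmh_alloc() hunks: moving the "block is smaller than alloc" test below the block_size computation matters because block_size is recomputed inside the loop from the current allocator's blk_sz_shift; before the patch the test read whatever value was left from the previous iteration (or an unset value on the first pass). The sketch below is a hypothetical reduction, not the SOF implementation: pick_allocator, blk_sz_shift, and the two-allocator setup are invented for illustration, and the !allocating_continuously condition is assumed true throughout.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical reduction (not SOF code): two allocators with 8- and
 * 64-byte blocks.  In the pre-patch ordering the "block_size < alloc_size"
 * test runs before block_size is recomputed for the current allocator, so
 * it reads a stale (here: zero-initialized) value.
 */
static const uint32_t blk_sz_shift[2] = { 3, 6 }; /* 8-byte, 64-byte blocks */

static int pick_allocator(uint32_t alloc_size, bool check_before_compute)
{
	uint32_t block_size = 0;
	int i;

	for (i = 0; i < 2; i++) {
		/* pre-patch order: tests a block_size left over from the
		 * previous iteration (or never set at all)
		 */
		if (check_before_compute && block_size < alloc_size)
			continue;

		block_size = 1u << blk_sz_shift[i];

		/* patched order: tests the value just computed */
		if (!check_before_compute && block_size < alloc_size)
			continue;

		return i;
	}
	return -1; /* no allocator fits */
}

int main(void)
{
	/* stale check skips every allocator: prints -1 */
	printf("stale check picks allocator %d\n", pick_allocator(32, true));
	/* patched check finds the 64-byte allocator: prints 1 */
	printf("fixed check picks allocator %d\n", pick_allocator(32, false));
	return 0;
}
```

The related `>` to `>=` change in the same function follows from the reordering: once a single block exactly the size of the request reaches that branch, it can be served by the plain sys_mem_blocks_alloc() path rather than falling through.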
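Review note on the vmh_free() hunks: per the comment kept in the diff, the allocation_sizes bitmap encodes a multi-block allocation as a run of set bits whose first clear bit marks the allocation's last block. The removed bound `num_blocks - ptr_bit_array_position` shrank the scan range instead of clamping it, so a span starting past the middle of the bitmap was never scanned; the patch bounds the loop by num_blocks and, after a successful contiguous free, clears the scanned region so stale size bits cannot corrupt a later free. The following is a minimal model under those assumptions, not SOF code: mark_alloc, free_span, and NUM_BLOCKS are invented names, and a bool array stands in for Zephyr's sys_bitarray.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical model of the allocation_sizes bitmap: an N-block span
 * stores N - 1 set bits starting at its first block, so the first clear
 * bit marks the span's last block (the convention the diff's comment
 * describes).
 */
#define NUM_BLOCKS 16
static bool alloc_bits[NUM_BLOCKS];

/* Record an allocation of 'count' blocks starting at block 'pos'. */
static void mark_alloc(size_t pos, size_t count)
{
	for (size_t i = pos; i + 1 < pos + count; i++)
		alloc_bits[i] = true;
}

/* Recover the span length starting at 'pos': scan to the first clear bit,
 * bounded by the total block count (the patched loop bound; the removed
 * "num_blocks - position" bound gives zero iterations whenever pos is past
 * the middle of the bitmap).  Clearing the scanned bits mirrors the
 * sys_bitarray_clear_region() call the patch adds after a successful
 * contiguous free.
 */
static size_t free_span(size_t pos)
{
	size_t i = pos;

	while (i < NUM_BLOCKS && alloc_bits[i])
		alloc_bits[i++] = false;
	return i - pos + 1; /* the terminating clear bit is the last block */
}

int main(void)
{
	mark_alloc(10, 5); /* 5-block allocation near the end of the bitmap */
	printf("freed %zu blocks\n", free_span(10)); /* prints: freed 5 blocks */
	return 0;
}
```

With the old bound, free_span(10) would compare i = 10 against 16 - 10 = 6 and never enter the loop, under-reporting the span as a single block; the patched bound recovers all 5 blocks.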