diff --git a/cores/esp8266/abi.cpp b/cores/esp8266/abi.cpp index 72f3fb0a06..d1e6980c45 100644 --- a/cores/esp8266/abi.cpp +++ b/cores/esp8266/abi.cpp @@ -20,64 +20,354 @@ #include #include #include +#include +#include using __cxxabiv1::__guard; // Debugging helper, last allocation which returned NULL -extern void *umm_last_fail_alloc_addr; -extern int umm_last_fail_alloc_size; +extern "C" void* _heap_abi_malloc(size_t size, bool unhandled, const void* const caller); +#if UMM_ENABLE_MEMALIGN +extern "C" void* _heap_abi_memalign(size_t alignment, size_t size, bool unhandled, const void* const caller); +#endif extern "C" void __cxa_pure_virtual(void) __attribute__ ((__noreturn__)); extern "C" void __cxa_deleted_virtual(void) __attribute__ ((__noreturn__)); -#if !defined(__cpp_exceptions) +#if DEV_DEBUG_ABI_CPP +extern "C" void _dbg_abi_print_pstr(const char *function_name); +#define DEBUG_NEW_OP_PRINTF() _dbg_abi_print_pstr(__PRETTY_FUNCTION__) +#else +#define DEBUG_NEW_OP_PRINTF() do { } while (false) +#endif +/* + This is what I perceived to be the intent of the original code. -// overwrite weak operators new/new[] definitions + Use C++ "Replaceable allocation functions" to install debug wrappers to catch + additional information for debugging. The default C++ exception handlers use + weak links. + + C++ Exceptions: "enabled" - + * With debug (eg. "Debug port: Serial"), do full caller info capture and + Heap debug checks. "Replaceable allocation functions" are in use by the + debugging code. "Replaceable allocation functions" are not available to + the Sketch. + * Without debug, no OOM details captured. The C++ "Replaceable allocation + functions" are available to the Sketch. + + C++ Exceptions: "disabled" - + * C++ "Replaceable allocation functions" are always in use. + * With debug, do full caller info capture and Heap debug checks. + * Without debug, capture minimum OOM information. Calling address and size + of last alloc failure. 
+*/ + +#if defined(__cpp_exceptions) && \ +(defined(DEBUG_ESP_OOM) || defined(DEBUG_ESP_PORT) || defined(DEBUG_ESP_WITHINISR) || defined(MIN_ESP_OOM)) -void* operator new(size_t size) +// Debug replacement adaptation from ".../new_op.cc". +using std::new_handler; +using std::bad_alloc; + +#if defined(UMM_ENABLE_MEMALIGN) + +// Includes C++ exceptions +// Includes C++17 operator new align variants + +static void* _heap_new_align(std::size_t size, std::size_t alignment, const void* caller) { - void *ret = malloc(size); - if (0 != size && 0 == ret) { - umm_last_fail_alloc_addr = __builtin_return_address(0); - umm_last_fail_alloc_size = size; - __unhandled_exception(PSTR("OOM")); + /* + "Alignment must be a power of two." + The C++ sample code did this: if (__builtin_expect(!std::__has_single_bit(alignment), false)) throw(bad_alloc()); + + From https://en.cppreference.com/w/cpp/memory/c/aligned_alloc + "alignment - specifies the alignment. Must be a valid alignment + supported by the implementation." + + I left the validation to the umm_malloc library. See umm_memalign() for + details. Generally speaking, zero is handled as default, and the default + is sizeof(umm_block), 8 bytes. Since the default is 8 bytes, the + umm_malloc library is less strict about checking alignments less than 8 + bytes. 
+ */ + + void* p; + + while (nullptr == (p = _heap_abi_memalign(alignment, size, false, caller))) { + new_handler handler = std::get_new_handler(); + if (!handler) { + throw(bad_alloc()); + } + handler(); } - return ret; + + return p; +} + + +// new_opa +void* operator new (std::size_t size, std::align_val_t alignment) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_new_align(size, std::size_t(alignment), __builtin_return_address(0)); +} + +// new_opva +void* operator new[] (std::size_t size, std::align_val_t alignment) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_new_align(size, std::size_t(alignment), __builtin_return_address(0)); } -void* operator new[](size_t size) +// new_opant +void* operator new (std::size_t size, std::align_val_t alignment, const std::nothrow_t&) noexcept { - void *ret = malloc(size); - if (0 != size && 0 == ret) { - umm_last_fail_alloc_addr = __builtin_return_address(0); - umm_last_fail_alloc_size = size; - __unhandled_exception(PSTR("OOM")); + DEBUG_NEW_OP_PRINTF(); + + __try { + return _heap_new_align(size, std::size_t(alignment), __builtin_return_address(0)); + } + __catch(...) { + return nullptr; } - return ret; } -void* operator new (size_t size, const std::nothrow_t&) +// new_opvant +void* operator new[] (std::size_t size, std::align_val_t alignment, const std::nothrow_t&) noexcept { - void *ret = malloc(size); - if (0 != size && 0 == ret) { - umm_last_fail_alloc_addr = __builtin_return_address(0); - umm_last_fail_alloc_size = size; + DEBUG_NEW_OP_PRINTF(); + + __try { + return _heap_new_align(size, std::size_t(alignment), __builtin_return_address(0)); + } + __catch(...) 
{ + return nullptr; } - return ret; } -void* operator new[] (size_t size, const std::nothrow_t&) +// default alignment + +// new_op +void* operator new (std::size_t size) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_new_align(size, __STDCPP_DEFAULT_NEW_ALIGNMENT__, __builtin_return_address(0)); +} + +// new_opv +void* operator new[] (std::size_t size) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_new_align(size, __STDCPP_DEFAULT_NEW_ALIGNMENT__, __builtin_return_address(0)); +} + +// new_opnt +void* operator new (size_t size, const std::nothrow_t&) noexcept +{ + DEBUG_NEW_OP_PRINTF(); + + __try { + return _heap_new_align(size, __STDCPP_DEFAULT_NEW_ALIGNMENT__, __builtin_return_address(0)); + } + __catch (...) { + return nullptr; + } +} + +// new_opvnt +void* operator new[] (size_t size, const std::nothrow_t&) noexcept +{ + DEBUG_NEW_OP_PRINTF(); + + __try { + return _heap_new_align(size, __STDCPP_DEFAULT_NEW_ALIGNMENT__, __builtin_return_address(0)); + } + __catch (...) { + return nullptr; + } +} + +#else // ! UMM_ENABLE_MEMALIGN + +// Includes C++ exceptions +// Without C++17 operator new align variants + +static void* _heap_new(std::size_t size, const void* caller) +{ + void* p; + + while (nullptr == (p = _heap_abi_malloc(size, false, caller))) { + new_handler handler = std::get_new_handler(); + if (!handler) { + throw(bad_alloc()); + } + handler(); + } + + return p; +} + +void* operator new (std::size_t size) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_new(size, __builtin_return_address(0)); +} + +void* operator new[] (std::size_t size) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_new(size, __builtin_return_address(0)); +} + +void* operator new (size_t size, const std::nothrow_t&) noexcept +{ + DEBUG_NEW_OP_PRINTF(); + + __try { + return _heap_new(size, __builtin_return_address(0)); + } + __catch (...) 
{ + return nullptr; + } +} + +void* operator new[] (size_t size, const std::nothrow_t&) noexcept { - void *ret = malloc(size); - if (0 != size && 0 == ret) { - umm_last_fail_alloc_addr = __builtin_return_address(0); - umm_last_fail_alloc_size = size; + DEBUG_NEW_OP_PRINTF(); + + __try { + return _heap_new(size, __builtin_return_address(0)); + } + __catch (...) { + return nullptr; } - return ret; } +#endif // #if UMM_ENABLE_MEMALIGN + +#elif !defined(__cpp_exceptions) +// When doing builds with C++ Exceptions "disabled", always save details of +// the last OOM event. + +// overwrite weak operators new/new[] definitions + +#if defined(UMM_ENABLE_MEMALIGN) + +// Without C++ exceptions +// Includes C++17 operator new align variants + +void* operator new (size_t size, std::align_val_t alignment) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_memalign(std::size_t(alignment), size, true, __builtin_return_address(0)); +} + +void* operator new[] (size_t size, std::align_val_t alignment) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_memalign(std::size_t(alignment), size, true, __builtin_return_address(0)); +} + +void* operator new (size_t size, std::align_val_t alignment, const std::nothrow_t&) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_memalign(std::size_t(alignment), size, false, __builtin_return_address(0)); +} + +void* operator new[] (size_t size, std::align_val_t alignment, const std::nothrow_t&) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_memalign(std::size_t(alignment), size, false, __builtin_return_address(0)); +} + +// default alignment + +void* operator new (size_t size) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_memalign(__STDCPP_DEFAULT_NEW_ALIGNMENT__, size, true, __builtin_return_address(0)); +} + +void* operator new[] (size_t size) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_memalign(__STDCPP_DEFAULT_NEW_ALIGNMENT__, size, true, __builtin_return_address(0)); +} + +void* operator new (size_t size, const std::nothrow_t&) +{ + 
DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_memalign(__STDCPP_DEFAULT_NEW_ALIGNMENT__, size, false, __builtin_return_address(0)); +} + +void* operator new[] (size_t size, const std::nothrow_t&) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_memalign(__STDCPP_DEFAULT_NEW_ALIGNMENT__, size, false, __builtin_return_address(0)); +} + +#else + +// Without C++ exceptions +// Without C++17 operator new align variants + +void* operator new (size_t size) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_malloc(size, true, __builtin_return_address(0)); +} + +void* operator new[] (size_t size) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_malloc(size, true, __builtin_return_address(0)); +} + +void* operator new (size_t size, const std::nothrow_t&) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_malloc(size, false, __builtin_return_address(0)); +} + +void* operator new[] (size_t size, const std::nothrow_t&) +{ + DEBUG_NEW_OP_PRINTF(); + + return _heap_abi_malloc(size, false, __builtin_return_address(0)); +} +#endif // #elif !defined(__cpp_exceptions) #if defined(UMM_ENABLE_MEMALIGN) +#else +/* + The C++ Exception handlers in libstdc are using weaklinks. The "new" operators + that express alignment should work through libstdc via memalign() in the + umm_malloc library. While not likely to ever be needed, the Sketch can replace + the C++ "Replaceable allocation functions." + + Note that libstdc will fail errors in alignment value early. Thus, the + UMM_STATS_FULL alignment error count will be zero. + + This saves about 20 bytes in the UMM_ENABLE_MEMALIGN=1 case and 32 bytes when + UMM_ENABLE_MEMALIGN=0. 
+*/ +#endif // #if defined(__cpp_exceptions) -#endif // !defined(__cpp_exceptions) void __cxa_pure_virtual(void) { diff --git a/cores/esp8266/core_esp8266_postmortem.cpp b/cores/esp8266/core_esp8266_postmortem.cpp index 95844534e8..1e03ec7e61 100644 --- a/cores/esp8266/core_esp8266_postmortem.cpp +++ b/cores/esp8266/core_esp8266_postmortem.cpp @@ -33,29 +33,10 @@ #include "gdb_hooks.h" #include "StackThunk.h" #include "coredecls.h" +#include "umm_malloc/umm_malloc.h" extern "C" { -// These will be pointers to PROGMEM const strings -static const char* s_panic_file = 0; -static int s_panic_line = 0; -static const char* s_panic_func = 0; -static const char* s_panic_what = 0; - -// Our wiring for abort() and C++ exceptions -static bool s_abort_called = false; -static const char* s_unhandled_exception = NULL; - -// Common way to notify about where the stack smashing happened -// (but, **only** if caller uses our handler function) -static uint32_t s_stack_chk_addr = 0; - -void abort() __attribute__((noreturn)); -static void uart_write_char_d(char c); -static void uart0_write_char_d(char c); -static void uart1_write_char_d(char c); -static void print_stack(uint32_t start, uint32_t end); - // using numbers different from "REASON_" in user_interface.h (=0..6) enum rst_reason_sw { @@ -63,15 +44,44 @@ enum rst_reason_sw REASON_USER_STACK_SMASH = 253, REASON_USER_SWEXCEPTION_RST = 254 }; -static int s_user_reset_reason = REASON_DEFAULT_RST; + +// Confirmed on 12/17/22: s_pm is in the .bss section and is in the +// _bss_start/end range to be zeroed by the SDK this happens after the SDK first +// calls to Cache_Read_Enable_New. 
+static struct PostmortemInfo { + int user_reset_reason = REASON_DEFAULT_RST; + + // These will be pointers to PROGMEM const strings + const char* panic_file = 0; + int panic_line = 0; + const char* panic_func = 0; + const char* panic_what = 0; + + // Our wiring for abort() and C++ exceptions + bool abort_called = false; + const char* unhandled_exception = NULL; + + // Common way to notify about where the stack smashing happened + // (but, **only** if caller uses our handler function) + uint32_t stack_chk_addr = 0; +} s_pm; // From UMM, the last caller of a malloc/realloc/calloc which failed: -extern void *umm_last_fail_alloc_addr; -extern int umm_last_fail_alloc_size; +extern struct umm_last_fail_alloc { + const void *addr; + size_t size; #if defined(DEBUG_ESP_OOM) -extern const char *umm_last_fail_alloc_file; -extern int umm_last_fail_alloc_line; + const char *file; + int line; #endif +} _umm_last_fail_alloc; + + +void abort() __attribute__((noreturn)); +static void uart_write_char_d(char c); +static void uart0_write_char_d(char c); +static void uart1_write_char_d(char c); +static void print_stack(uint32_t start, uint32_t end); static void raise_exception() __attribute__((noreturn)); @@ -139,7 +149,7 @@ asm( static void postmortem_report(uint32_t sp_dump) { struct rst_info rst_info; memset(&rst_info, 0, sizeof(rst_info)); - if (s_user_reset_reason == REASON_DEFAULT_RST) + if (s_pm.user_reset_reason == REASON_DEFAULT_RST) { system_rtc_mem_read(0, &rst_info, sizeof(rst_info)); if (rst_info.reason != REASON_SOFT_WDT_RST && @@ -150,26 +160,26 @@ static void postmortem_report(uint32_t sp_dump) { } } else - rst_info.reason = s_user_reset_reason; + rst_info.reason = s_pm.user_reset_reason; ets_install_putc1(&uart_write_char_d); cut_here(); - if (s_panic_line) { - ets_printf_P(PSTR("\nPanic %S:%d %S"), s_panic_file, s_panic_line, s_panic_func); - if (s_panic_what) { - ets_printf_P(PSTR(": Assertion '%S' failed."), s_panic_what); + if (s_pm.panic_line) { + 
ets_printf_P(PSTR("\nPanic %S:%d %S"), s_pm.panic_file, s_pm.panic_line, s_pm.panic_func); + if (s_pm.panic_what) { + ets_printf_P(PSTR(": Assertion '%S' failed."), s_pm.panic_what); } ets_putc('\n'); } - else if (s_panic_file) { - ets_printf_P(PSTR("\nPanic %S\n"), s_panic_file); + else if (s_pm.panic_file) { + ets_printf_P(PSTR("\nPanic %S\n"), s_pm.panic_file); } - else if (s_unhandled_exception) { - ets_printf_P(PSTR("\nUnhandled C++ exception: %S\n"), s_unhandled_exception); + else if (s_pm.unhandled_exception) { + ets_printf_P(PSTR("\nUnhandled C++ exception: %S\n"), s_pm.unhandled_exception); } - else if (s_abort_called) { + else if (s_pm.abort_called) { ets_printf_P(PSTR("\nAbort called\n")); } else if (rst_info.reason == REASON_EXCEPTION_RST) { @@ -199,7 +209,7 @@ static void postmortem_report(uint32_t sp_dump) { rst_info.exccause, /* Address executing at time of Soft WDT level-1 interrupt */ rst_info.epc1, 0, 0, 0, 0); } else if (rst_info.reason == REASON_USER_STACK_SMASH) { - ets_printf_P(PSTR("\nStack smashing detected at 0x%08x\n"), s_stack_chk_addr); + ets_printf_P(PSTR("\nStack smashing detected at 0x%08x\n"), s_pm.stack_chk_addr); } else if (rst_info.reason == REASON_USER_STACK_OVERFLOW) { ets_printf_P(PSTR("\nStack overflow detected\n")); @@ -210,7 +220,7 @@ static void postmortem_report(uint32_t sp_dump) { uint32_t cont_stack_start; if (rst_info.reason == REASON_USER_STACK_SMASH) { - cont_stack_start = s_stack_chk_addr; + cont_stack_start = s_pm.stack_chk_addr; } else { cont_stack_start = (uint32_t) (&g_pcont->stack[0]); } @@ -229,7 +239,7 @@ static void postmortem_report(uint32_t sp_dump) { // 16 ?unnamed? 
- index into a table, pull out pointer, and call if non-zero // appears near near wDev_ProcessFiq // 32 pp_soft_wdt_feed_local - gather the specifics and call __wrap_system_restart_local - offset = 32 + 16 + 48 + 256; + offset = 32 + 16 + 48 + 256; } else if (rst_info.reason == REASON_EXCEPTION_RST) { // Stack Tally @@ -280,24 +290,33 @@ static void postmortem_report(uint32_t sp_dump) { ets_printf_P(PSTR("<<stack[0]; + s_pm.user_reset_reason = REASON_USER_STACK_OVERFLOW; + s_pm.stack_chk_addr = (uint32_t)&cont->stack[0]; if (gdb_present()) __asm__ __volatile__ ("syscall"); // triggers GDB when enabled diff --git a/cores/esp8266/heap.cpp b/cores/esp8266/heap.cpp index 54db1ede4a..4006584f5b 100644 --- a/cores/esp8266/heap.cpp +++ b/cores/esp8266/heap.cpp @@ -2,9 +2,123 @@ * Copyright (c) 2016 Ivan Grokhotkov. All rights reserved. * This file is distributed under MIT license. */ +/* + * On the Arduino ESP8266 platform, there are four heap API families in use: + * * The C++ `new`/`delete` operators - libstdc/libsupc + * * The legacy `malloc`, ... often used by "C" programs + * * An internal LIBC library `_malloc_r`, ... + * * The portable heap APIs, `pvPortMalloc`, ... for embedded platforms. + * Used by NONOS SDK and LWIP. + * + * Their is only one Heap. A thin wrapper or shim is used to access the + * underlying heap library. The underlying heap library is a local port of + * `umm_malloc` from https://github.com/rhempel/umm_malloc/. + * + * This module supplies a convergence point and a redirect to umm_malloc for + * Heap APIs families malloc, pvPortMalloc, "new", and _malloc_r. It builds as + * either a "thin wrapper" or a "thick wrapper" to capture Heap debug info and + * diagnostics. + * + * Even though ISRs should not perform Heap API calls, we protect the call path + * with IRAM_ATTR for malloc APIs and pvPortMalloc APIs. "new" and _malloc_r + * (LIBC) are unprotected. 
+ * Inventory of debug options supported by this module
On each call to free or realloc, test the current + * allocation's poison areas, then each active allocation's neighbor is + * tested. During each successful malloc/calloc, check the neighbors of the + * free block before resizing to fulfill the request. + * + * In the absence of other UMM_POISON_... options, this option assumes + * "enabled" when Tools->Debug: Serial is selected or Tools->Debug level: + * "CORE" is selected. Otherwise, you may enable it via a build option + * definition. + * + * While coverage is not 100%, a sketch is less likely to have strange + * behavior from heavy heap access with interrupts disabled. Also, with + * UMM_POISON_CHECK_LITE, more caller context is available at "poison fail." + * If you need more perspective, continue reading "UMM_POISON_CHECK." + * + * * UMM_POISON_CHECK - Adds and presets 4 bytes of poison at the beginning + * and end of each allocation. At each Heap API call, performs a global Heap + * poison data verification. This check runs with interrupts disabled and may + * affect WiFi performance and possibly system stability. + * + * As the number of active heap allocations grows, this option will cause + * increasingly long periods with interrupts disabled, adversely affecting + * time-critical sketches. + * + * Enable via a build option define. + * + * * UMM_INTEGRITY_CHECK - will verify that the Heap is semantically correct + * and that all the block indexes make sense. While it can catch errors + * quickly, the check runs with interrupts disabled and may affect WiFi + * performance and maybe system stability. + * + * As the number of active heap allocations grows, this option will cause + * increasingly long periods with interrupts disabled, adversely affecting + * time-critical sketches. + * + * Enable via a build option define. + * + * IMHO, UMM_INTEGRITY_CHECK is best suited for heap library verification + * rather than general debugging. 
It will detect heap corruption; however, it + * offers little aid in determining who did it. + * + * While not as comprehensive as UMM_INTEGRITY_CHECK, using + * UMM_POISON_CHECK_LITE should reveal most heap corruptions with lower + * overhead. + * + * DEV_DEBUG_ABI_CPP - Not normally needed. Its intended use is for module + * code maintenance. Use DEV_DEBUG_ABI_CPP when debugging the new/delete + * overload wrappers in abi.cpp and heap.cpp. To use in a test Sketch, add + * "extern bool abi_new_print;" and set abi_new_print=true/false around test + * function calls. Also, set '-DDEV_DEBUG_ABI_CPP=1' in Sketch.ino.globals.h. + * With this option, using print functions in the test Sketch that performs + * allocs can create a confusing array of debug prints. For this reason, I + * often use ets_uart_printf in alloc test Sketches. + */ #include #include "umm_malloc/umm_malloc.h" + +extern "C" size_t umm_uadd_sat(const size_t a, const size_t b); extern "C" size_t umm_umul_sat(const size_t a, const size_t b); // z2EapFree: See wpa2_eap_patch.cpp for details @@ -13,7 +127,8 @@ extern "C" void z2EapFree(void *ptr, const char* file, int line) __attribute__(( // Adding "__attribute__ ((nothrow))" seems to resolve the issue. // This may be relevant: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81824 -// Need FORCE_ALWAYS_INLINE to put HeapSelect class constructor/deconstructor in IRAM +// For "pvPortMalloc" API wrappers, use FORCE_ALWAYS_INLINE to put HeapSelect +// class constructor/deconstructor in IRAM. 
#define FORCE_ALWAYS_INLINE_HEAP_SELECT #include "umm_malloc/umm_heap_select.h" @@ -23,368 +138,567 @@ extern "C" void z2EapFree(void *ptr, const char* file, int line) __attribute__(( extern "C" { -#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) -#define UMM_MALLOC(s) umm_poison_malloc(s) -#define UMM_CALLOC(n,s) umm_poison_calloc(n,s) -#define UMM_REALLOC_FL(p,s,f,l) umm_poison_realloc_fl(p,s,f,l) -#define UMM_FREE_FL(p,f,l) umm_poison_free_fl(p,f,l) -#define STATIC_ALWAYS_INLINE +#define DEBUG_HEAP_PRINTF ets_uart_printf -#undef realloc -#undef free +inline bool withinISR(uint32_t ps) { + return ((ps & 0x0f) != 0); +} -#elif defined(DEBUG_ESP_OOM) || defined(UMM_INTEGRITY_CHECK) -#define UMM_MALLOC(s) umm_malloc(s) -#define UMM_CALLOC(n,s) umm_calloc(n,s) -#define UMM_REALLOC_FL(p,s,f,l) umm_realloc(p,s) -#define UMM_FREE_FL(p,f,l) umm_free(p) -#define STATIC_ALWAYS_INLINE +/////////////////////////////////////////////////////////////////////////////// +// Select from various heap function renames that facilitate inserting debug +// wrappers, and tradditional names for the non-debug case. +// +/* + * With any debug options listed above, umm_malloc changes its heap API names + * from malloc, calloc, realloc, and free to umm_malloc, umm_calloc, + * umm_realloc, and umm_free. + * + */ +#undef STATIC_ALWAYS_INLINE +#undef ENABLE_THICK_DEBUG_WRAPPERS + +// Whether using thick or thin wrappers, we want none of these redefined. +// Position these "#undefs" after all includes. +#undef malloc +#undef calloc #undef realloc #undef free +#undef memalign -#else // ! UMM_POISON_CHECK && ! DEBUG_ESP_OOM -#define UMM_MALLOC(s) malloc(s) -#define UMM_CALLOC(n,s) calloc(n,s) -#define UMM_REALLOC_FL(p,s,f,l) realloc(p,s) -#define UMM_FREE_FL(p,f,l) free(p) +#if defined(UMM_POISON_CHECK_LITE) +/* + * umm_malloc will build with umm_poison_* wrappers for each Heap API. 
+ * + * Support debug wrappers that need to include handling poison + */ +#define UMM_MALLOC(s) umm_poison_malloc(s) +#define UMM_MEMALIGN(a,s) umm_posion_memalign(a,s) +#define UMM_CALLOC(n,s) umm_poison_calloc(n,s) +#define UMM_REALLOC_FL(p,s,f,l,c) umm_poison_realloc_flc(p,s,f,l,c) +#define UMM_FREE_FL(p,f,l,c) umm_poison_free_flc(p,f,l,c) +#define ENABLE_THICK_DEBUG_WRAPPERS + +#elif defined(UMM_POISON_CHECK) +/* + * umm_malloc will build with umm_poison_* wrappers for each Heap API. + * + * Support debug wrappers that need to include handling poison + */ +#define UMM_MALLOC(s) umm_poison_malloc(s) +#define UMM_MEMALIGN(a,s) umm_posion_memalign(a,s) +#define UMM_CALLOC(n,s) umm_poison_calloc(n,s) +#define UMM_REALLOC_FL(p,s,f,l,c) umm_poison_realloc(p,s) +#define UMM_FREE_FL(p,f,l,c) umm_poison_free(p) +#define ENABLE_THICK_DEBUG_WRAPPERS + +#elif defined(DEBUG_ESP_OOM) || defined(UMM_INTEGRITY_CHECK) || defined(DEBUG_ESP_WITHINISR) +// All other debug wrappers that do not require handling poison +#define UMM_MALLOC(s) umm_malloc(s) +#define UMM_MEMALIGN(a,s) umm_memalign(a,s) +#define UMM_CALLOC(n,s) umm_calloc(n,s) +#define UMM_REALLOC_FL(p,s,f,l,c) umm_realloc(p,s) +#define UMM_FREE_FL(p,f,l,c) umm_free(p) +#define ENABLE_THICK_DEBUG_WRAPPERS -// STATIC_ALWAYS_INLINE only applies to the non-debug build path, -// it must not be enabled on the debug build path. -#define STATIC_ALWAYS_INLINE static ALWAYS_INLINE +#else // ! UMM_POISON_CHECK && ! DEBUG_ESP_OOM +extern "C" void* memalign(size_t alignment, size_t size); +// Used to create thin heap wrappers not for debugging. 
+#define UMM_MALLOC(s) malloc(s) +#define UMM_MEMALIGN(a,s) memalign(a,s) +#define UMM_CALLOC(n,s) calloc(n,s) +#define UMM_REALLOC(p,s) realloc(p,s) +#define UMM_FREE(p) free(p) #endif +/////////////////////////////////////////////////////////////////////////////// +// UMM_POISON_CHECK wrapper macros +// +// Take care not to blame the messenger; the function (file/line) that resulted +// in the discovery may not be directly responsible for the damage. We could use +// abort; however, using panic may provide some hints of the location of the +// problem. +// +// Note well, the failure is a discovery of an error that could have occurred at +// any time between calls to POISON_CHECK. +// #if defined(UMM_POISON_CHECK) - #define POISON_CHECK__ABORT() \ - do { \ - if ( ! POISON_CHECK() ) \ - abort(); \ - } while(0) - - #define POISON_CHECK__PANIC_FL(file, line) \ + #define POISON_CHECK__PANIC_FL(f, l, c) \ do { \ - if ( ! POISON_CHECK() ) \ - __panic_func(file, line, ""); \ + if ( ! POISON_CHECK() ) { \ + __panic_func(f, l, ""); \ + } \ } while(0) -#else // No full heap poison checking. - #define POISON_CHECK__ABORT() do {} while(0) - #define POISON_CHECK__PANIC_FL(file, line) do { (void)file; (void)line; } while(0) +#else + // Disable full heap poison checking. + #define POISON_CHECK__PANIC_FL(f, l, c) do { (void)f; (void)l; (void)c; } while(0) #endif -// Debugging helper, last allocation which returned NULL -void *umm_last_fail_alloc_addr = NULL; -int umm_last_fail_alloc_size = 0; -#if defined(DEBUG_ESP_OOM) -const char *umm_last_fail_alloc_file = NULL; -int umm_last_fail_alloc_line = 0; -#endif +/////////////////////////////////////////////////////////////////////////////// +// UMM_INTEGRITY_CHECK wrapper macros +// +// (Caution notes of UMM_POISON_CHECK also apply here.) +// #ifdef UMM_INTEGRITY_CHECK -#define INTEGRITY_CHECK__ABORT() \ +#define INTEGRITY_CHECK__PANIC_FL(f, l, c) \ do { \ - if ( ! 
INTEGRITY_CHECK() ) \ - abort(); \ - } while(0) - -#define INTEGRITY_CHECK__PANIC_FL(file, line) \ - do { \ - if ( ! INTEGRITY_CHECK() ) \ - __panic_func(file, line, ""); \ + if ( ! INTEGRITY_CHECK() ) { \ + __panic_func(f, l, ""); \ + } \ } while(0) #else // ! UMM_INTEGRITY_CHECK -#define INTEGRITY_CHECK__ABORT() do {} while(0) -#define INTEGRITY_CHECK__PANIC_FL(file, line) do { (void)file; (void)line; } while(0) - +#define INTEGRITY_CHECK__PANIC_FL(f, l, c) do { (void)f; (void)l; (void)c; } while(0) #endif // UMM_INTEGRITY_CHECK + +/////////////////////////////////////////////////////////////////////////////// +// OOM - this structure variable is always in use by abi.cpp - except for +// C++ Exceptions "enabled" builds. +// +// When building with C++ Exceptions "disabled" or debug build, +// always track last failed caller and size requested +#if defined(DEBUG_ESP_OOM) +struct umm_last_fail_alloc { + const void *addr = { nullptr }; + size_t size = { 0 }; + const char *file = { nullptr }; + int line = { 0 }; +} _umm_last_fail_alloc; + +#else +// Note for the least used case "(defined(__cpp_exceptions) && +// !defined(DEBUG_ESP_OOM))", we only capture details for LIBC calls. +struct umm_last_fail_alloc { + const void *addr = { nullptr }; + size_t size = { 0 }; +} _umm_last_fail_alloc; +#endif + +/////////////////////////////////////////////////////////////////////////////// +// OOM - DEBUG_ESP_OOM extends monitoring for OOM to capture caller information +// across various Heap entry points and their aliases. +// +// data capture wrapper macros and defines +// Debugging helper, save the last caller address that got a NULL pointer +// response. And when available, the file and line number. 
 #if defined(DEBUG_ESP_OOM)
-#define PTR_CHECK__LOG_LAST_FAIL_FL(p, s, f, l) \
-    if(0 != (s) && 0 == p)\
-    {\
-        umm_last_fail_alloc_addr = __builtin_return_address(0);\
-        umm_last_fail_alloc_size = s;\
-        umm_last_fail_alloc_file = f;\
-        umm_last_fail_alloc_line = l;\
+
+// OOM - Debug printing
+//
+// IRQ/ISR safe printing macros. Printing is controlled according to the results
+// of system_get_os_print(). Also, being in an IRQ may prevent the printing of
+// file names stored in PROGMEM. The PROGMEM address to the string is printed in
+// its place.
+static void IRAM_ATTR print_loc(bool inISR, size_t size, const char* file, int line, const void* caller) {
+    if (system_get_os_print()) {
+        DEBUG_HEAP_PRINTF(":oom %p(%d), File: ", caller, (int)size);
+        if (file) {
+            // Small code reduction by assuming file address is in PROGMEM
+            if (inISR) {
+                DEBUG_HEAP_PRINTF("%p", file);
+            } else {
+                size_t sz = strlen_P(file);
+                char buf[sz + 1];
+                strcpy_P(buf, file);
+                DEBUG_HEAP_PRINTF(buf);
+            }
+        } else {
+            DEBUG_HEAP_PRINTF("??");
+        }
+        DEBUG_HEAP_PRINTF(":%d\n", line);
     }
-#define PTR_CHECK__LOG_LAST_FAIL(p, s) \
-    if(0 != (s) && 0 == p)\
-    {\
-        umm_last_fail_alloc_addr = __builtin_return_address(0);\
-        umm_last_fail_alloc_size = s;\
-        umm_last_fail_alloc_file = NULL;\
-        umm_last_fail_alloc_line = 0;\
+}
+
+static bool IRAM_ATTR oom_check__log_last_fail_atomic_psflc(void *ptr, size_t size, const char* file, int line, const void* caller) {
+    if (0 != (size) && 0 == ptr) {
+        // Ensure changes to umm_last_fail_alloc are atomic. Otherwise, with a
+        // poorly timed interrupt, OOM details would mix between foreground and
+        // IRQ paths.
+ uint32_t saved_ps = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL); + _umm_last_fail_alloc.addr = caller; + _umm_last_fail_alloc.size = size; + _umm_last_fail_alloc.file = file; + _umm_last_fail_alloc.line = line; + print_loc(withinISR(saved_ps), size, file, line, caller); + xt_wsr_ps(saved_ps); + return false; } + return true; +} +#define OOM_CHECK__LOG_LAST_FAIL_FL(p, s, f, l, c) oom_check__log_last_fail_atomic_psflc(p, s, f, l, c) +#define OOM_CHECK__LOG_LAST_FAIL_LITE_FL(p, s, c) ({ (void)p; (void)s; (void)c; true; }) + +#elif defined(ENABLE_THICK_DEBUG_WRAPPERS) +static bool IRAM_ATTR oom_check__log_last_fail_atomic_psc(void *ptr, size_t size, const void* caller) { + if (0 != (size) && 0 == ptr) { + // Need to ensure changes to umm_last_fail_alloc are atomic. + uint32_t saved_ps = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL); + _umm_last_fail_alloc.addr = caller; + _umm_last_fail_alloc.size = size; + xt_wsr_ps(saved_ps); + return false; + } + return true; +} +#define OOM_CHECK__LOG_LAST_FAIL_FL(p, s, f, l, c) oom_check__log_last_fail_atomic_psc(p, s, c) +#define OOM_CHECK__LOG_LAST_FAIL_LITE_FL(p, s, c) ({ (void)p; (void)s; (void)c; true; }) + #else -#define PTR_CHECK__LOG_LAST_FAIL_FL(p, s, f, l) \ - (void)f;\ - (void)l;\ - if(0 != (s) && 0 == p)\ - {\ - umm_last_fail_alloc_addr = __builtin_return_address(0);\ - umm_last_fail_alloc_size = s;\ +// At this location, the macro is only used by Heap API families "new" and +// _malloc_r, which are not called from ISRs. Thus, the non-debug OOM call path +// does not require the IRAM_ATTR attribute. 
+static bool oom_check__log_last_fail_psc(void *ptr, size_t size, const void* caller) {
+    if (0 != (size) && 0 == ptr) {
+        _umm_last_fail_alloc.addr = caller;
+        _umm_last_fail_alloc.size = size;
+        return false;
     }
-#define PTR_CHECK__LOG_LAST_FAIL(p, s) \
-    if(0 != (s) && 0 == p)\
-    {\
-        umm_last_fail_alloc_addr = __builtin_return_address(0);\
-        umm_last_fail_alloc_size = s;\
+    return true;
+}
+// Used to capture minimum OOM info for "new" and LIBC
+#define OOM_CHECK__LOG_LAST_FAIL_LITE_FL(p, s, c) oom_check__log_last_fail_psc(p, s, c)
+#define OOM_CHECK__LOG_LAST_FAIL_FL(p, s, c) ({ (void)p; (void)s; (void)c; true; })
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Monitor Heap APIs in flash for calls from ISRs
+//
+#if DEBUG_ESP_WITHINISR
+static void isr_check__flash_not_safe(const void *caller) {
+    if (ETS_INTR_WITHINISR()) { // Assumes, non-zero INTLEVEL means in ISR
+        DEBUG_HEAP_PRINTF("\nIn-flash, Heap API call from %p with Interrupts Disabled.\n", caller);
     }
+}
+#define ISR_CHECK__LOG_NOT_SAFE(c) isr_check__flash_not_safe(c)
+#else
+#define ISR_CHECK__LOG_NOT_SAFE(c) do { (void)c; } while(false)
 #endif
-void* _malloc_r(struct _reent* unused, size_t size)
+
+#ifdef ENABLE_THICK_DEBUG_WRAPPERS
+///////////////////////////////////////////////////////////////////////////////
+// Thick Heap API wrapper for debugging: malloc, pvPortMalloc, "new", and
+// _malloc_r families of heap APIs.
+//
+// While UMM_INTEGRITY_CHECK and UMM_POISON_CHECK are included, the Arduino IDE
+// has no selection to build with them. Both are CPU intensive and can adversely
+// affect the WiFi operation. For completeness they are all included in the
+// list below. Both UMM_INTEGRITY_CHECK and UMM_POISON_CHECK can be enabled by
+// build defines.
+//
+// A debug build will use option UMM_POISON_CHECK_LITE by default. If explicitly
+// specifying UMM_POISON_CHECK_LITE or UMM_POISON_CHECK, only one is permitted
+// in a Build.
+//
+// When selected, do Integrity Check first. Verifies the heap management
+// information is not corrupt. Followed by Full Poison Check before *alloc
+// operation.
+
+///////////////////////////////////////////////////////////////////////////////
+// Common Heap debug helper functions for each alloc operation
+//
+// Used by debug wrapper for:
+// * portable malloc API, pvPortMalloc, ...
+// * LIBC variation, _malloc_r, ...
+// * "fancy macros" that call heap_pvPortMalloc, ...
+// * Fallback for uncaptured malloc API calls, malloc, ...
+//
+void* IRAM_ATTR _heap_pvPortMalloc(size_t size, const char* file, int line, const void* caller)
 {
-    (void) unused;
-    void *ret = malloc(size);
-    PTR_CHECK__LOG_LAST_FAIL(ret, size);
+    INTEGRITY_CHECK__PANIC_FL(file, line, caller);
+    POISON_CHECK__PANIC_FL(file, line, caller);
+    void* ret = UMM_MALLOC(size);
+    OOM_CHECK__LOG_LAST_FAIL_FL(ret, size, file, line, caller);
     return ret;
 }
-void _free_r(struct _reent* unused, void* ptr)
+void* IRAM_ATTR _heap_pvPortCalloc(size_t count, size_t size, const char* file, int line, const void* caller)
 {
-    (void) unused;
-    free(ptr);
+    INTEGRITY_CHECK__PANIC_FL(file, line, caller);
+    POISON_CHECK__PANIC_FL(file, line, caller);
+    size_t total_size = umm_umul_sat(count, size);
+    void* ret = UMM_CALLOC(1, total_size);
+    OOM_CHECK__LOG_LAST_FAIL_FL(ret, total_size, file, line, caller);
+    return ret;
 }
-void* _realloc_r(struct _reent* unused, void* ptr, size_t size)
+void* IRAM_ATTR _heap_pvPortRealloc(void* ptr, size_t size, const char* file, int line, const void* caller)
 {
-    (void) unused;
-    void *ret = realloc(ptr, size);
-    PTR_CHECK__LOG_LAST_FAIL(ret, size);
+    INTEGRITY_CHECK__PANIC_FL(file, line, caller);
+    POISON_CHECK__PANIC_FL(file, line, caller);
+    void* ret = UMM_REALLOC_FL(ptr, size, file, line, caller);
+    OOM_CHECK__LOG_LAST_FAIL_FL(ret, size, file, line, caller);
     return ret;
 }
-void* _calloc_r(struct _reent* unused, size_t count, size_t size)
+void IRAM_ATTR _heap_vPortFree(void* 
ptr, const char* file, int line, [[maybe_unused]] const void* caller) { - (void) unused; - void *ret = calloc(count, size); - PTR_CHECK__LOG_LAST_FAIL(ret, umm_umul_sat(count, size)); - return ret; + INTEGRITY_CHECK__PANIC_FL(file, line, caller); + POISON_CHECK__PANIC_FL(file, line, caller); + UMM_FREE_FL(ptr, file, line, caller); } -#ifdef DEBUG_ESP_OOM -#undef malloc -#undef calloc -#undef realloc - -#define DEBUG_HEAP_PRINTF ets_uart_printf - -void IRAM_ATTR print_loc(size_t size, const char* file, int line) +#if UMM_ENABLE_MEMALIGN +// The base name pvPortMemalign is a fabrication. I am not aware of an alignment +// option for the portable malloc library; however, I need one for the way these +// debug wrappers are set up. +void* IRAM_ATTR _heap_pvPortMemalign(size_t alignment, size_t size, const char* file, int line, const void* caller) { - (void)size; - (void)line; - if (system_get_os_print()) { - DEBUG_HEAP_PRINTF(":oom(%d)@", (int)size); - - bool inISR = ETS_INTR_WITHINISR(); - if (NULL == file || (inISR && (uint32_t)file >= 0x40200000)) { - DEBUG_HEAP_PRINTF("File: %p", file); - } else if (!inISR && (uint32_t)file >= 0x40200000) { - char buf[strlen_P(file) + 1]; - strcpy_P(buf, file); - DEBUG_HEAP_PRINTF(buf); - } else { - DEBUG_HEAP_PRINTF(file); - } - - DEBUG_HEAP_PRINTF(":%d\n", line); - } + INTEGRITY_CHECK__PANIC_FL(file, line, caller); + POISON_CHECK__PANIC_FL(file, line, caller); + void* ret = UMM_MEMALIGN(alignment, size); + OOM_CHECK__LOG_LAST_FAIL_FL(ret, size, file, line, caller); + return ret; } +#endif -void IRAM_ATTR print_oom_size(size_t size) +/////////////////////////////////////////////////////////////////////////////// +// Heap debug wrappers used by "fancy debug macros" to capture caller's context: +// module name, line no. and caller return address. 
+// The "fancy debug macros" are defined in `heap_api_debug.h` +void* IRAM_ATTR heap_pvPortMalloc(size_t size, const char* file, int line) { - (void)size; - if (system_get_os_print()) { - DEBUG_HEAP_PRINTF(":oom(%d)@?\n", (int)size); - } + return _heap_pvPortMalloc(size, file, line, __builtin_return_address(0)); } -#define OOM_CHECK__PRINT_OOM(p, s) if ((s) && !(p)) print_oom_size(s) -#define OOM_CHECK__PRINT_LOC(p, s, f, l) if ((s) && !(p)) print_loc(s, f, l) +void* IRAM_ATTR heap_pvPortCalloc(size_t count, size_t size, const char* file, int line) +{ + return _heap_pvPortCalloc(count, size, file, line, __builtin_return_address(0)); +} -#else // ! DEBUG_ESP_OOM +void* IRAM_ATTR heap_pvPortRealloc(void* ptr, size_t size, const char* file, int line) +{ + return _heap_pvPortRealloc(ptr, size, file, line, __builtin_return_address(0)); +} -#if 1 -//C - to be discussed - is this what you want? -//C Skip OOM logging of last fail for malloc/... and pvPort... . -//C It cost 64 more bytes of IRAM to turn on. And was not previously enabled. -#undef PTR_CHECK__LOG_LAST_FAIL_FL -#define PTR_CHECK__LOG_LAST_FAIL_FL(p, s, f, l) -#undef PTR_CHECK__LOG_LAST_FAIL -#define PTR_CHECK__LOG_LAST_FAIL(p, s) -#endif +void IRAM_ATTR heap_vPortFree(void* ptr, const char* file, int line) +{ + return _heap_vPortFree(ptr, file, line, __builtin_return_address(0)); +} -#define OOM_CHECK__PRINT_OOM(p, s) -#define OOM_CHECK__PRINT_LOC(p, s, f, l) +#if UMM_ENABLE_MEMALIGN +void* IRAM_ATTR heap_pvPortMemalign(size_t alignment, size_t size, const char* file, int line) +{ + return _heap_pvPortMemalign(alignment, size, file, line, __builtin_return_address(0)); +} #endif -#if defined(DEBUG_ESP_OOM) || defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) || defined(UMM_INTEGRITY_CHECK) -/* - The thinking behind the ordering of Integrity Check, Full Poison Check, and - the specific *alloc function. - - 1. Integrity Check - verifies the heap management information is not corrupt. 
- This allows any other testing, that walks the heap, to run safely. - - 2. Place Full Poison Check before or after a specific *alloc function? - a. After, when the *alloc function operates on an existing allocation. - b. Before, when the *alloc function creates a new, not modified, allocation. - - In a free() or realloc() call, the focus is on their allocation. It is - checked 1st and reported on 1ST if an error exists. Full Poison Check is - done after. - - For malloc(), calloc(), and zalloc() Full Poison Check is done 1st since - these functions do not modify an existing allocation. -*/ +/////////////////////////////////////////////////////////////////////////////// +// Heap debug wrappers used to captured any remaining standard heap api calls void* IRAM_ATTR malloc(size_t size) { - INTEGRITY_CHECK__ABORT(); - POISON_CHECK__ABORT(); - void* ret = UMM_MALLOC(size); - PTR_CHECK__LOG_LAST_FAIL(ret, size); - OOM_CHECK__PRINT_OOM(ret, size); - return ret; + return _heap_pvPortMalloc(size, NULL, 0, __builtin_return_address(0)); } void* IRAM_ATTR calloc(size_t count, size_t size) { - INTEGRITY_CHECK__ABORT(); - POISON_CHECK__ABORT(); - void* ret = UMM_CALLOC(count, size); - #if defined(DEBUG_ESP_OOM) - size_t total_size = umm_umul_sat(count, size);// For logging purposes - #endif - PTR_CHECK__LOG_LAST_FAIL(ret, total_size); - OOM_CHECK__PRINT_OOM(ret, total_size); - return ret; + return _heap_pvPortCalloc(count, size, NULL, 0, __builtin_return_address(0)); } void* IRAM_ATTR realloc(void* ptr, size_t size) { - INTEGRITY_CHECK__ABORT(); - void* ret = UMM_REALLOC_FL(ptr, size, NULL, 0); - POISON_CHECK__ABORT(); - PTR_CHECK__LOG_LAST_FAIL(ret, size); - OOM_CHECK__PRINT_OOM(ret, size); - return ret; + return _heap_pvPortRealloc(ptr, size, NULL, 0, __builtin_return_address(0)); +} + +void IRAM_ATTR free(void* ptr) +{ + _heap_vPortFree(ptr, NULL, 0, __builtin_return_address(0)); } -void IRAM_ATTR free(void* p) +#if UMM_ENABLE_MEMALIGN +void* IRAM_ATTR memalign(size_t 
alignment, size_t size) { - INTEGRITY_CHECK__ABORT(); - UMM_FREE_FL(p, NULL, 0); - POISON_CHECK__ABORT(); + return _heap_pvPortMemalign(alignment, size, NULL, 0, __builtin_return_address(0)); } #endif +#else +/////////////////////////////////////////////////////////////////////////////// +// Non-debug path +// +// Make Non-debug Portable Heap wrappers ultra thin with STATIC_ALWAYS_INLINE +#define STATIC_ALWAYS_INLINE static ALWAYS_INLINE + STATIC_ALWAYS_INLINE -void* IRAM_ATTR heap_pvPortMalloc(size_t size, const char* file, int line) +void* IRAM_ATTR _heap_pvPortMalloc(size_t size, const char* file, int line, const void* caller) { - INTEGRITY_CHECK__PANIC_FL(file, line); - POISON_CHECK__PANIC_FL(file, line); - void* ret = UMM_MALLOC(size); - PTR_CHECK__LOG_LAST_FAIL_FL(ret, size, file, line); - OOM_CHECK__PRINT_LOC(ret, size, file, line); - return ret; + (void)file; + (void)line; + (void)caller; + return UMM_MALLOC(size); } STATIC_ALWAYS_INLINE -void* IRAM_ATTR heap_pvPortCalloc(size_t count, size_t size, const char* file, int line) +void* IRAM_ATTR _heap_pvPortCalloc(size_t count, size_t size, const char* file, int line, const void* caller) { - INTEGRITY_CHECK__PANIC_FL(file, line); - POISON_CHECK__PANIC_FL(file, line); - void* ret = UMM_CALLOC(count, size); - #if defined(DEBUG_ESP_OOM) - size_t total_size = umm_umul_sat(count, size); - #endif - PTR_CHECK__LOG_LAST_FAIL_FL(ret, total_size, file, line); - OOM_CHECK__PRINT_LOC(ret, total_size, file, line); - return ret; + (void)file; + (void)line; + (void)caller; + return UMM_CALLOC(count, size); } STATIC_ALWAYS_INLINE -void* IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char* file, int line) +void* IRAM_ATTR _heap_pvPortRealloc(void* ptr, size_t size, const char* file, int line, const void* caller) { - INTEGRITY_CHECK__PANIC_FL(file, line); - void* ret = UMM_REALLOC_FL(ptr, size, file, line); - POISON_CHECK__PANIC_FL(file, line); - PTR_CHECK__LOG_LAST_FAIL_FL(ret, size, file, line); - 
OOM_CHECK__PRINT_LOC(ret, size, file, line); - return ret; + (void)file; + (void)line; + (void)caller; + return UMM_REALLOC(ptr, size); } STATIC_ALWAYS_INLINE -void* IRAM_ATTR heap_pvPortZalloc(size_t size, const char* file, int line) +void IRAM_ATTR _heap_vPortFree(void* ptr, const char* file, int line, const void* caller) { - INTEGRITY_CHECK__PANIC_FL(file, line); - POISON_CHECK__PANIC_FL(file, line); - void* ret = UMM_CALLOC(1, size); - PTR_CHECK__LOG_LAST_FAIL_FL(ret, size, file, line); - OOM_CHECK__PRINT_LOC(ret, size, file, line); - return ret; + (void)file; + (void)line; + (void)caller; + UMM_FREE(ptr); } +#if UMM_ENABLE_MEMALIGN STATIC_ALWAYS_INLINE -void IRAM_ATTR heap_vPortFree(void *ptr, const char* file, int line) +void* IRAM_ATTR _heap_pvPortMemalign(size_t alignment, size_t size, const char* file, int line, const void* caller) { - INTEGRITY_CHECK__PANIC_FL(file, line); - UMM_FREE_FL(ptr, file, line); - POISON_CHECK__PANIC_FL(file, line); + (void)file; + (void)line; + (void)caller; + return UMM_MEMALIGN(alignment, size); } +#endif -size_t IRAM_ATTR xPortWantedSizeAlign(size_t size) +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// Heap debug wrappers used by LIBC +// capture caller return address at exposed API +void* _malloc_r(struct _reent* unused, size_t size) { - return (size + 3) & ~((size_t) 3); + (void) unused; + void* caller = __builtin_return_address(0); + ISR_CHECK__LOG_NOT_SAFE(caller); + void* ret = _heap_pvPortMalloc(size, NULL, 0, caller); + OOM_CHECK__LOG_LAST_FAIL_LITE_FL(ret, size, caller); + return ret; } -void system_show_malloc(void) +void* _calloc_r(struct _reent* unused, size_t count, size_t size) { -#ifdef UMM_INFO - HeapSelectDram ephemeral; - umm_info(NULL, true); -#endif + (void) unused; + void* caller = __builtin_return_address(0); + ISR_CHECK__LOG_NOT_SAFE(caller); + void* ret = _heap_pvPortCalloc(count, size, NULL, 0, caller); + OOM_CHECK__LOG_LAST_FAIL_LITE_FL(ret, size, 
caller); + return ret; +} + +void* _realloc_r(struct _reent* unused, void* ptr, size_t size) +{ + (void) unused; + void* caller = __builtin_return_address(0); + ISR_CHECK__LOG_NOT_SAFE(caller); + void* ret = _heap_pvPortRealloc(ptr, size, NULL, 0, caller); + OOM_CHECK__LOG_LAST_FAIL_LITE_FL(ret, size, caller); + return ret; +} + +void _free_r(struct _reent* unused, void* ptr) +{ + (void) unused; + void* caller = __builtin_return_address(0); + ISR_CHECK__LOG_NOT_SAFE(caller); + _heap_vPortFree(ptr, NULL, 0, caller); } +#if UMM_ENABLE_MEMALIGN +// Calls to aligned_alloc(a,s) will passthrough LIBC _memalign_r; however, +// "new" operators with alignment pass through memalign. +void* _memalign_r(struct _reent* unused, size_t alignment, size_t size) +{ + (void) unused; + void* caller = __builtin_return_address(0); + ISR_CHECK__LOG_NOT_SAFE(caller); + void* ret = _heap_pvPortMemalign(alignment, size, NULL, 0, caller); + OOM_CHECK__LOG_LAST_FAIL_LITE_FL(ret, size, caller); + return ret; +} +#endif + + /* - NONOS SDK and lwIP do not handle IRAM heap well. Since they also use portable - malloc calls pvPortMalloc, ... we can leverage that for this solution. + NONOS SDK and lwIP expect DRAM memory. Ensure they don't get something else + like an IRAM Heap allocation. Since they also use portable malloc calls + pvPortMalloc, ... we can leverage that for this solution. Force pvPortMalloc, ... APIs to serve DRAM only. + + _heap_xxx() functions will be inline for non-debug builds. 
+ + capture caller return address at exposed API */ void* IRAM_ATTR pvPortMalloc(size_t size, const char* file, int line) { HeapSelectDram ephemeral; - return heap_pvPortMalloc(size, file, line);; + return _heap_pvPortMalloc(size, file, line, __builtin_return_address(0)); } void* IRAM_ATTR pvPortCalloc(size_t count, size_t size, const char* file, int line) { HeapSelectDram ephemeral; - return heap_pvPortCalloc(count, size, file, line); + return _heap_pvPortCalloc(count, size, file, line, __builtin_return_address(0)); } -void* IRAM_ATTR pvPortRealloc(void *ptr, size_t size, const char* file, int line) +void* IRAM_ATTR pvPortRealloc(void* ptr, size_t size, const char* file, int line) { HeapSelectDram ephemeral; - return heap_pvPortRealloc(ptr, size, file, line); + return _heap_pvPortRealloc(ptr, size, file, line, __builtin_return_address(0)); } void* IRAM_ATTR pvPortZalloc(size_t size, const char* file, int line) { HeapSelectDram ephemeral; - return heap_pvPortZalloc(size, file, line); + return _heap_pvPortCalloc(1, size, file, line, __builtin_return_address(0)); } -void IRAM_ATTR vPortFree(void *ptr, const char* file, int line) +void IRAM_ATTR vPortFree(void* ptr, const char* file, int line) { -#if defined(DEBUG_ESP_OOM) || defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) || defined(UMM_INTEGRITY_CHECK) - // This is only needed for debug checks to ensure they are performed in - // correct context. umm_malloc free internally determines the correct heap. +#if defined(UMM_POISON_CHECK) || defined(UMM_INTEGRITY_CHECK) + // While umm_free internally determines the correct heap, UMM_POISON_CHECK + // and UMM_INTEGRITY_CHECK do not have arguments. They will use the + // current heap to identify which one to analyze. + // + // This is not needed for UMM_POISON_CHECK_LITE, it directly handles + // multiple heaps. + // + // DEBUG_ESP_OOM is not tied to any one heap. 
HeapSelectDram ephemeral; #endif - return heap_vPortFree(ptr, file, line); + return _heap_vPortFree(ptr, file, line, __builtin_return_address(0)); +} + +#if UMM_ENABLE_MEMALIGN +// Just a reminder, function pvPortMemalign() is made up. SDK does not use it. +//C maybe rename later? +#endif + +/////////////////////////////////////////////////////////////////////////////// +// NONOS SDK - Replacement functions +// +size_t IRAM_ATTR xPortWantedSizeAlign(size_t size) +{ + return (size + 3) & ~((size_t) 3); +} + +void system_show_malloc(void) +{ +#ifdef UMM_INFO + HeapSelectDram ephemeral; + umm_info(NULL, true); +#endif } #if (NONOSDK >= (0x30000)) @@ -415,40 +729,43 @@ void IRAM_ATTR vPortFree(void *ptr, const char* file, int line) #ifdef UMM_HEAP_IRAM void* IRAM_ATTR sdk3_pvPortMalloc(size_t size, const char* file, int line, bool iram) { + void* caller = __builtin_return_address(0); if (iram) { HeapSelectIram ephemeral; - void* ret = heap_pvPortMalloc(size, file, line); + void* ret = _heap_pvPortMalloc(size, file, line, caller); if (ret) return ret; } { HeapSelectDram ephemeral; - return heap_pvPortMalloc(size, file, line); + return _heap_pvPortMalloc(size, file, line, caller); } } void* IRAM_ATTR pvPortCallocIram(size_t count, size_t size, const char* file, int line) { + void* caller = __builtin_return_address(0); { HeapSelectIram ephemeral; - void* ret = heap_pvPortCalloc(count, size, file, line); + void* ret = _heap_pvPortCalloc(count, size, file, line, caller); if (ret) return ret; } { HeapSelectDram ephemeral; - return heap_pvPortCalloc(count, size, file, line); + return _heap_pvPortCalloc(count, size, file, line, caller); } } void* IRAM_ATTR pvPortZallocIram(size_t size, const char* file, int line) { + void* caller = __builtin_return_address(0); { HeapSelectIram ephemeral; - void* ret = heap_pvPortZalloc(size, file, line); + void* ret = _heap_pvPortCalloc(1, size, file, line, caller); if (ret) return ret; } { HeapSelectDram ephemeral; - return 
heap_pvPortZalloc(size, file, line); + return _heap_pvPortCalloc(1, size, file, line, caller); } } #define CONFIG_IRAM_MEMORY 1 @@ -478,4 +795,255 @@ uint32 IRAM_ATTR user_iram_memory_is_enabled(void) return CONFIG_IRAM_MEMORY; } #endif // #if (NONOSDK >= (0x30000)) -}; + + +/////////////////////////////////////////////////////////////////////////////// +// Not normally needed, DEV_DEBUG_ABI_CPP is for module maintenance. +#if DEV_DEBUG_ABI_CPP +// In test Sketch, set abi_new_print=true/false around test function calls. +// +// Note that this code path is skipped when built with "C++ Exception: enabled" +// and the non-debug build option. For this build case, there is no need to +// collect OOM information. To use DEV_DEBUG_ABI_CPP, you must also define one +// of the following: MIN_ESP_OOM, DEBUG_ESP_OOM, DEBUG_ESP_PORT, or +// DEBUG_ESP_WITHINISR. +bool abi_new_print = false; +#define DEBUG_ABI_CPP_PRINTF ets_uart_printf + +// _dbg_abi_print_pstr is shared (private) between abi.cpp and heap.cpp +extern "C" void _dbg_abi_print_pstr(const char *function_name) { + if (abi_new_print) { + uint32_t saved_ps = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL); + DEBUG_HEAP_PRINTF("\nTrace: "); + if (withinISR(saved_ps)) { + DEBUG_HEAP_PRINTF("FN(%p)", function_name); + } else { + size_t sz = strlen_P(function_name); + char buf[sz + 1]; + strcpy_P(buf, function_name); + DEBUG_HEAP_PRINTF("'%s'", buf); + } + xt_wsr_ps(saved_ps); + } +} +// #define DEBUG_DELETE_OP_PRINT_FN() do { _dbg_abi_print_pstr("delete_op", __PRETTY_FUNCTION__, __builtin_return_address(0)); } while (false) +#define DEBUG_DELETE_OP_PRINT_FN() do { _dbg_abi_print_pstr(__PRETTY_FUNCTION__); } while (false) + +#else +#define DEBUG_DELETE_OP_PRINT_FN(...) do { } while (false) +#define DEBUG_ABI_CPP_PRINTF(...) 
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// heap allocator for "new" (ABI) - To support collecting OOM info, always defined
+//
+#undef USE_HEAP_ABI_MEMALIGN
+#undef USE_HEAP_ABI_MALLOC
+
+#if defined(__cpp_exceptions) && \
+(defined(DEBUG_ESP_OOM) || defined(DEBUG_ESP_PORT) || defined(DEBUG_ESP_WITHINISR) || defined(MIN_ESP_OOM))
+#if defined(UMM_ENABLE_MEMALIGN)
+#define USE_HEAP_ABI_MEMALIGN 1
+#else
+#define USE_HEAP_ABI_MALLOC 1
+#endif
+
+#elif !defined(__cpp_exceptions)
+#if defined(UMM_ENABLE_MEMALIGN)
+#define USE_HEAP_ABI_MEMALIGN 1
+#else
+#define USE_HEAP_ABI_MALLOC 1
+#endif
+#endif
+
+#if USE_HEAP_ABI_MEMALIGN
+// To maintain my sanity, I intend that all functions containing the string
+// "memalign" in the name will have the "alignment" argument before the "size".
+void* _heap_abi_memalign(size_t alignment, size_t size, bool unhandled, const void* const caller)
+{
+    DEBUG_ABI_CPP_PRINTF("  _heap_abi_memalign(alignment(%u), size(%u), unhandled(%s), caller(%p))\n", alignment, size, (unhandled) ? "true" : "false", caller);
+    // A comment from libstdc new_op.cc says: "malloc (0) is unpredictable;
+    // avoid it." This behavior is only done for the new operator. Our malloc is
+    // happy to return a null pointer for a zero size allocation; however, the
+    // comment implies to me that other behavior might probe an under tested
+    // logic path. We take the road more traveled.
+ if (__builtin_expect(0 == size, false)) { + size = 1; + } + + #ifdef ENABLE_THICK_DEBUG_WRAPPERS + ISR_CHECK__LOG_NOT_SAFE(caller); + INTEGRITY_CHECK__PANIC_FL(NULL, 0, caller); + POISON_CHECK__PANIC_FL(NULL, 0, caller); + void* ret = UMM_MEMALIGN(alignment, size); + bool ok = OOM_CHECK__LOG_LAST_FAIL_FL(ret, size, NULL, 0, caller); + #else + void* ret = UMM_MEMALIGN(alignment, size); + // minimum OOM check + bool ok = OOM_CHECK__LOG_LAST_FAIL_LITE_FL(ret, size, caller); + #endif + if (!ok && unhandled) { + __unhandled_exception(PSTR("OOM")); + } + return ret; +} +#elif USE_HEAP_ABI_MALLOC +void* _heap_abi_malloc(size_t size, bool unhandled, const void* const caller) +{ + DEBUG_ABI_CPP_PRINTF(" _heap_abi_malloc(size(%u), unhandled(%s), caller(%p))\n", size, (unhandled) ? "true" : "false", caller); + + /* malloc (0) is unpredictable; avoid it. */ + if (__builtin_expect(0 == size, false)) { + size = 1; + } + + #ifdef ENABLE_THICK_DEBUG_WRAPPERS + ISR_CHECK__LOG_NOT_SAFE(caller); + INTEGRITY_CHECK__PANIC_FL(NULL, 0, caller); + POISON_CHECK__PANIC_FL(NULL, 0, caller); + void* ret = UMM_MALLOC(size); + bool ok = OOM_CHECK__LOG_LAST_FAIL_FL(ret, size, NULL, 0, caller); + #else + void* ret = UMM_MALLOC(size); + // minimum OOM check + bool ok = OOM_CHECK__LOG_LAST_FAIL_LITE_FL(ret, size, caller); + #endif + if (!ok && unhandled) { + __unhandled_exception(PSTR("OOM")); + } + return ret; +} +#endif + +}; // extern "C" { + + +#if defined(ENABLE_THICK_DEBUG_WRAPPERS) +/////////////////////////////////////////////////////////////////////////////// +// Replacement C++ delete operator to capture callers address +// +#include +#include +#include + +void _heap_delete(void* ptr, const void* caller) noexcept +{ + ISR_CHECK__LOG_NOT_SAFE(caller); + _heap_vPortFree(ptr, NULL, 0, caller); +} + +// These function replace their weak counterparts tagged with _GLIBCXX_WEAK_DEFINITION + +// del_op +void operator delete (void* ptr) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + 
DEBUG_ABI_CPP_PRINTF(", ptr(%p), caller(%p)\n", ptr, __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_opnt +void operator delete (void *ptr, const std::nothrow_t&) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), std::nothrow_t&, caller(%p)\n", ptr, __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_ops +void operator delete (void* ptr, [[maybe_unused]]std::size_t size) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), size(%u), caller(%p)\n", ptr, size, __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_opv +void operator delete[] (void *ptr) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), caller(%p)\n", ptr, __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_opvnt +void operator delete[] (void *ptr, const std::nothrow_t&) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), std::nothrow_t, caller(%p)\n", ptr, __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_opvs +void operator delete[] (void *ptr, [[maybe_unused]]std::size_t size) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), size(%u), caller(%p)\n", ptr, size, __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +#if defined(UMM_ENABLE_MEMALIGN) + +// del_opant +void operator delete (void *ptr, [[maybe_unused]]std::align_val_t alignment, const std::nothrow_t&) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), alignment(%u), std::nothrow_t, caller(%p)\n", ptr, std::size_t(alignment), __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_opvant +void operator delete[] (void *ptr, [[maybe_unused]]std::align_val_t alignment, const std::nothrow_t&) noexcept +{ 
+ DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), alignment(%u), std::nothrow_t, caller(%p)\n", ptr, std::size_t(alignment), __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} +/////////////////////////////////////////////////////////////////////////////// + +// del_opa +void operator delete (void* ptr, [[maybe_unused]]std::align_val_t alignment) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), alignment(%u), caller(%p)\n", ptr, std::size_t(alignment), __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_opsa +void operator delete (void* ptr, [[maybe_unused]]std::size_t size, [[maybe_unused]]std::align_val_t alignment) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), size(%u), alignment(%u), caller(%p)\n", ptr, size, std::size_t(alignment), __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_opva +void operator delete[] (void *ptr, [[maybe_unused]]std::align_val_t alignment) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), alignment(%u), caller(%p)\n", ptr, std::size_t(alignment), __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +// del_opvsa +void operator delete[] (void *ptr, [[maybe_unused]]std::size_t size, [[maybe_unused]]std::align_val_t alignment) noexcept +{ + DEBUG_DELETE_OP_PRINT_FN(); + DEBUG_ABI_CPP_PRINTF(", ptr(%p), size(%u), alignment(%u), caller(%p)\n", ptr, size, std::size_t(alignment), __builtin_return_address(0)); + + _heap_delete(ptr, __builtin_return_address(0)); +} + +#endif // #if defined(UMM_ENABLE_MEMALIGN) +#endif // #if defined(ENABLE_THICK_DEBUG_WRAPPERS) diff --git a/cores/esp8266/heap_api_debug.h b/cores/esp8266/heap_api_debug.h index 62f7e7bad5..8a08819f1e 100644 --- a/cores/esp8266/heap_api_debug.h +++ b/cores/esp8266/heap_api_debug.h @@ -19,37 +19,13 @@ extern "C" { #endif -#ifdef 
DEBUG_ESP_OOM -#define MEMLEAK_DEBUG - -#include "umm_malloc/umm_malloc_cfg.h" - +#if defined(DEBUG_ESP_OOM) || defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) #include -// Reuse pvPort* calls, since they already support passing location information. -// Specifically the debug version (heap_...) that does not force DRAM heap. -void *IRAM_ATTR heap_pvPortMalloc(size_t size, const char *file, int line); -void *IRAM_ATTR heap_pvPortCalloc(size_t count, size_t size, const char *file, int line); void *IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char *file, int line); -void *IRAM_ATTR heap_pvPortZalloc(size_t size, const char *file, int line); void IRAM_ATTR heap_vPortFree(void *ptr, const char *file, int line); -#define malloc(s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortMalloc(s, mem_debug_file, __LINE__); }) -#define calloc(n,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortCalloc(n, s, mem_debug_file, __LINE__); }) -#define realloc(p,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortRealloc(p, s, mem_debug_file, __LINE__); }) - -#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) -#define dbg_heap_free(p) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_vPortFree(p, mem_debug_file, __LINE__); }) -#else -#define dbg_heap_free(p) free(p) -#endif - -#elif defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) // #elif for #ifdef DEBUG_ESP_OOM -#include -void *IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char *file, int line); #define realloc(p,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortRealloc(p, s, mem_debug_file, __LINE__); }) -void IRAM_ATTR heap_vPortFree(void *ptr, const char *file, int line); -// C - to be discussed /* Problem, I would like to report the file and line number with the umm poison event as close as possible to the event. 
The #define method works for malloc, @@ -64,10 +40,28 @@ void IRAM_ATTR heap_vPortFree(void *ptr, const char *file, int line); more help in debugging the more challenging problems. */ #define dbg_heap_free(p) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_vPortFree(p, mem_debug_file, __LINE__); }) - #else #define dbg_heap_free(p) free(p) -#endif /* DEBUG_ESP_OOM */ +#endif + +#ifdef DEBUG_ESP_OOM +#define MEMLEAK_DEBUG + +// Reuse pvPort* calls, since they already support passing location information. +// Specifically the debug version (heap_...) that does not force DRAM heap. +void *IRAM_ATTR heap_pvPortMalloc(size_t size, const char *file, int line); +void *IRAM_ATTR heap_pvPortCalloc(size_t count, size_t size, const char *file, int line); +#ifdef UMM_ENABLE_MEMALIGN +// fake pvPort... name for memalign +void* IRAM_ATTR heap_pvPortMemalign(size_t alignment, size_t size, const char* file, int line); +#endif + +#define malloc(s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortMalloc(s, mem_debug_file, __LINE__); }) +#define calloc(n,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortCalloc(n, s, mem_debug_file, __LINE__); }) +#ifdef UMM_ENABLE_MEMALIGN +#define memalign(a,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortMemalign(a, s, mem_debug_file, __LINE__); }) +#endif +#endif #ifdef __cplusplus } diff --git a/cores/esp8266/umm_malloc/Notes.h b/cores/esp8266/umm_malloc/Notes.h index 5d076c4b67..9546f99f0a 100644 --- a/cores/esp8266/umm_malloc/Notes.h +++ b/cores/esp8266/umm_malloc/Notes.h @@ -352,5 +352,30 @@ Enhancement ideas: Move get_poisoned() within UMM_CRITICAL_* in umm_malloc() and umm_realloc(). + + September 14, 2024 + + Added conditional build support (UMM_ENABLE_MEMALIGN) for memalign() needed + for support of C++17. Expand umm_malloc_core() to support an alignment value. 
+ I updated umm_malloc/umm_memalign to handle minimum alignment parameter + testing. For the umm_malloc library, ignore all the C++ noise on lower + alignment limits, etc. Language-restrictive requirements are left to the upper + levels. + + Referenced info: + + New new() - The C++17's Alignment Parameter for Operator new() + https://www.cppstories.com/2019/08/newnew-align/ + + Alignment + https://en.cppreference.com/w/cpp/language/object#Alignment + + std::max_align_t (in stddef.h) is a typedef struct with a collection of + types that would be used. "alignof(std::max_align_t)" yields the largest + alignment for the largest type. In some locations, I have seen references + to "sizeof(std::max_align_t)" I think those were typos. + + From https://github.com/m-labs/uclibc-lm32/blob/defb191aab7711218035a506ec5cd8bb87c05c55/libc/stdlib/malloc-standard/malloc.h#L39 + */ #endif diff --git a/cores/esp8266/umm_malloc/umm_local.c b/cores/esp8266/umm_malloc/umm_local.c index c08e2a27ca..86f0d7120a 100644 --- a/cores/esp8266/umm_malloc/umm_local.c +++ b/cores/esp8266/umm_malloc/umm_local.c @@ -81,23 +81,19 @@ static bool check_poison_neighbors(umm_heap_context_t *_context, uint16_t cur) { return true; } -#endif - -#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) /* ------------------------------------------------------------------------ */ -static void *get_unpoisoned_check_neighbors(void *vptr, const char *file, int line) { +static void *get_unpoisoned_check_neighbors(const void *vptr, const char *file, int line, const void *caller) { uintptr_t ptr = (uintptr_t)vptr; if (ptr != 0) { ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE); - #if defined(UMM_POISON_CHECK_LITE) UMM_CRITICAL_DECL(id_poison); uint16_t c; - bool poison = false; + bool poison = true; umm_heap_context_t *_context = _umm_get_ptr_context((void *)ptr); if (_context) { @@ -105,29 +101,28 @@ static void *get_unpoisoned_check_neighbors(void *vptr, const char *file, int li c = 
(ptr - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block); UMM_CRITICAL_ENTRY(id_poison); - poison = - check_poison_block(&UMM_BLOCK(c)) && - check_poison_neighbors(_context, c); + if (!check_poison_block(&UMM_BLOCK(c))) { + DBGLOG_ERROR("Allocation address %p\n", vptr); + poison = false; + } else + if (!check_poison_neighbors(_context, c)) { + DBGLOG_ERROR("This bad block is in a neighbor allocation near: %p\n", vptr); + poison = false; + } UMM_CRITICAL_EXIT(id_poison); } else { DBGLOG_ERROR("\nPointer %p is not a Heap address.\n", vptr); + poison = false; } if (!poison) { + DBGLOG_ERROR("Caller near %p\n", caller); if (file) { __panic_func(file, line, ""); } else { abort(); } } - #else - /* - * No need to check poison here. POISON_CHECK() has already done a - * full heap check. - */ - (void)file; - (void)line; - #endif } return (void *)ptr; @@ -135,10 +130,10 @@ static void *get_unpoisoned_check_neighbors(void *vptr, const char *file, int li /* ------------------------------------------------------------------------ */ -void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line) { +void *umm_poison_realloc_flc(void *ptr, size_t size, const char *file, int line, const void *caller) { void *ret; - ptr = get_unpoisoned_check_neighbors(ptr, file, line); + ptr = get_unpoisoned_check_neighbors(ptr, file, line, caller); add_poison_size(&size); ret = umm_realloc(ptr, size); @@ -153,9 +148,9 @@ void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line) /* ------------------------------------------------------------------------ */ -void umm_poison_free_fl(void *ptr, const char *file, int line) { +void umm_poison_free_flc(void *ptr, const char *file, int line, const void *caller) { - ptr = get_unpoisoned_check_neighbors(ptr, file, line); + ptr = get_unpoisoned_check_neighbors(ptr, file, line, caller); umm_free(ptr); } @@ -362,6 +357,13 @@ size_t ICACHE_FLASH_ATTR umm_get_free_null_count(void) { umm_heap_context_t *_context = 
umm_get_current_heap(); return _context->stats.id_free_null_count; } + +#if UMM_ENABLE_MEMALIGN +size_t ICACHE_FLASH_ATTR umm_get_malloc_align_error_count(void) { + umm_heap_context_t *_context = umm_get_current_heap(); + return _context->stats.id_malloc_align_error_count; +} +#endif #endif // UMM_STATS_FULL #if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) @@ -369,7 +371,7 @@ size_t ICACHE_FLASH_ATTR umm_get_free_null_count(void) { * Saturated unsigned add * Poison added to allocation size requires overflow protection. */ -static size_t umm_uadd_sat(const size_t a, const size_t b) { +size_t umm_uadd_sat(const size_t a, const size_t b) { size_t r = a + b; if (r < a) { return SIZE_MAX; @@ -391,5 +393,4 @@ size_t umm_umul_sat(const size_t a, const size_t b) { return r; } - #endif // BUILD_UMM_MALLOC_C diff --git a/cores/esp8266/umm_malloc/umm_local.h b/cores/esp8266/umm_malloc/umm_local.h index c5dcffd73c..3beb8f1d5e 100644 --- a/cores/esp8266/umm_malloc/umm_local.h +++ b/cores/esp8266/umm_malloc/umm_local.h @@ -34,13 +34,15 @@ static size_t umm_uadd_sat(const size_t a, const size_t b); // #define DBGLOG_FORCE(force, format, ...) 
{if(force) {::printf(PSTR(format), ## __VA_ARGS__);}} -#if defined(DEBUG_ESP_OOM) || defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) || defined(UMM_INTEGRITY_CHECK) +#if defined(DEBUG_ESP_OOM) || defined(UMM_POISON_CHECK) \ + || defined(UMM_POISON_CHECK_LITE) || defined(UMM_INTEGRITY_CHECK) \ + || defined(DEBUG_ESP_WITHINISR) #else - -#define umm_malloc(s) malloc(s) -#define umm_calloc(n,s) calloc(n,s) -#define umm_realloc(p,s) realloc(p,s) -#define umm_free(p) free(p) +#define umm_malloc(s) malloc(s) +#define umm_memalign(a,s) memalign(a,s) +#define umm_calloc(n,s) calloc(n,s) +#define umm_realloc(p,s) realloc(p,s) +#define umm_free(p) free(p) #endif diff --git a/cores/esp8266/umm_malloc/umm_malloc.cpp b/cores/esp8266/umm_malloc/umm_malloc.cpp index e130862cf7..24abe6d335 100644 --- a/cores/esp8266/umm_malloc/umm_malloc.cpp +++ b/cores/esp8266/umm_malloc/umm_malloc.cpp @@ -524,16 +524,28 @@ static void ICACHE_MAYBE _umm_init_heap(umm_heap_context_t *_context) { void ICACHE_MAYBE umm_init_heap(size_t id, void *start_addr, size_t size, bool full_init) { /* Check for bad values and block duplicate init attempts. */ umm_heap_context_t *_context = umm_get_heap_by_id(id); - if (NULL == start_addr || NULL == _context || _context->heap) { + if (NULL == start_addr || NULL == _context || _context->heap || size < 20u) { return; } /* init heap pointer and size, and memset it to 0 */ - _context->id = id; - _context->heap = (umm_block *)start_addr; - _context->heap_end = (void *)((uintptr_t)start_addr + size); + // start_addr is 8-byte aligned. However, we want the allocated data address + // we return to be 8-byte aligned. By padding the start address, we adjust + // the data allocated address to land on an 8-byte aligned value. + // Our default target alignment is __STDCPP_DEFAULT_NEW_ALIGNMENT__ == 8 + // To return malloc memory 8-byte aligned, we have to sacrifice 4-bytes at + // beginning and end of the heap address space. 
+ #if defined(UMM_LEGACY_ALIGN_4BYTE) && defined(UMM_ENABLE_MEMALIGN) + #error "Build option conflict - cannot support both UMM_LEGACY_ALIGN_4BYTE and UMM_ENABLE_MEMALIGN" + #elif defined(UMM_LEGACY_ALIGN_4BYTE) + _context->heap = (umm_block *)((uintptr_t)start_addr); + #else + _context->heap = (umm_block *)((uintptr_t)start_addr + 4u); + size -= 4u; + #endif _context->numblocks = (size / sizeof(umm_block)); + _context->heap_end = (void *)((uintptr_t)_context->heap + _context->numblocks * sizeof(umm_block)); // An option for blocking the zeroing of extra heaps. This allows for // post-crash debugging after reboot. @@ -574,7 +586,7 @@ void ICACHE_MAYBE umm_init(void) { * inited and ICACHE has been enabled. */ #ifdef UMM_HEAP_IRAM -void ICACHE_FLASH_ATTR umm_init_iram_ex(void *addr, unsigned int size, bool full_init) { +void IRAM_ATTR umm_init_iram_ex(void *addr, unsigned int size, bool full_init) { /* We need the main, internal heap set up first */ UMM_CHECK_INITIALIZED(); @@ -582,20 +594,20 @@ void ICACHE_FLASH_ATTR umm_init_iram_ex(void *addr, unsigned int size, bool full } void _text_end(void); -void ICACHE_FLASH_ATTR umm_init_iram(void) __attribute__((weak)); +void IRAM_ATTR umm_init_iram(void) __attribute__((weak)); /* By using a weak link, it is possible to reduce the IRAM heap size with a user-supplied init function. This would allow the creation of a block of IRAM dedicated to a sketch and possibly used/preserved across reboots. */ -void ICACHE_FLASH_ATTR umm_init_iram(void) { +void IRAM_ATTR umm_init_iram(void) { umm_init_iram_ex(mmu_sec_heap(), mmu_sec_heap_size(), true); } #endif // #ifdef UMM_HEAP_IRAM #ifdef UMM_HEAP_EXTERNAL -void ICACHE_FLASH_ATTR umm_init_vm(void *vmaddr, unsigned int vmsize) { +void IRAM_ATTR umm_init_vm(void *vmaddr, unsigned int vmsize) { /* We need the main, internal (DRAM) heap set up first */ UMM_CHECK_INITIALIZED(); @@ -693,7 +705,14 @@ void umm_free(void *ptr) { * UMM_CRITICAL_ENTRY() and UMM_CRITICAL_EXIT(). 
*/ -static void *umm_malloc_core(umm_heap_context_t *_context, size_t size) { +#if UMM_ENABLE_MEMALIGN +#define UMM_MALLOC_CORE(a, b, c) umm_malloc_core(a, b, c) +static void *umm_malloc_core(umm_heap_context_t *_context, size_t size, size_t alignment) +#else +#define UMM_MALLOC_CORE(a, b, c) umm_malloc_core(a, b) +static void *umm_malloc_core(umm_heap_context_t *_context, size_t size) +#endif +{ uint16_t blocks; uint16_t blockSize = 0; @@ -706,6 +725,13 @@ static void *umm_malloc_core(umm_heap_context_t *_context, size_t size) { STATS__ALLOC_REQUEST(id_malloc, size); + #if UMM_ENABLE_MEMALIGN + if (alignment) { + // Pad size to cover alignment adjustments + size += alignment; + } + #endif + blocks = umm_blocks(size); /* @@ -754,6 +780,44 @@ static void *umm_malloc_core(umm_heap_context_t *_context, size_t size) { UMM_FRAGMENTATION_METRIC_REMOVE(cf); + #if UMM_ENABLE_MEMALIGN + if (__builtin_expect(alignment, 0u)) { + size_t alignMask = (alignment - 1u); + uintptr_t aptr = (uintptr_t)&UMM_DATA(cf); + + #if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) + /* Data address must be properly aligned after poison space is inserted */ + aptr += sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE; + aptr = (aptr + alignMask) & ~alignMask; + aptr -= sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE; + #else + aptr = (aptr + alignMask) & ~alignMask; + #endif + + /* Figure out which block we're in. Note the use of truncated division... 
*/ + uint16_t c = ((aptr) - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block); + + // Release space before our aligned space, adjusting for overhead + // From cf up to c can be free-ed + uint16_t frag = c - cf; + if (frag) { + umm_split_block(_context, cf, frag, UMM_FREELIST_MASK); + UMM_NBLOCK(cf) = (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK) | UMM_FREELIST_MASK; + + // Splice in the new free block, let the later logic trim away + // unneeded spaces + UMM_PFREE(UMM_NFREE(cf)) = c; + UMM_NFREE(c) = UMM_NFREE(cf); + + UMM_NFREE(cf) = c; + UMM_PFREE(c) = cf; + + cf = c; + blocks -= frag; + } + } + #endif + /* * This is an existing block in the memory heap, we just need to split off * what we need, unlink it from the free list and mark it as in use, and @@ -813,8 +877,12 @@ static void *umm_malloc_core(umm_heap_context_t *_context, size_t size) { } /* ------------------------------------------------------------------------ */ - -void *umm_malloc(size_t size) { +#if UMM_ENABLE_MEMALIGN +void *umm_memalign(size_t alignment, size_t size) +#else +void *umm_malloc(size_t size) +#endif +{ UMM_CRITICAL_DECL(id_malloc); void *ptr = NULL; @@ -889,6 +957,30 @@ void *umm_malloc(size_t size) { return ptr; } + #if UMM_ENABLE_MEMALIGN + + /* + * Ensure alignment is power of 2; however, we allow zero. + */ + + if (0u != (alignment & (alignment - 1u))) { + STATS__ALIGNMENT_ERROR(id_malloc, alignment); + return ptr; + } + + /* + * Allocations default to 8-byte alignment, same as __STDCPP_DEFAULT_NEW_ALIGNMENT__. + * There is no need to execute extra alignment logic for small alignments. + * + * Side note when you add '-faligned-new=4' or other non-8 values to the + * build the GCC compiler complains with: "cc1: warning: command-line option + * '-faligned-new=4' is valid for C++/ObjC++ but not for C." + */ + if (alignment <= sizeof(umm_block)) { + alignment = 0u; // Use implementation default/minimum, 8. 
+ } + #endif + /* Allocate the memory within a protected critical section */ UMM_CRITICAL_ENTRY(id_malloc); @@ -903,11 +995,12 @@ void *umm_malloc(size_t size) { * use DRAM for an ISR. Each 16-bit access to IRAM that umm_malloc has to make * requires a pass through the exception handling logic. */ + if (UMM_CRITICAL_WITHINISR(id_malloc)) { _context = umm_get_heap_by_id(UMM_HEAP_DRAM); } - ptr = umm_malloc_core(_context, size); + ptr = UMM_MALLOC_CORE(_context, size, alignment); ptr = POISON_CHECK_SET_POISON(ptr, size); @@ -916,6 +1009,12 @@ void *umm_malloc(size_t size) { return ptr; } +#if UMM_ENABLE_MEMALIGN +void *umm_malloc(size_t size) { + return umm_memalign(0, size); +} +#endif + /* ------------------------------------------------------------------------ */ void *umm_realloc(void *ptr, size_t size) { @@ -1122,7 +1221,7 @@ void *umm_realloc(void *ptr, size_t size) { } else { DBGLOG_DEBUG("realloc a completely new block %i\n", blocks); void *oldptr = ptr; - if ((ptr = umm_malloc_core(_context, size))) { + if ((ptr = UMM_MALLOC_CORE(_context, size, 0))) { DBGLOG_DEBUG("realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks); (void)POISON_CHECK_SET_POISON(ptr, size); UMM_CRITICAL_SUSPEND(id_realloc); @@ -1204,7 +1303,7 @@ void *umm_realloc(void *ptr, size_t size) { } else { // 4 DBGLOG_DEBUG("realloc a completely new block %d\n", blocks); void *oldptr = ptr; - if ((ptr = umm_malloc_core(_context, size))) { + if ((ptr = UMM_MALLOC_CORE(_context, size, 0))) { DBGLOG_DEBUG("realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks); (void)POISON_CHECK_SET_POISON(ptr, size); UMM_CRITICAL_SUSPEND(id_realloc); @@ -1230,7 +1329,7 @@ void *umm_realloc(void *ptr, size_t size) { } else { DBGLOG_DEBUG("realloc a completely new block %d\n", blocks); void *oldptr = ptr; - if ((ptr = umm_malloc_core(_context, size))) { + if ((ptr = UMM_MALLOC_CORE(_context, size, 0))) { DBGLOG_DEBUG("realloc %d to a bigger block %d, copy, and free 
the old\n", blockSize, blocks); (void)POISON_CHECK_SET_POISON(ptr, size); UMM_CRITICAL_SUSPEND(id_realloc); diff --git a/cores/esp8266/umm_malloc/umm_malloc.h b/cores/esp8266/umm_malloc/umm_malloc.h index 2c3b22cf74..039264b36b 100644 --- a/cores/esp8266/umm_malloc/umm_malloc.h +++ b/cores/esp8266/umm_malloc/umm_malloc.h @@ -34,6 +34,9 @@ extern void *umm_malloc(size_t size); extern void *umm_calloc(size_t num, size_t size); extern void *umm_realloc(void *ptr, size_t size); extern void umm_free(void *ptr); +#if UMM_ENABLE_MEMALIGN +extern void *umm_memalign(size_t alignment, size_t size); +#endif /* ------------------------------------------------------------------------ */ diff --git a/cores/esp8266/umm_malloc/umm_malloc_cfg.h b/cores/esp8266/umm_malloc/umm_malloc_cfg.h index bcc355f893..c2bded9f86 100644 --- a/cores/esp8266/umm_malloc/umm_malloc_cfg.h +++ b/cores/esp8266/umm_malloc/umm_malloc_cfg.h @@ -269,6 +269,9 @@ typedef struct UMM_STATISTICS_t { size_t id_realloc_zero_count; size_t id_free_count; size_t id_free_null_count; + #if UMM_ENABLE_MEMALIGN + size_t id_malloc_align_error_count; + #endif #endif } UMM_STATISTICS; @@ -321,6 +324,13 @@ size_t ICACHE_FLASH_ATTR umm_block_size(void); _context->stats.tag##_zero_count += 1; \ } while (false) +#if UMM_ENABLE_MEMALIGN +#define STATS__ALIGNMENT_ERROR(tag, s) \ + do { \ + _context->stats.tag##_align_error_count += 1; \ + } while (false) +#endif + #define STATS__NULL_FREE_REQUEST(tag) \ do { \ umm_heap_context_t *_context = umm_get_current_heap(); \ @@ -345,6 +355,9 @@ size_t umm_get_realloc_count(void); size_t umm_get_realloc_zero_count(void); size_t umm_get_free_count(void); size_t umm_get_free_null_count(void); +#if UMM_ENABLE_MEMALIGN +size_t umm_get_malloc_align_error_count(void); +#endif #else // Not UMM_STATS_FULL #define STATS__FREE_BLOCKS_MIN() (void)0 @@ -353,6 +366,9 @@ size_t umm_get_free_null_count(void); #define STATS__ZERO_ALLOC_REQUEST(tag, s) (void)(s) #define STATS__NULL_FREE_REQUEST(tag) 
(void)0 #define STATS__FREE_REQUEST(tag) (void)0 +#if UMM_ENABLE_MEMALIGN +#define STATS__ALIGNMENT_ERROR(tag, a) (void)(a) +#endif #endif /* @@ -543,12 +559,7 @@ extern void umm_corruption(void); #define INTEGRITY_CHECK() (1) #endif -///////////////////////////////////////////////// - /* - * -D UMM_POISON_CHECK : - * -D UMM_POISON_CHECK_LITE - * * Enables heap poisoning: add predefined value (poison) before and after each * allocation, and check before each heap operation that no poison is * corrupted. @@ -568,105 +579,30 @@ extern void umm_corruption(void); * NOTE: each allocated buffer is aligned by 4 bytes. But when poisoning is * enabled, actual pointer returned to user is shifted by * `(sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE)`. + * * It's your responsibility to make resulting pointers aligned appropriately. * * If poison corruption is detected, the message is printed and user-provided * callback is called: `UMM_HEAP_CORRUPTION_CB()` - * - * UMM_POISON_CHECK - does a global heap check on all active allocation at - * every alloc API call. May exceed 10us due to critical section with IRQs - * disabled. - * - * UMM_POISON_CHECK_LITE - checks the allocation presented at realloc() - * and free(). Expands the poison check on the current allocation to - * include its nearest allocated neighbors in the heap. - * umm_malloc() will also checks the neighbors of the selected allocation - * before use. - * - * Status: TODO?: UMM_POISON_CHECK_LITE is a new option. We could propose for - * upstream; however, the upstream version has much of the framework for calling - * poison check on each alloc call refactored out. Not sure how this will be - * received. 
- */ - -/* - * Compatibility for deprecated UMM_POISON - */ -#if defined(UMM_POISON) && !defined(UMM_POISON_CHECK) -#define UMM_POISON_CHECK_LITE -#endif - -#if defined(DEBUG_ESP_PORT) || defined(DEBUG_ESP_CORE) -#if !defined(UMM_POISON_CHECK) && !defined(UMM_POISON_CHECK_LITE) -/* -#define UMM_POISON_CHECK */ - #define UMM_POISON_CHECK_LITE -#endif -#endif +#if 0 +// Multiple port specific changes. Handle/move this block to umm_malloc_cfgport.h +// Isolated block kept to ease comparing with upstream for changes. +#ifdef UMM_POISON_CHECK + #define UMM_POISON_SIZE_BEFORE (4) + #define UMM_POISON_SIZE_AFTER (4) + #define UMM_POISONED_BLOCK_LEN_TYPE uint16_t -#define UMM_POISON_SIZE_BEFORE (4) -#define UMM_POISON_SIZE_AFTER (4) -#define UMM_POISONED_BLOCK_LEN_TYPE uint32_t - -#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) extern void *umm_poison_malloc(size_t size); extern void *umm_poison_calloc(size_t num, size_t size); extern void *umm_poison_realloc(void *ptr, size_t size); extern void umm_poison_free(void *ptr); extern bool umm_poison_check(void); -// Local Additions to better report location in code of the caller.
-void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line); -void umm_poison_free_fl(void *ptr, const char *file, int line); -#define POISON_CHECK_SET_POISON(p, s) get_poisoned(p, s) -#define POISON_CHECK_SET_POISON_BLOCKS(p, s) \ - do { \ - size_t super_size = (s * sizeof(umm_block)) - (sizeof(((umm_block *)0)->header)); \ - get_poisoned(p, super_size); \ - } while (false) -#define UMM_POISON_SKETCH_PTR(p) ((void *)((uintptr_t)p + sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE)) -#define UMM_POISON_SKETCH_PTRSZ(p) (*(UMM_POISONED_BLOCK_LEN_TYPE *)p) -#define UMM_POISON_MEMMOVE(t, p, s) memmove(UMM_POISON_SKETCH_PTR(t), UMM_POISON_SKETCH_PTR(p), UMM_POISON_SKETCH_PTRSZ(p)) -#define UMM_POISON_MEMCPY(t, p, s) memcpy(UMM_POISON_SKETCH_PTR(t), UMM_POISON_SKETCH_PTR(p), UMM_POISON_SKETCH_PTRSZ(p)) -#if defined(UMM_POISON_CHECK_LITE) -/* - * We can safely do individual poison checks at free and realloc and stay - * under 10us or close. - */ - #define POISON_CHECK() 1 - #define POISON_CHECK_NEIGHBORS(c) \ - do { \ - if (!check_poison_neighbors(_context, c)) \ - panic(); \ - } while (false) + #define POISON_CHECK() umm_poison_check() #else -/* Not normally enabled. A full heap poison check may exceed 10us. */ - #define POISON_CHECK() umm_poison_check() - #define POISON_CHECK_NEIGHBORS(c) do {} while (false) + #define POISON_CHECK() (1) #endif -#else -#define POISON_CHECK() 1 -#define POISON_CHECK_NEIGHBORS(c) do {} while (false) -#define POISON_CHECK_SET_POISON(p, s) (p) -#define POISON_CHECK_SET_POISON_BLOCKS(p, s) -#define UMM_POISON_MEMMOVE(t, p, s) memmove((t), (p), (s)) -#define UMM_POISON_MEMCPY(t, p, s) memcpy((t), (p), (s)) -#endif - -#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) -/* - * Overhead adjustments needed for free_blocks to express the number of bytes - * that can actually be allocated. 
- */ -#define UMM_OVERHEAD_ADJUST ( \ - umm_block_size() / 2 + \ - UMM_POISON_SIZE_BEFORE + \ - UMM_POISON_SIZE_AFTER + \ - sizeof(UMM_POISONED_BLOCK_LEN_TYPE)) - -#else -#define UMM_OVERHEAD_ADJUST (umm_block_size() / 2) #endif diff --git a/cores/esp8266/umm_malloc/umm_malloc_cfgport.h b/cores/esp8266/umm_malloc/umm_malloc_cfgport.h index 233671304f..8f8da8cc38 100644 --- a/cores/esp8266/umm_malloc/umm_malloc_cfgport.h +++ b/cores/esp8266/umm_malloc/umm_malloc_cfgport.h @@ -100,6 +100,40 @@ extern char _heap_start[]; #define UMM_HEAP_STACK_DEPTH 32 #endif +/* + * -DUMM_ENABLE_MEMALIGN=1 + * + * Include function memalign(size_t alignment, size_t size) in the build. + * Provides low-level memalign function to support C++17 addition of aligned + * "new" operators. + * + * Requires default 8-byte aligned data addresses. + * Build options "-DUMM_ENABLE_MEMALIGN=1" and "-DUMM_LEGACY_ALIGN_4BYTE=1" + * are mutually exclusive. + * + * Allocations from memalign() internally appear and are handled like any other + * UMM_MALLOC memory allocation. + * + * The existing free() function handles releasing memalign() memory allocations. + * + * Function realloc() should not be called for aligned memory allocations. + * It can break the alignment. At worst, the alignment falls back to + * sizeof(umm_block), 8 bytes. + * + * The UMM_POISON build option supports memalign(). + * + #define UMM_ENABLE_MEMALIGN 1 + */ + +// #define UMM_ENABLE_MEMALIGN 1 + +#if ((1 - UMM_ENABLE_MEMALIGN - 1) == 2) +#undef UMM_ENABLE_MEMALIGN +#define UMM_ENABLE_MEMALIGN 1 +#elif ((1 - UMM_ENABLE_MEMALIGN - 1) == 0) +#undef UMM_ENABLE_MEMALIGN +#endif + /* * The NONOS SDK API requires function `umm_info()` for implementing * `system_show_malloc()`. Build option `-DUMM_INFO` enables this support.
@@ -216,4 +250,198 @@ extern char _heap_start[]; #error "Specify at least one of these build options: (UMM_STATS or UMM_STATS_FULL) and/or UMM_INFO and/or UMM_INLINE_METRICS" #endif + +//////////////////////////////////////////////////////////////////////////////// +/* + * -D UMM_POISON_CHECK : + * -D UMM_POISON_CHECK_LITE : + * -D UMM_POISON_NONE + * + * Enables heap poisoning: add predefined value (poison) before and after each + * allocation, and check before each heap operation that no poison is + * corrupted. + * + * Other than the poison itself, we need to store exact user-requested length + * for each buffer, so that overrun by just 1 byte will be always noticed. + * + * Customizations: + * + * UMM_POISON_SIZE_BEFORE: + * Number of poison bytes before each block, e.g. 4 + * UMM_POISON_SIZE_AFTER: + * Number of poison bytes after each block e.g. 4 + * UMM_POISONED_BLOCK_LEN_TYPE + * Type of the exact buffer length, e.g. `uint16_t` + * + * NOTE: each allocated buffer is aligned by 4 bytes. But when poisoning is + * enabled, actual pointer returned to user is shifted by + * `(sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE)`. + * It's your responsibility to make resulting pointers aligned appropriately. + * + * If poison corruption is detected, the message is printed and user-provided + * callback is called: `UMM_HEAP_CORRUPTION_CB()` + * + * UMM_POISON_CHECK - does a global heap check on all active allocation at + * every alloc API call. May exceed 10us due to critical section with IRQs + * disabled. + * + * UMM_POISON_CHECK_LITE - checks the allocation presented at realloc() + * and free(). Expands the poison check on the current allocation to + * include its nearest allocated neighbors in the heap. + * umm_malloc() will also checks the neighbors of the selected allocation + * before use. + * + * UMM_POISON_NONE - No UMM_POISON... checking. + * + * Status: TODO?: UMM_POISON_CHECK_LITE is a new option. 
We could propose for + * upstream; however, the upstream version has much of the framework for calling + * poison check on each alloc call refactored out. Not sure how this will be + * received. + */ + +/* + * Compatibility for deprecated UMM_POISON + */ +#if defined(UMM_POISON) && !defined(UMM_POISON_CHECK) && !defined(UMM_POISON_NONE) +#define UMM_POISON_CHECK_LITE +#endif + +#if defined(DEBUG_ESP_PORT) || defined(DEBUG_ESP_CORE) +#if !defined(UMM_POISON_CHECK) && !defined(UMM_POISON_CHECK_LITE) && !defined(UMM_POISON_NONE) +/* +#define UMM_POISON_CHECK + */ +#define UMM_POISON_CHECK_LITE +#endif +#endif + +#if defined(UMM_POISON_CHECK) && defined(UMM_POISON_CHECK_LITE) +// There can only be one. +#error "Build options UMM_POISON_NONE, UMM_POISON_CHECK and UMM_POISON_CHECK_LITE are mutually exclusive." +#endif + +#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) +#if defined(UMM_POISON_NONE) +#error "Build options UMM_POISON_NONE, UMM_POISON_CHECK and UMM_POISON_CHECK_LITE are mutually exclusive." +#endif + +#define UMM_POISON_SIZE_BEFORE (4) +#define UMM_POISON_SIZE_AFTER (4) +#define UMM_POISONED_BLOCK_LEN_TYPE uint32_t + +extern void *umm_poison_malloc(size_t size); +extern void *umm_poison_calloc(size_t num, size_t size); +#if UMM_ENABLE_MEMALIGN +extern void *umm_posion_memalign(size_t alignment, size_t size); +#endif +#endif + +#if defined(UMM_POISON_CHECK_LITE) +/* + * Local Additions to better report location in code of the caller. + * + * We can safely do individual poison checks at free and realloc and stay + * under 10us or close. 
+ */ +extern void *umm_poison_realloc_flc(void *ptr, size_t size, const char *file, int line, const void *caller); +extern void umm_poison_free_flc(void *ptr, const char *file, int line, const void *caller); +#define POISON_CHECK_SET_POISON(p, s) get_poisoned(p, s) +#define POISON_CHECK_SET_POISON_BLOCKS(p, s) \ + do { \ + size_t super_size = (s * sizeof(umm_block)) - (sizeof(((umm_block *)0)->header)); \ + get_poisoned(p, super_size); \ + } while (false) +#define UMM_POISON_SKETCH_PTR(p) ((void *)((uintptr_t)p + sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE)) +#define UMM_POISON_SKETCH_PTRSZ(p) (*(UMM_POISONED_BLOCK_LEN_TYPE *)p) +#define UMM_POISON_MEMMOVE(t, p, s) memmove(UMM_POISON_SKETCH_PTR(t), UMM_POISON_SKETCH_PTR(p), UMM_POISON_SKETCH_PTRSZ(p)) +#define UMM_POISON_MEMCPY(t, p, s) memcpy(UMM_POISON_SKETCH_PTR(t), UMM_POISON_SKETCH_PTR(p), UMM_POISON_SKETCH_PTRSZ(p)) + +// No meaningful information is conveyed with panic() for fail. Save space used abort(). +#define POISON_CHECK_NEIGHBORS(c) \ + do { \ + if (!check_poison_neighbors(_context, c)) { \ + DBGLOG_ERROR("This bad block is in a neighbor allocation near free memory %p\n", (void *)&UMM_BLOCK(c)); \ + abort(); \ + } \ + } while (false) +/* + * Nullify any POISON_CHECK for UMM_POISON_CHECK builds. 
+ */ +#define POISON_CHECK() 1 + +#elif defined(UMM_POISON_CHECK) +extern void *umm_poison_realloc(void *ptr, size_t size); +extern void umm_poison_free(void *ptr); +extern bool umm_poison_check(void); +#define POISON_CHECK_SET_POISON(p, s) get_poisoned(p, s) +#define POISON_CHECK_SET_POISON_BLOCKS(p, s) \ + do { \ + size_t super_size = (s * sizeof(umm_block)) - (sizeof(((umm_block *)0)->header)); \ + get_poisoned(p, super_size); \ + } while (false) +#define UMM_POISON_SKETCH_PTR(p) ((void *)((uintptr_t)p + sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE)) +#define UMM_POISON_SKETCH_PTRSZ(p) (*(UMM_POISONED_BLOCK_LEN_TYPE *)p) +#define UMM_POISON_MEMMOVE(t, p, s) memmove(UMM_POISON_SKETCH_PTR(t), UMM_POISON_SKETCH_PTR(p), UMM_POISON_SKETCH_PTRSZ(p)) +#define UMM_POISON_MEMCPY(t, p, s) memcpy(UMM_POISON_SKETCH_PTR(t), UMM_POISON_SKETCH_PTR(p), UMM_POISON_SKETCH_PTRSZ(p)) + +/* Not normally enabled. A full heap poison check may exceed 10us. */ +#define POISON_CHECK() umm_poison_check() +#define POISON_CHECK_NEIGHBORS(c) do {} while (false) + +#else +#define POISON_CHECK() 1 +#define POISON_CHECK_NEIGHBORS(c) do {} while (false) +#define POISON_CHECK_SET_POISON(p, s) (p) +#define POISON_CHECK_SET_POISON_BLOCKS(p, s) +#define UMM_POISON_MEMMOVE(t, p, s) memmove((t), (p), (s)) +#define UMM_POISON_MEMCPY(t, p, s) memcpy((t), (p), (s)) +#endif + + +#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) +/* + * Overhead adjustments needed for free_blocks to express the number of bytes + * that can actually be allocated. + */ +#define UMM_OVERHEAD_ADJUST ( \ + umm_block_size() / 2 + \ + UMM_POISON_SIZE_BEFORE + \ + UMM_POISON_SIZE_AFTER + \ + sizeof(UMM_POISONED_BLOCK_LEN_TYPE)) + +#else +#define UMM_OVERHEAD_ADJUST (umm_block_size() / 2) +#endif + + +/* + * -DUMM_LEGACY_ALIGN_4BYTE=1 + * + * To accommodate any libraries or Sketches that may have workarounds for the + * old 4-byte alignment, this deprecated build option is available. 
+ * This option cannot be combined with -DUMM_ENABLE_MEMALIGN=1 + * +//C I am not sure we need to do this; however, the old behavior goes back to the +//C beginning of time. And, would never return a data allocation address that was +//C 8-byte aligned. If this was an issue and a workaround was created, we may +//C have broken it. + */ + +#if ((1 - UMM_LEGACY_ALIGN_4BYTE - 1) == 2) +#undef UMM_LEGACY_ALIGN_4BYTE +#define UMM_LEGACY_ALIGN_4BYTE 1 +#elif ((1 - UMM_LEGACY_ALIGN_4BYTE - 1) == 0) +#undef UMM_LEGACY_ALIGN_4BYTE +#endif + + +#if UMM_LEGACY_ALIGN_4BYTE +#pragma message("Support for legacy 4-byte alignment is deprecated") +#endif + +// //C Should we default to legacy? +// #if ! UMM_ENABLE_MEMALIGN && ! UMM_LEGACY_ALIGN_4BYTE +// #define UMM_LEGACY_ALIGN_4BYTE 1 +// #endif + #endif diff --git a/cores/esp8266/umm_malloc/umm_poison.c b/cores/esp8266/umm_malloc/umm_poison.c index ca41cabf4f..64a2c61e29 100644 --- a/cores/esp8266/umm_malloc/umm_poison.c +++ b/cores/esp8266/umm_malloc/umm_poison.c @@ -126,7 +126,8 @@ static void *get_poisoned(void *vptr, size_t size_w_poison) { return (void *)ptr; } - +#endif +#if defined(UMM_POISON_CHECK) /* * Takes "poisoned" pointer (i.e. pointer returned from `get_poisoned()`), * and checks that the poison of this particular block is still there. @@ -154,7 +155,31 @@ static void *get_unpoisoned(void *vptr) { } /* }}} */ +#endif +#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) + +#if UMM_ENABLE_MEMALIGN +/* ------------------------------------------------------------------------ */ + +void *umm_poison_memalign(size_t alignment, size_t size) { + void *ret; + + add_poison_size(&size); + + ret = umm_memalign(alignment, size); + /* + "get_poisoned" is now called from umm_malloc while still in a critical + section. Before umm_malloc returned, the pointer offset was adjusted to + the start of the requested buffer.
+ */ + + return ret; +} +void *umm_poison_malloc(size_t size) { + return umm_poison_memalign(0u, size); +} +#else /* ------------------------------------------------------------------------ */ void *umm_poison_malloc(size_t size) { @@ -171,6 +196,7 @@ return ret; } +#endif /* ------------------------------------------------------------------------ */ @@ -193,6 +219,9 @@ void *umm_poison_calloc(size_t num, size_t item_size) { return ret; } +#endif // #if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) +#if defined(UMM_POISON_CHECK) + /* ------------------------------------------------------------------------ */ void *umm_poison_realloc(void *ptr, size_t size) { @@ -256,6 +285,6 @@ bool umm_poison_check(void) { /* ------------------------------------------------------------------------ */ -#endif +#endif // #if defined(UMM_POISON_CHECK) #endif // defined(BUILD_UMM_MALLOC_C) diff --git a/libraries/ESP8266SdFat b/libraries/ESP8266SdFat index eaab1369d5..46151c90a4 160000 --- a/libraries/ESP8266SdFat +++ b/libraries/ESP8266SdFat @@ -1 +1 @@ -Subproject commit eaab1369d5b988d844888bc560967ae143847d5d +Subproject commit 46151c90a410a6f983f2f8c147e13086aaecdd8e diff --git a/libraries/SoftwareSerial b/libraries/SoftwareSerial index bcfd6d10e6..a00554a6ad 160000 --- a/libraries/SoftwareSerial +++ b/libraries/SoftwareSerial @@ -1 +1 @@ -Subproject commit bcfd6d10e6a45a0d07705d08728f293defe9cc1d +Subproject commit a00554a6ad1d28c633dd893e6d6ec4ca2811437f diff --git a/libraries/esp8266/examples/HeapNewDeleteTest/HeapNewDeleteTest.ino b/libraries/esp8266/examples/HeapNewDeleteTest/HeapNewDeleteTest.ino new file mode 100644 index 0000000000..300dff0490 --- /dev/null +++ b/libraries/esp8266/examples/HeapNewDeleteTest/HeapNewDeleteTest.ino @@ -0,0 +1,474 @@ +/* + +This Sketch is more for testing "new" and "delete" operators defined in +cores/esp8266/heap.cpp for capturing caller debug context than an example.
+ +Covers C++ Exceptions "enable" and "disable" for the "new" and "delete" operators. + +This test sketch assumes memalign enabled. + + +Notes, + +From https://stackoverflow.com/a/4005664 + A std::nothrow_t deallocation function exists, but you cannot call it with a + delete expression. + + The deallocation function is there for completeness. If a new expression fails + because of an exception, the compiler needs to free the memory it allocated via + operator new with a matching call to operator delete. So there needs to be an + operator delete that accepts a std::nothrow_t, to allow this. + + (That is, in general, a new expression with the form new (args...) T will + allocate memory with a call to operator new(sizeof(T), args...). To "match" + means to call operator delete with the same arguments, except the first.) + + Note you can call the operator directly: operator delete(memory, std::nothrow);. + However, a delete expression never calls a global deallocation function with + additional parameters. + ... + Note that for C++14 onward there is an issue about symmetry: + LWG 2458(https://wg21.cmeerw.net/lwg/issue2458) . – FrankHB CommentedAug 1 at 3:32 + +Exercise variations on new and delete operators listed in + https://en.cppreference.com/w/cpp/memory/new/operator_new + * Replaceable allocation functions + * Replaceable non-throwing allocation functions + https://en.cppreference.com/w/cpp/memory/new/operator_delete + * replaceable usual deallocation functions + * replaceable placement deallocation functions + +If you see the linker error message: + ".../gcc-gnu/libstdc++-v3/libsupc++/new_opa.cc:86: undefined reference to `memalign'" +your Arduino Sketch is using an operation that requires an aligned allocation. +You need to add line'-DUMM_ENABLE_MEMALIGN=1' to the build options. + +*/ + +#include +#include +#include // aligned_alloc + +#define PRINTF(fmt, ...) ets_uart_printf(pstrcpy(PSTR(fmt)), ##__VA_ARGS__) +#define PRINTF_LN(fmt, ...) 
ets_uart_printf(pstrcpy(PSTR((fmt "\r\n"))), ##__VA_ARGS__) +#define PRINTF_P(a, ...) ets_uart_printf(pstrcpy(a), ##__VA_ARGS__) + +constexpr size_t buf_sz = 256u; +char bigbuf1[buf_sz]; +char bigbuf2[buf_sz]; + +inline const char* pstrcpy(const char* p) { + strcpy_P(bigbuf1, p); + return bigbuf1; +} + +inline const char* pstrcpy2(const char* p) { + strcpy_P(bigbuf2, p); + return bigbuf2; +} + + +#ifdef DEV_DEBUG_ABI_CPP +extern bool abi_new_print; +#else +bool abi_new_print; +#endif + +#define POISON_BYTE (0xa5) +// Heap safe printf - does not need heap space. +// #define PRINTF ets_uart_printf + +#ifndef _NOINLINE +#define _NOINLINE __attribute__((noinline)) +#endif + +constexpr size_t oom_sz = 128000u; +constexpr size_t ok_sz = 300u; + +struct NEW_OOM { + char buf[oom_sz]; +}; + +struct NEW_OK { + char buf[ok_sz]; +}; + + +void printSeparator(const char* str = NULL) { + PRINTF_LN("\r\n=============================================================================="); + if (str) { + PRINTF_P(str); + PRINTF_LN(); + } +} +#define PRINT_SEPARATOR(a) printSeparator(__PRETTY_FUNCTION__) + +void print_pass_fail(int err_count) { + PRINTF_LN(); + PRINTF_LN("===================================="); + PRINTF_LN("===================================="); + PRINTF_LN("== %-29s ==", ""); + if (err_count) { + PRINTF_LN("== %5u %-24s ==", err_count, "Error(s) see '*' "); + } else { + PRINTF_LN("== %-29s ==", "All Tests Passed"); + } + PRINTF_LN("== %-29s ==", ""); + PRINTF_LN("===================================="); + PRINTF_LN("===================================="); +} + +//////////////////////////////////////////////////////////////////////////////////////// +// Test "new" and "delete" Operators +// Note, all array values are even. This detail is used later in the test functions. 
+// +// Delete enumeration values based on note numbers at +// https://en.cppreference.com/w/cpp/memory/new/operator_delete +enum NewDelFn { + del = 1, + del_array = 2, + del_align = 3, + del_align_array = 4, + del_sz = 5, + del_sz_array = 6, + del_sz_align = 7, + del_sz_align_array = 8, + del_nothrow = 9, + del_nothrow_array = 10, + del_nothrow_align = 11, + del_nothrow_align_array = 12 +}; + +void delete_new(void* new_ptr, size_t sz, size_t al, size_t delete_idx, bool array) { + std::align_val_t alignment = std::align_val_t(al); + if (new_ptr) { + if (array && (delete_idx & 1u)) delete_idx++; + abi_new_print = true; + switch (delete_idx) { + // ... + case NewDelFn::del: + ::operator delete(new_ptr); + break; + case NewDelFn::del_array: + ::operator delete[](new_ptr); + break; + case NewDelFn::del_align: + ::operator delete(new_ptr, alignment); + break; + case NewDelFn::del_align_array: + ::operator delete[](new_ptr, alignment); + break; + // ... size + case NewDelFn::del_sz: + ::operator delete(new_ptr, sz); + break; + case NewDelFn::del_sz_array: + ::operator delete[](new_ptr, sz); + break; + case NewDelFn::del_sz_align: + ::operator delete(new_ptr, sz, alignment); + break; + case NewDelFn::del_sz_align_array: + ::operator delete[](new_ptr, sz, alignment); + break; + // ... nothrow + case NewDelFn::del_nothrow: + ::operator delete(new_ptr, std::nothrow); + break; + case NewDelFn::del_nothrow_array: + ::operator delete[](new_ptr, std::nothrow); + break; + case NewDelFn::del_nothrow_align: + ::operator delete(new_ptr, alignment, std::nothrow); + break; + case NewDelFn::del_nothrow_align_array: + ::operator delete[](new_ptr, alignment, std::nothrow); + break; + default: + panic(); + break; + } + abi_new_print = false; + } +} + +// This works for C++ Exceptions enabled or disabled +void _NOINLINE test_new_nothrow(int& err_count, size_t sz, size_t alignment, int delete_idx, bool expect_success) { + bool array = (0 == (delete_idx & 1u)) ? 
true : false; + PRINT_SEPARATOR(); + + void* new_ptr; + abi_new_print = true; + if (array) { + if (alignment) { + new_ptr = new (std::align_val_t(alignment), std::nothrow) char[sz]; + } else { + new_ptr = new (std::nothrow) char[sz]; + } + } else { + if (alignment) { + if (oom_sz == sz) { + new_ptr = new (std::align_val_t(alignment), std::nothrow) struct NEW_OOM; + } else { + new_ptr = new (std::align_val_t(alignment), std::nothrow) struct NEW_OK; + } + } else { + if (oom_sz == sz) { + new_ptr = new (std::nothrow) struct NEW_OOM; + } else { + new_ptr = new (std::nothrow) struct NEW_OK; + } + } + } + abi_new_print = false; + + if (new_ptr) { + // Success + if (!expect_success) err_count++; + if (alignment) { + PRINTF_LN("%c %p, 'new (std::align_val_t(%u), std::nothrow)' worked, size %u", (expect_success) ? ' ' : '*', new_ptr, alignment, sz); + if (0 != ((uintptr_t)new_ptr & (alignment - 1))) { + err_count++; + PRINTF_LN("* 'new (std::align_val_t(%u), std::nothrow)' allocation not aligned", alignment); + } + } else { + PRINTF_LN("%c %p, 'new (std::nothrow)' worked, size %u", (expect_success) ? ' ' : '*', new_ptr, sz); + if (0 != ((uintptr_t)new_ptr & (__STDCPP_DEFAULT_NEW_ALIGNMENT__ - 1))) { + err_count++; + PRINTF_LN("* 'new (std::nothrow)' allocation not aligned, default alignment %u", __STDCPP_DEFAULT_NEW_ALIGNMENT__); + } + } + } else { + if (expect_success) err_count++; + if (alignment) { + PRINTF_LN("%c NULL, 'new (std::align_val_t(%u), std::nothrow)' failed, size %u", (expect_success) ? '*' : ' ', alignment, sz); + } else { + PRINTF_LN("%c NULL, 'new (std::nothrow)' failed, size %u", (expect_success) ? '*' : ' ', sz); + } + } + delete_new(new_ptr, sz, alignment, delete_idx, array); +} + +// This only works for C++ Exceptions enabled +void _NOINLINE test_new_try_catch(int& err_count, size_t sz, size_t alignment, int delete_idx, bool expect_success) { + bool array = (0 == (delete_idx & 1u)) ? 
true : false; + PRINT_SEPARATOR(); + +#if defined(__cpp_exceptions) + void* new_ptr = NULL; + + try { + abi_new_print = true; + if (array) { + if (alignment) { + new_ptr = new (std::align_val_t(alignment)) char[sz]; + } else { + new_ptr = new char[sz]; + } + } else { + if (alignment) { + if (oom_sz == sz) { + new_ptr = new (std::align_val_t(alignment)) struct NEW_OOM; + } else { + new_ptr = new (std::align_val_t(alignment)) struct NEW_OK; + } + } else { + if (oom_sz == sz) { + new_ptr = new struct NEW_OOM; + } else { + new_ptr = new struct NEW_OK; + } + } + } + } catch (const std::exception& e) { + abi_new_print = false; + PRINTF_LN(" e.what() string address %p", e.what()); + String str = String(e.what()); + PRINTF_LN(" Catch Exception: \"%s\"", pstrcpy2(e.what())); + } catch (...) { + abi_new_print = false; + PRINTF_LN(" Catch (...)"); + } + abi_new_print = false; + + if (new_ptr) { + if (!expect_success) err_count++; + if (alignment) { + PRINTF_LN("%c %p, 'new (std::align_val_t(%u))' worked, size %u", (expect_success) ? ' ' : '*', new_ptr, alignment, sz); + if (0 != ((uintptr_t)new_ptr & (alignment - 1))) { + err_count++; + PRINTF_LN("* 'new (std::align_val_t(%u))' allocation not aligned", alignment); + } + } else { + PRINTF_LN("%c %p, 'new' worked, size %u", (expect_success) ? ' ' : '*', new_ptr, sz); + if (0 != ((uintptr_t)new_ptr & (__STDCPP_DEFAULT_NEW_ALIGNMENT__ - 1))) { + err_count++; + PRINTF_LN("* 'new' allocation not aligned, default alignment %u", __STDCPP_DEFAULT_NEW_ALIGNMENT__); + } + } + } else { + if (expect_success) err_count++; + if (alignment) { + PRINTF_LN("%c NULL, 'new (std::align_val_t(%u))' failed, size %u", (expect_success) ? '*' : ' ', alignment, sz); + } else { + PRINTF_LN("%c NULL, 'new' failed, size %u", (expect_success) ? 
'*' : ' ', sz); + } + } + delete_new(new_ptr, sz, alignment, delete_idx, array); + +#else + (void)err_count, (void)sz, (void)expect_success, (void)alignment, (void)array, (void)delete_idx; + PRINTF_LN(" Requires build option: C++ Exceptions: 'enabled'"); +#endif +} + + +void print_build_options() { +#if DEV_DEBUG_ABI_CPP + PRINTF_LN(" Built with DEV_DEBUG_ABI_CPP"); +#else + PRINTF_LN("* For complete operation, the test build requires option -DDEV_DEBUG_ABI_CPP=1"); +#endif + +#if defined(__cpp_exceptions) + PRINTF_LN(" Built with C++ Exceptions: 'enabled'"); +#else + PRINTF_LN(" Built with C++ Exceptions: 'disabled'"); +#endif + +#if defined(UMM_POISON_CHECK_LITE) + PRINTF_LN(" Built with UMM_POISON_CHECK_LITE"); +#elif defined(UMM_POISON_CHECK) + PRINTF_LN(" Built with UMM_POISON_CHECK"); +#elif defined(UMM_POISON_NONE) + PRINTF_LN(" Built with UMM_POISON_NONE"); +#else + PRINTF_LN(" No POISON options"); +#endif + +#if UMM_ENABLE_MEMALIGN + PRINTF_LN(" Built with UMM_ENABLE_MEMALIGN, i.e. 
memalign present"); +#endif +} +// +// test_new_nothrow +// test_new_try_catch +// +void run_new_delete_tests() { + int err_count = 0; + printSeparator(); + PRINTF_LN("Build Option Highlights:"); + print_build_options(); + + // counter size align by use deallocate fn expect success + test_new_nothrow(err_count, oom_sz, 0u, NewDelFn::del_nothrow, false); + test_new_nothrow(err_count, oom_sz, 0u, NewDelFn::del_nothrow_array, false); + test_new_nothrow(err_count, oom_sz, 128u, NewDelFn::del_nothrow_align, false); + test_new_nothrow(err_count, oom_sz, 128u, NewDelFn::del_nothrow_align_array, false); + test_new_nothrow(err_count, ok_sz, 0u, NewDelFn::del_nothrow, true); + test_new_nothrow(err_count, ok_sz, 0u, NewDelFn::del_nothrow_array, true); + test_new_nothrow(err_count, ok_sz, 128u, NewDelFn::del_nothrow_align, true); + test_new_nothrow(err_count, ok_sz, 128u, NewDelFn::del_nothrow_align_array, true); + +#if defined(__cpp_exceptions) + test_new_try_catch(err_count, oom_sz, 0u, NewDelFn::del, false); + test_new_try_catch(err_count, oom_sz, 0u, NewDelFn::del_array, false); + test_new_try_catch(err_count, oom_sz, 128u, NewDelFn::del_align, false); + test_new_try_catch(err_count, oom_sz, 128u, NewDelFn::del_align_array, false); + test_new_try_catch(err_count, ok_sz, 0u, NewDelFn::del, true); + test_new_try_catch(err_count, ok_sz, 0u, NewDelFn::del_array, true); + test_new_try_catch(err_count, ok_sz, 128u, NewDelFn::del_align, true); + test_new_try_catch(err_count, ok_sz, 128u, NewDelFn::del_align_array, true); + + test_new_try_catch(err_count, oom_sz, 0u, NewDelFn::del_sz, false); + test_new_try_catch(err_count, oom_sz, 0u, NewDelFn::del_sz_array, false); + test_new_try_catch(err_count, oom_sz, 128u, NewDelFn::del_sz_align, false); + test_new_try_catch(err_count, oom_sz, 128u, NewDelFn::del_sz_align_array, false); + test_new_try_catch(err_count, ok_sz, 0u, NewDelFn::del_sz, true); + test_new_try_catch(err_count, ok_sz, 0u, NewDelFn::del_sz_array, true); + 
test_new_try_catch(err_count, ok_sz, 128u, NewDelFn::del_sz_align, true); + test_new_try_catch(err_count, ok_sz, 128u, NewDelFn::del_sz_align_array, true); +#endif + print_pass_fail(err_count); +} + + +void setup() { + Serial.begin(115200); + delay(100); + PRINTF_LN("\r\n\n\nOperator \"new\" / \"delete\" test"); + + PRINTF_LN(" __STDCPP_DEFAULT_NEW_ALIGNMENT__: %u", __STDCPP_DEFAULT_NEW_ALIGNMENT__); + + print_build_options(); +#if UMM_ENABLE_MEMALIGN + PRINTF_LN(" Built with UMM_ENABLE_MEMALIGN, i.e. memalign present"); + + void* new_ptr = aligned_alloc(128, 32); + if (new_ptr) { + PRINTF_LN(" %p, function \"aligned_alloc()\" worked", new_ptr); + free(new_ptr); + } +#endif + processKey('?'); +} + +void loop(void) { + if (Serial.available() > 0) { + int hotKey = Serial.read(); + processKey(hotKey); + } +} + +void processKey(int hotKey) { + switch (hotKey) { + case 'r': + run_new_delete_tests(); + break; + + case 'u': + { + HeapSelectDram ephemeral; + umm_info(NULL, true); + break; + } + // + case 'A': + abort(); + break; + + case 'R': + PRINTF("Restart, ESP.restart(); ...\r\n"); + ESP.restart(); + break; + + case 'P': + panic(); + break; + + case '\r': + PRINTF_LN(); + + case '\n': + break; + + case '?': + PRINTF_LN(); + PRINTF_LN("Press a key + "); + PRINTF_LN(" u - umm_info for DRAM Heap"); + PRINTF_LN(" r - Run \"new\"/\"delete\" tests"); + PRINTF_LN(" ? - Print Help"); + PRINTF_LN(); + PRINTF_LN("Crash with:"); + PRINTF_LN(" A - Abort"); + PRINTF_LN(" P - Panic"); + PRINTF_LN(" R - Restart, ESP.restart();"); + PRINTF_LN(); + break; + + default: + PRINTF_LN("\"%c\" - Not an option? / ? 
- help", hotKey); + processKey('?'); + break; + } +} diff --git a/libraries/esp8266/examples/HeapNewDeleteTest/HeapNewDeleteTest.ino.globals.h b/libraries/esp8266/examples/HeapNewDeleteTest/HeapNewDeleteTest.ino.globals.h new file mode 100644 index 0000000000..bb3e106016 --- /dev/null +++ b/libraries/esp8266/examples/HeapNewDeleteTest/HeapNewDeleteTest.ino.globals.h @@ -0,0 +1,29 @@ +/* + Review "Inventory of debug options ..." in core/esp8266/heap.cpp +*/ + +/*@create-file:build.opt@ +// -DDEBUG_ESP_OOM=1 +// -DDEBUG_ESP_WITHINISR=1 +// -DUMM_POISON_CHECK_LITE=1 +// -DUMM_INTEGRITY_CHECK=1 +// -DUMM_POISON_CHECK=1 +// -DUMM_POISON_NONE=1 +// +-DUMM_INFO=1 +-DUMM_STATS_FULL=1 + + +// C++17 feature for "new" operator provide aligned allocations +// These build options are required for this test program. +-DUMM_ENABLE_MEMALIGN=1 +-DDEV_DEBUG_ABI_CPP=1 +// -DMIN_ESP_OOM=1 +// Use Arduino IDE "Tools->C++ Exceptions" to enable disable C++ exceptions. + + +// cc1: warning: command-line option '-faligned-new=4' is valid for C++/ObjC++ but not for C +// -faligned-new=4 +*/ + +// @create-file:build.opt:debug@ diff --git a/tools/sdk/lwip2/builder b/tools/sdk/lwip2/builder index 4087efd9d2..8e3c4eec00 160000 --- a/tools/sdk/lwip2/builder +++ b/tools/sdk/lwip2/builder @@ -1 +1 @@ -Subproject commit 4087efd9d2a8e1cee9a159e0796d831dc1e0c497 +Subproject commit 8e3c4eec00ec7f3962705ee095a310642953b5fb