Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • cellframe/libdap
1 result
Show changes
Commits on Source (18)
Showing
with 4172 additions and 950 deletions
[submodule "test/libdap-test"]
	path = test/libdap-test
	# Upstream moved from GitHub to the DemLabs GitLab instance.
	# A second stale 'url' key (github.com/cellframe/libdap-test) was removed:
	# duplicate keys are ambiguous for tooling that reads only the first match.
	url = https://gitlab.demlabs.net/cellframe/libdap-test.git
	branch = master
......@@ -4,17 +4,91 @@ project (dap_core)
# fix implicit declaration warnings
add_definitions ("-D_GNU_SOURCE")
# Append to CMAKE_C_FLAGS instead of overwriting it, so flags supplied by the
# user, the cache, or a toolchain file are preserved.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -Wall -Wextra")
# Collect per-platform sources and headers.
# NOTE(review): file(GLOB) will not pick up newly added files until CMake is
# re-run; an explicit source list (or CONFIGURE_DEPENDS on CMake >= 3.12)
# would be more robust.
if(UNIX)
    file(GLOB CORE_SRCS
        src/*.c
        src/etc/*.c
        src/rpmalloc/*.c
    )
    file(GLOB CORE_HEADERS
        include/*.h
        include/unix/*.h
        include/unix/linux/*.h
    )
endif()
if(WIN32)
    file(GLOB CORE_SRCS
        src/*.c
        src/etc/*.c
        src/win32/*.c
        src/rpmalloc/*.c
    )
    # Duplicate "src/win32/*.h" glob entry removed — the same pattern was
    # listed twice, which only bloats CORE_HEADERS with repeated paths.
    file(GLOB CORE_HEADERS
        include/*.h
        src/win32/*.h
    )
endif()
# One-shot guard: apply these global definitions and flags only once, even when
# this file is included multiple times as a submodule of a larger build.
# The expansion is quoted: with the original unquoted form, an undefined
# SUBMODULES_NO_BUILD expands to nothing and if() fails to parse its arguments.
if(NOT ("${SUBMODULES_NO_BUILD}" MATCHES "ON"))
    set(SUBMODULES_NO_BUILD ON)

    # Check whether we're on a 32-bit or 64-bit system
    if(CMAKE_SIZEOF_VOID_P EQUAL "8")
        set(DEFAULT_BUILD_64 ON)
    else()
        set(DEFAULT_BUILD_64 OFF)
    endif()
    option(BUILD_64 "Build for 64-bit? 'OFF' builds for 32-bit." ${DEFAULT_BUILD_64})

    add_definitions ("-DDAP_SERVER")
    add_definitions ("-DNODE_NETNAME=\"kelvin\"")

    if(WIN32)
        add_definitions ("-DUNDEBUG")
        add_definitions ("-DNDEBUG")
        add_definitions ("-DWIN32")
        add_definitions ("-D_WINDOWS")
        add_definitions ("-D__WINDOWS__")
        add_definitions ("-D_CRT_SECURE_NO_WARNINGS")
        # if(DAP_RELEASE)
        set(_CCOPT "-mconsole -static -Wall -O3 -fno-ident -ffast-math -ftree-vectorize -mfpmath=sse -mmmx -msse2 -fno-asynchronous-unwind-tables -ffunction-sections -Wl,--gc-sections -Wl,--strip-all")
        # else()
        #     set(_CCOPT "-mconsole -static -Wall -pg")
        #     set(_LOPT "-mconsole -static -pg")
        # endif()
        # NOTE(review): globbing include/unix/*.h and include/unix/linux/*.h
        # under WIN32 looks wrong (it mirrors the UNIX header list above);
        # verify whether src/win32 headers were intended here.
        file(GLOB CORE_SRCS src/*.c)
        file(GLOB CORE_HEADERS include/*.h include/unix/*.h include/unix/linux/*.h )
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_CCOPT}")
        # NOTE(review): CMAKE_LINKER_FLAGS is not a standard CMake variable —
        # CMAKE_EXE_LINKER_FLAGS / CMAKE_SHARED_LINKER_FLAGS are the ones the
        # generators consume. Kept as-is in case something downstream reads it.
        set(CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} ${_LOPT}")
    endif()

    if(UNIX)
        # NOTE(review): UNIX is also true on macOS/BSD; DAP_OS_LINUX may be
        # over-broad here — confirm intended platforms.
        add_definitions ("-DDAP_OS_LINUX")
        if(DAP_RELEASE)
            set(_CCOPT "-Wall -O3 -fPIC -fno-pie -no-pie -fno-ident -ffast-math -ftree-vectorize -mfpmath=sse -mmmx -msse2 -fno-asynchronous-unwind-tables -ffunction-sections -Wl,--gc-sections -Wl,--strip-all")
        else()
            # Profiling (-pg) build for non-release configurations.
            set(_CCOPT "-Wall -pg -fPIC -fno-pie -no-pie")
            set(_LOPT "-pg")
            set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -pg")
        endif()
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_CCOPT}")
        set(CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} ${_LOPT}")
    endif()
endif()
add_library(${PROJECT_NAME} STATIC ${CORE_SRCS} ${CORE_UNIX_SRCS})

# These paths will be used by project-dependent project libraries.
# The older duplicate call exporting only include/ was removed; this single
# call is a superset (include/ plus the bundled rpmalloc headers).
target_include_directories(${PROJECT_NAME} INTERFACE include/ src/rpmalloc/)

if(WIN32)
    # NOTE(review): directory-scoped include_directories() leaks to every
    # target defined below; target_include_directories(${PROJECT_NAME} PRIVATE
    # include/) would be preferable if no sibling target relies on this.
    include_directories(include/)
endif()
if ( ${CMAKE_SYSTEM_NAME} MATCHES "Linux" )
set(LINUX "Linux")
......@@ -30,9 +104,7 @@ if(DARWIN)
target_link_libraries(${PROJECT_NAME} dap_core_darwin)
endif()
# Build and register the unit tests only when the (parent) project opts in
# via the BUILD_DAP_TESTS cache option.
if(BUILD_DAP_TESTS)
# enable_testing() must run before add_subdirectory() so that add_test()
# calls inside test/ are registered with CTest.
enable_testing()
add_subdirectory(test)
endif()
......@@ -89,7 +89,7 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
<div class="title">dap_common.h</div> </div>
</div><!--header-->
<div class="contents">
<a href="dap__common_8h.html">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment"> * Authors:</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment"> * Dmitriy A. Gearasimov &lt;kahovski@gmail.com&gt;</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment"> * DeM Labs Inc. https://demlabs.net</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="comment"> * DeM Labs Open source community https://github.com/demlabsinc</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="comment"> * Copyright (c) 2017-2018</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="comment"> * All rights reserved.</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="comment"> This file is part of DAP (Deus Applications Prototypes) the open source project</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="comment"> DAP (Deus Applicaions Prototypes) is free software: you can redistribute it and/or modify</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="comment"> it under the terms of the GNU General Public License as published by</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;<span class="comment"> the Free Software Foundation, 
either version 3 of the License, or</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="comment"> (at your option) any later version.</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="comment"> DAP is distributed in the hope that it will be useful,</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="comment"> but WITHOUT ANY WARRANTY; without even the implied warranty of</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160;<span class="comment"> MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;<span class="comment"> GNU General Public License for more details.</span></div><div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160;<span class="comment"> You should have received a copy of the GNU General Public License</span></div><div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160;<span class="comment"> along with any DAP based project. 
If not, see &lt;http://www.gnu.org/licenses/&gt;.</span></div><div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160;<span class="comment">*/</span></div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="preprocessor">#pragma once</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;<span class="preprocessor">#include &lt;stdarg.h&gt;</span></div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;<span class="preprocessor">#include &lt;stddef.h&gt;</span></div><div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160;<span class="preprocessor">#include &lt;stdlib.h&gt;</span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="preprocessor">#include &lt;time.h&gt;</span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;</div><div class="line"><a name="l00030"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a74a9d9e85c7cc12c155f147d9a971cad"> 30</a></span>&#160;<span class="preprocessor">#define DAP_NEW(a) ( (a*) malloc(sizeof(a)))</span></div><div class="line"><a name="l00031"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a80373ba28489011c16cd76f6d0ba5b53"> 31</a></span>&#160;<span class="preprocessor">#define DAP_NEW_SIZE(a,b) ( (a*) malloc(b))</span></div><div class="line"><a name="l00032"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a9270d1341aa00be475591ecfa8985c08"> 32</a></span>&#160;<span class="preprocessor">#define DAP_NEW_Z(a) ( (a*) calloc(1,sizeof(a)))</span></div><div class="line"><a name="l00033"></a><span class="lineno"><a class="line" href="dap__common_8h.html#ad0f1f3c74154c73a5dfd33598dfd375b"> 33</a></span>&#160;<span class="preprocessor">#define DAP_NEW_Z_SIZE(a,b) ( (a*) calloc(1,b))</span></div><div class="line"><a name="l00034"></a><span class="lineno"> 
34</span>&#160;</div><div class="line"><a name="l00035"></a><span class="lineno"><a class="line" href="dap__common_8h.html#abc94d3603906f97d0ce7368f44eebf8b"> 35</a></span>&#160;<span class="preprocessor">#define DAP_DELETE(a) free(a)</span></div><div class="line"><a name="l00036"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a7303c16a9766b284e07bd6790d65b59e"> 36</a></span>&#160;<span class="preprocessor">#define DAP_DUP(a) (__typeof(a) ret = memcpy(ret,a,sizeof(*a)) )</span></div><div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160;</div><div class="line"><a name="l00038"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a9757f0cc77df1fd0759b1b91a9f63ff0"> 38</a></span>&#160;<span class="preprocessor">#define DAP_PROTOCOL_VERSION 21</span></div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160;</div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160;<span class="preprocessor">#if defined(__GNUC__) ||defined (__clang__)</span></div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160;<span class="preprocessor">#define DAP_ALIGN_PACKED __attribute__((aligned(1),packed))</span></div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160;</div><div class="line"><a name="l00047"></a><span class="lineno"><a class="line" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e"> 47</a></span>&#160;<span class="keyword">enum</span> <a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a>{<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748ead95cd234638314479dea217167c37e4a">L_CRITICAL</a>=5,<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748ea5aa6d01f59e4b628af96f650fc5ecc15">L_ERROR</a>=4, <a class="code" 
href="dap__common_8h.html#ac91d55174d383848b976a34de843748ea83e54d43eb3fd145052377ecd43932a1">L_WARNING</a>=3,<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748eac0e398e95a19b2d3e23eb0620e91a515">L_NOTICE</a>=2,<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748eae580867d0ddde34905fea2f8669839b7">L_INFO</a>=1,<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748eabef96148470abb1ed19980e5b5c40ad4">L_DEBUG</a>=0};</div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160;</div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160;<span class="preprocessor">#ifdef __cplusplus</span></div><div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160;<span class="keyword">extern</span> <span class="stringliteral">&quot;C&quot;</span> {</div><div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160;</div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160;<span class="keywordtype">int</span> <a class="code" href="dap__common_8h.html#aff6dc9e558a255f56618643f5be92b08">dap_common_init</a>( <span class="keyword">const</span> <span class="keywordtype">char</span> * a_log_file );</div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#ab96d7e843bc09468220a7d264295cf69">dap_common_deinit</a>(<span class="keywordtype">void</span>);</div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160;</div><div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#acbe3239b788dc1105a094596354a7e42">_log_it</a>(<span class="keyword">const</span> <span class="keywordtype">char</span> * 
log_tag, <span class="keyword">enum</span> <a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a>, <span class="keyword">const</span> <span class="keywordtype">char</span> * format,...);</div><div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#ab3ae03011f7dfbbf40dce01f7bdd4157">_vlog_it</a>(<span class="keyword">const</span> <span class="keywordtype">char</span> * log_tag, <span class="keyword">enum</span> <a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a>, <span class="keyword">const</span> <span class="keywordtype">char</span> * format, va_list ap );</div><div class="line"><a name="l00058"></a><span class="lineno"><a class="line" href="dap__common_8h.html#acd8f4f3ce595157ca36ce6b61ca4195e"> 58</a></span>&#160;<span class="preprocessor">#define log_it(_log_level,...) _log_it(LOG_TAG,_log_level,##__VA_ARGS__)</span></div><div class="line"><a name="l00059"></a><span class="lineno"><a class="line" href="dap__common_8h.html#ab53061ef6723b1e4a233022ef9f33c76"> 59</a></span>&#160;<span class="preprocessor">#define vlog_it(a_log_level,a_format,a_ap) _vlog_it(LOG_TAG,a_log_level,a_format,a_ap)</span></div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160;</div><div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160;<span class="keyword">const</span> <span class="keywordtype">char</span> * <a class="code" href="dap__common_8h.html#aa76592df3b155b21f4d05cbd042db5f7">log_error</a>(<span class="keywordtype">void</span>);</div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#a98de0fce0a8fb5c3b0cfe80bebe8f691">set_log_level</a>(<span class="keyword">enum</span> <a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a> 
ll);</div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#ac8d0df7015664c720b27ee4f6e660479">dap_set_log_tag_width</a>(<span class="keywordtype">size_t</span> width);</div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160;</div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160;<span class="preprocessor">#ifdef __GNUC__</span></div><div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160;<span class="keywordtype">char</span> *<a class="code" href="dap__common_8c.html#a9c7174a7bbe81eedbd86ded2e247eee7">itoa</a>(<span class="keywordtype">int</span> i);</div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160;</div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160;</div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160;<span class="preprocessor">#elif _MSC_VER</span></div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160;<span class="keywordtype">char</span> *strndup(<span class="keyword">const</span> <span class="keywordtype">char</span> *s, <span class="keywordtype">size_t</span> n);</div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160;<span class="keywordtype">int</span> <a class="code" href="dap__common_8h.html#a6ab10606e8ac33dd93a0526933b192c8">time_to_rfc822</a>(<span class="keywordtype">char</span> * out, <span class="keywordtype">size_t</span> out_size_max, time_t t);</div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160;</div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160;<span class="keywordtype">int</span> <a class="code" 
href="dap__common_8h.html#ab027eeb728bcf25f75bc592fc627e4fe">get_select_breaker</a>(<span class="keywordtype">void</span>);</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160;<span class="keywordtype">int</span> <a class="code" href="dap__common_8h.html#aa3c5a3515672b9ecc8d114af678cb0a4">send_select_break</a>(<span class="keywordtype">void</span>);</div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160;<span class="keywordtype">char</span> * <a class="code" href="dap__common_8h.html#a00992fd7732b0ff40ce020728f84bc3a">exec_with_ret</a>(<span class="keyword">const</span> <span class="keywordtype">char</span> * a_cmd);</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160;<span class="keywordtype">char</span> * <a class="code" href="dap__common_8h.html#aa4a4c13332f14e44630f5e269048249a">exec_with_ret_multistring</a>(<span class="keyword">const</span> <span class="keywordtype">char</span> * a_cmd);</div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160;<span class="keywordtype">char</span> * <a class="code" href="dap__common_8h.html#aabbc0306fee1c3a56540b1604bbb516c">dap_random_string_create_alloc</a>(<span class="keywordtype">size_t</span> a_length);</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#a3fa34950395c0139c5c95510de7119a8">dap_random_string_fill</a>(<span class="keywordtype">char</span> *str, <span class="keywordtype">size_t</span> length);</div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160;</div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160;<span class="preprocessor">#ifdef __cplusplus</span></div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160;}</div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160;<span 
class="preprocessor">#endif</span></div><div class="ttc" id="dap__common_8h_html_ac91d55174d383848b976a34de843748eabef96148470abb1ed19980e5b5c40ad4"><div class="ttname"><a href="dap__common_8h.html#ac91d55174d383848b976a34de843748eabef96148470abb1ed19980e5b5c40ad4">L_DEBUG</a></div><div class="ttdef"><b>Definition:</b> dap_common.h:47</div></div>
<a href="dap__common_8h.html">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment"> * Authors:</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment"> * Dmitriy A. Gearasimov &lt;kahovski@gmail.com&gt;</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment"> * DeM Labs Inc. https://demlabs.net</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="comment"> * DeM Labs Open source community https://gitlab.demlabs.net/cellframe</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="comment"> * Copyright (c) 2017-2018</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="comment"> * All rights reserved.</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="comment"> This file is part of DAP (Deus Applications Prototypes) the open source project</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="comment"> DAP (Deus Applicaions Prototypes) is free software: you can redistribute it and/or modify</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="comment"> it under the terms of the GNU General Public License as published by</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;<span class="comment"> the Free Software 
Foundation, either version 3 of the License, or</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="comment"> (at your option) any later version.</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="comment"> DAP is distributed in the hope that it will be useful,</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="comment"> but WITHOUT ANY WARRANTY; without even the implied warranty of</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160;<span class="comment"> MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;<span class="comment"> GNU General Public License for more details.</span></div><div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160;<span class="comment"> You should have received a copy of the GNU General Public License</span></div><div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160;<span class="comment"> along with any DAP based project. 
If not, see &lt;http://www.gnu.org/licenses/&gt;.</span></div><div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160;<span class="comment">*/</span></div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="preprocessor">#pragma once</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;<span class="preprocessor">#include &lt;stdarg.h&gt;</span></div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;<span class="preprocessor">#include &lt;stddef.h&gt;</span></div><div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160;<span class="preprocessor">#include &lt;stdlib.h&gt;</span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="preprocessor">#include &lt;time.h&gt;</span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;</div><div class="line"><a name="l00030"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a74a9d9e85c7cc12c155f147d9a971cad"> 30</a></span>&#160;<span class="preprocessor">#define DAP_NEW(a) ( (a*) malloc(sizeof(a)))</span></div><div class="line"><a name="l00031"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a80373ba28489011c16cd76f6d0ba5b53"> 31</a></span>&#160;<span class="preprocessor">#define DAP_NEW_SIZE(a,b) ( (a*) malloc(b))</span></div><div class="line"><a name="l00032"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a9270d1341aa00be475591ecfa8985c08"> 32</a></span>&#160;<span class="preprocessor">#define DAP_NEW_Z(a) ( (a*) calloc(1,sizeof(a)))</span></div><div class="line"><a name="l00033"></a><span class="lineno"><a class="line" href="dap__common_8h.html#ad0f1f3c74154c73a5dfd33598dfd375b"> 33</a></span>&#160;<span class="preprocessor">#define DAP_NEW_Z_SIZE(a,b) ( (a*) calloc(1,b))</span></div><div class="line"><a name="l00034"></a><span class="lineno"> 
34</span>&#160;</div><div class="line"><a name="l00035"></a><span class="lineno"><a class="line" href="dap__common_8h.html#abc94d3603906f97d0ce7368f44eebf8b"> 35</a></span>&#160;<span class="preprocessor">#define DAP_DELETE(a) free(a)</span></div><div class="line"><a name="l00036"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a7303c16a9766b284e07bd6790d65b59e"> 36</a></span>&#160;<span class="preprocessor">#define DAP_DUP(a) (__typeof(a) ret = memcpy(ret,a,sizeof(*a)) )</span></div><div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160;</div><div class="line"><a name="l00038"></a><span class="lineno"><a class="line" href="dap__common_8h.html#a9757f0cc77df1fd0759b1b91a9f63ff0"> 38</a></span>&#160;<span class="preprocessor">#define DAP_PROTOCOL_VERSION 21</span></div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160;</div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160;<span class="preprocessor">#if defined(__GNUC__) ||defined (__clang__)</span></div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160;<span class="preprocessor">#define DAP_ALIGN_PACKED __attribute__((aligned(1),packed))</span></div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160;</div><div class="line"><a name="l00047"></a><span class="lineno"><a class="line" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e"> 47</a></span>&#160;<span class="keyword">enum</span> <a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a>{<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748ead95cd234638314479dea217167c37e4a">L_CRITICAL</a>=5,<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748ea5aa6d01f59e4b628af96f650fc5ecc15">L_ERROR</a>=4, <a class="code" 
href="dap__common_8h.html#ac91d55174d383848b976a34de843748ea83e54d43eb3fd145052377ecd43932a1">L_WARNING</a>=3,<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748eac0e398e95a19b2d3e23eb0620e91a515">L_NOTICE</a>=2,<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748eae580867d0ddde34905fea2f8669839b7">L_INFO</a>=1,<a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748eabef96148470abb1ed19980e5b5c40ad4">L_DEBUG</a>=0};</div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160;</div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160;<span class="preprocessor">#ifdef __cplusplus</span></div><div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160;<span class="keyword">extern</span> <span class="stringliteral">&quot;C&quot;</span> {</div><div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160;</div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160;<span class="keywordtype">int</span> <a class="code" href="dap__common_8h.html#aff6dc9e558a255f56618643f5be92b08">dap_common_init</a>( <span class="keyword">const</span> <span class="keywordtype">char</span> * a_log_file );</div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#ab96d7e843bc09468220a7d264295cf69">dap_common_deinit</a>(<span class="keywordtype">void</span>);</div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160;</div><div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#acbe3239b788dc1105a094596354a7e42">_log_it</a>(<span class="keyword">const</span> <span class="keywordtype">char</span> * 
log_tag, <span class="keyword">enum</span> <a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a>, <span class="keyword">const</span> <span class="keywordtype">char</span> * format,...);</div><div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#ab3ae03011f7dfbbf40dce01f7bdd4157">_vlog_it</a>(<span class="keyword">const</span> <span class="keywordtype">char</span> * log_tag, <span class="keyword">enum</span> <a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a>, <span class="keyword">const</span> <span class="keywordtype">char</span> * format, va_list ap );</div><div class="line"><a name="l00058"></a><span class="lineno"><a class="line" href="dap__common_8h.html#acd8f4f3ce595157ca36ce6b61ca4195e"> 58</a></span>&#160;<span class="preprocessor">#define log_it(_log_level,...) _log_it(LOG_TAG,_log_level,##__VA_ARGS__)</span></div><div class="line"><a name="l00059"></a><span class="lineno"><a class="line" href="dap__common_8h.html#ab53061ef6723b1e4a233022ef9f33c76"> 59</a></span>&#160;<span class="preprocessor">#define vlog_it(a_log_level,a_format,a_ap) _vlog_it(LOG_TAG,a_log_level,a_format,a_ap)</span></div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160;</div><div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160;<span class="keyword">const</span> <span class="keywordtype">char</span> * <a class="code" href="dap__common_8h.html#aa76592df3b155b21f4d05cbd042db5f7">log_error</a>(<span class="keywordtype">void</span>);</div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#a98de0fce0a8fb5c3b0cfe80bebe8f691">set_log_level</a>(<span class="keyword">enum</span> <a class="code" href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a> 
ll);</div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#ac8d0df7015664c720b27ee4f6e660479">dap_set_log_tag_width</a>(<span class="keywordtype">size_t</span> width);</div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160;</div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160;<span class="preprocessor">#ifdef __GNUC__</span></div><div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160;<span class="keywordtype">char</span> *<a class="code" href="dap__common_8c.html#a9c7174a7bbe81eedbd86ded2e247eee7">itoa</a>(<span class="keywordtype">int</span> i);</div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160;</div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160;</div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160;<span class="preprocessor">#elif _MSC_VER</span></div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160;<span class="keywordtype">char</span> *strndup(<span class="keyword">const</span> <span class="keywordtype">char</span> *s, <span class="keywordtype">size_t</span> n);</div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160;<span class="preprocessor">#endif</span></div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160;<span class="keywordtype">int</span> <a class="code" href="dap__common_8h.html#a6ab10606e8ac33dd93a0526933b192c8">time_to_rfc822</a>(<span class="keywordtype">char</span> * out, <span class="keywordtype">size_t</span> out_size_max, time_t t);</div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160;</div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160;<span class="keywordtype">int</span> <a class="code" 
href="dap__common_8h.html#ab027eeb728bcf25f75bc592fc627e4fe">get_select_breaker</a>(<span class="keywordtype">void</span>);</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160;<span class="keywordtype">int</span> <a class="code" href="dap__common_8h.html#aa3c5a3515672b9ecc8d114af678cb0a4">send_select_break</a>(<span class="keywordtype">void</span>);</div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160;<span class="keywordtype">char</span> * <a class="code" href="dap__common_8h.html#a00992fd7732b0ff40ce020728f84bc3a">exec_with_ret</a>(<span class="keyword">const</span> <span class="keywordtype">char</span> * a_cmd);</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160;<span class="keywordtype">char</span> * <a class="code" href="dap__common_8h.html#aa4a4c13332f14e44630f5e269048249a">exec_with_ret_multistring</a>(<span class="keyword">const</span> <span class="keywordtype">char</span> * a_cmd);</div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160;<span class="keywordtype">char</span> * <a class="code" href="dap__common_8h.html#aabbc0306fee1c3a56540b1604bbb516c">dap_random_string_create_alloc</a>(<span class="keywordtype">size_t</span> a_length);</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160;<span class="keywordtype">void</span> <a class="code" href="dap__common_8h.html#a3fa34950395c0139c5c95510de7119a8">dap_random_string_fill</a>(<span class="keywordtype">char</span> *str, <span class="keywordtype">size_t</span> length);</div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160;</div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160;<span class="preprocessor">#ifdef __cplusplus</span></div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160;}</div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160;<span 
class="preprocessor">#endif</span></div><div class="ttc" id="dap__common_8h_html_ac91d55174d383848b976a34de843748eabef96148470abb1ed19980e5b5c40ad4"><div class="ttname"><a href="dap__common_8h.html#ac91d55174d383848b976a34de843748eabef96148470abb1ed19980e5b5c40ad4">L_DEBUG</a></div><div class="ttdef"><b>Definition:</b> dap_common.h:47</div></div>
<div class="ttc" id="dap__common_8h_html_ac8d0df7015664c720b27ee4f6e660479"><div class="ttname"><a href="dap__common_8h.html#ac8d0df7015664c720b27ee4f6e660479">dap_set_log_tag_width</a></div><div class="ttdeci">void dap_set_log_tag_width(size_t width)</div><div class="ttdoc">dap_set_log_tag_width Sets the length of the label </div><div class="ttdef"><b>Definition:</b> dap_common.c:77</div></div>
<div class="ttc" id="dap__common_8h_html_ac91d55174d383848b976a34de843748ea5aa6d01f59e4b628af96f650fc5ecc15"><div class="ttname"><a href="dap__common_8h.html#ac91d55174d383848b976a34de843748ea5aa6d01f59e4b628af96f650fc5ecc15">L_ERROR</a></div><div class="ttdef"><b>Definition:</b> dap_common.h:47</div></div>
<div class="ttc" id="dap__common_8h_html_ac91d55174d383848b976a34de843748e"><div class="ttname"><a href="dap__common_8h.html#ac91d55174d383848b976a34de843748e">log_level</a></div><div class="ttdeci">log_level</div><div class="ttdoc">The log_level enum. </div><div class="ttdef"><b>Definition:</b> dap_common.h:47</div></div>
......
......@@ -3,7 +3,7 @@
* Dmitriy A. Gearasimov <kahovski@gmail.com>
* Anatolii Kurotych <akurotych@gmail.com>
* DeM Labs Inc. https://demlabs.net
* DeM Labs Open source community https://github.com/demlabsinc
* DeM Labs Open source community https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2019
* All rights reserved.
......@@ -32,15 +32,7 @@
#include <stdlib.h>
#include <time.h>
#define DAP_NEW( a ) ( (a*) malloc(sizeof(a)) )
#define DAP_NEW_SIZE( a, b ) ( (a*) malloc(b) )
#define DAP_NEW_Z( a ) ( (a*) calloc(1,sizeof(a)) )
#define DAP_NEW_Z_SIZE( a, b )( (a*) calloc(1,b) )
#define DAP_REALLOC( a, b ) ( realloc(a,b) )
#define DAP_DELETE(a) free( a )
#define DAP_DUP(a) ( __typeof(a) ret = memcpy(ret,a,sizeof(*a)) )
#define DAP_PROTOCOL_VERSION 22
#include "portable_endian.h"
#if defined(__GNUC__) ||defined (__clang__)
#define DAP_ALIGN_PACKED __attribute__((aligned(1),packed))
......@@ -51,9 +43,11 @@
#ifdef _MSC_VER
#define DAP_STATIC_INLINE static __forceinline
#define DAP_INLINE __forceinline
#define DAP_ALIGNED(x) __declspec( align(x) )
#else
#define DAP_STATIC_INLINE static __attribute__((always_inline)) inline
#define DAP_INLINE __attribute__((always_inline)) inline
#define DAP_ALIGNED(x) __attribute__ ((aligned (x)))
#endif
#ifndef TRUE
......@@ -61,19 +55,152 @@
#define FALSE false
#endif
#ifndef ROUNDUP
#define ROUNDUP(n,width) (((n) + (width) - 1) & ~unsigned((width) - 1))
#endif
#if DAP_USE_RPMALLOC
#include "rpmalloc.h"
#define DAP_MALLOC(a) rpmalloc(a)
#define DAP_FREE(a) rpfree(a)
#define DAP_CALLOC(a, b) rpcalloc(a, b)
#define DAP_ALMALLOC(a, b) rpaligned_alloc(a, b)
#define DAP_ALREALLOC(a,b,c) rpaligned_realloc(a, b, c, 0, 0)
#define DAP_ALFREE(a) rpfree(a)
#define DAP_NEW(a) ((a*) rpmalloc(sizeof(a)))
#define DAP_NEW_SIZE(a, b) ((a*) rpmalloc(b))
#define DAP_NEW_Z(a) ((a*) rpcalloc(1,sizeof(a)))
#define DAP_NEW_Z_SIZE(a, b) ((a*) rpcalloc(1,b))
#define DAP_REALLOC(a, b) rprealloc(a,b)
#define DAP_DELETE(a) rpfree(a)
#define DAP_DUP(a) ( __typeof(a) ret = memcpy(ret,a,sizeof(*a)) )
#else
#define DAP_MALLOC(a)         malloc(a)
#define DAP_FREE(a)           free(a)
#define DAP_CALLOC(a, b)      calloc(a, b)
#define DAP_ALMALLOC(a, b)    _dap_aligned_alloc(a, b)
// fix: _dap_aligned_realloc() takes (alignment, ptr, size); the macro passed only
// two arguments. Three arguments also matches the rpmalloc branch's DAP_ALREALLOC(a,b,c).
#define DAP_ALREALLOC(a, b, c) _dap_aligned_realloc(a, b, c)
// fix: the old expansion referenced an undeclared `b`; _dap_aligned_free() takes one argument
#define DAP_ALFREE(a)         _dap_aligned_free(a)
#define DAP_NEW( a )          ((a*) malloc(sizeof(a)))
#define DAP_NEW_SIZE(a, b)    ((a*) malloc(b) )
#define DAP_NEW_Z( a )        ((a*) calloc(1,sizeof(a)))
#define DAP_NEW_Z_SIZE(a, b)  ((a*) calloc(1,b))
#define DAP_REALLOC(a, b)     realloc(a,b)
#define DAP_DELETE(a)         free(a)
// fix: the old expansion declared `ret` inside an expression and memcpy'd into it while
// uninitialized — invalid C that could not compile where used. This GNU statement
// expression (GCC/Clang, consistent with the existing __typeof usage) returns a
// malloc'd copy of *a, or NULL if the allocation fails.
#define DAP_DUP(a)            ({ __typeof(a) _ret = (__typeof(a)) malloc(sizeof(*(a))); if (_ret) memcpy(_ret, (a), sizeof(*(a))); _ret; })
#endif
/**
 * Allocate `size` bytes aligned to `alignment` on top of plain DAP_MALLOC.
 * The raw (unaligned) base address is stashed in the word just below the
 * returned aligned pointer so _dap_aligned_free() can recover and free it.
 * Returns NULL on allocation failure.
 * assumes alignment is a nonzero power of two (mask trick below) — TODO confirm callers
 */
DAP_STATIC_INLINE void *_dap_aligned_alloc( uintptr_t alignment, uintptr_t size )
{
// over-allocate: worst-case alignment padding plus one word for the base-pointer stash
uintptr_t ptr = (uintptr_t) DAP_MALLOC( size + (alignment * 2) + sizeof(void *) );
if ( !ptr )
return (void *)ptr;
// round up past the stash word to the next multiple of alignment
uintptr_t al_ptr = ( ptr + sizeof(void *) + alignment) & ~(alignment - 1 );
// remember the real allocation just below the aligned address
((uintptr_t *)al_ptr)[-1] = ptr;
return (void *)al_ptr;
}
/**
 * Grow/shrink an allocation to `size` bytes, returning an address aligned to
 * `alignment` (assumed a nonzero power of two) with the base pointer stashed
 * one word below it, as in _dap_aligned_alloc().
 *
 * NOTE(review): `bptr` is passed straight to DAP_REALLOC. Pointers returned by
 * _dap_aligned_alloc()/this function are the *aligned* address, not the malloc
 * base, so re-passing them here is undefined behavior; and even for a raw
 * pointer the payload is not re-shifted to the new alignment offset. Confirm
 * callers only ever pass raw allocations — otherwise this needs to read the
 * stashed base pointer and memmove the payload.
 */
DAP_STATIC_INLINE void *_dap_aligned_realloc( uintptr_t alignment, void *bptr, uintptr_t size )
{
uintptr_t ptr = (uintptr_t) DAP_REALLOC( bptr, size + (alignment * 2) + sizeof(void *) );
if ( !ptr )
return (void *)ptr;
// recompute the aligned address inside the (possibly moved) block
uintptr_t al_ptr = ( ptr + sizeof(void *) + alignment) & ~(alignment - 1 );
((uintptr_t *)al_ptr)[-1] = ptr;
return (void *)al_ptr;
}
/**
 * Release a block obtained from _dap_aligned_alloc().
 * The original (unaligned) base address is stashed one word before the
 * aligned pointer; fetch it and hand it to DAP_FREE.
 * A NULL argument is silently ignored.
 */
DAP_STATIC_INLINE void _dap_aligned_free( void *ptr )
{
    if ( ptr ) {
        uintptr_t *l_slot = (uintptr_t *)ptr;
        DAP_FREE( (void *)l_slot[-1] );
    }
}
#define DAP_PROTOCOL_VERSION 22
#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#ifndef min
#define min MIN
#endif
#ifndef max
#define max MAX
#endif
#ifndef LOWORD
#define LOWORD( l ) ((uint16_t) (((uintptr_t) (l)) & 0xFFFF))
#define HIWORD( l ) ((uint16_t) ((((uintptr_t) (l)) >> 16) & 0xFFFF))
#define LOBYTE( w ) ((uint8_t) (((uintptr_t) (w)) & 0xFF))
#define HIBYTE( w ) ((uint8_t) ((((uintptr_t) (w)) >> 8) & 0xFF))
#endif
#ifndef RGB
#define RGB(r,g,b) ((uint32_t)(((uint8_t)(r)|((uint16_t)((uint8_t)(g))<<8))|(((uint32_t)(uint8_t)(b))<<16)))
#define RGBA(r, g, b, a) ((uint32_t) ((uint32_t)RGB(r,g,b) | (uint32_t)(a) << 24))
#define GetRValue(rgb) (LOBYTE(rgb))
#define GetGValue(rgb) (LOBYTE(((uint16_t)(rgb)) >> 8))
#define GetBValue(rgb) (LOBYTE((rgb)>>16))
#define GetAValue(rgb) (LOBYTE((rgb)>>24))
#endif
#define QBYTE RGBA
#define DAP_LOG_HISTORY_STR_SIZE 128
#define DAP_LOG_HISTORY_MAX_STRINGS 1024
#define DAP_LOG_HISTORY_MAX_STRINGS 4096
#define DAP_LOG_HISTORY_BUFFER_SIZE (DAP_LOG_HISTORY_STR_SIZE * DAP_LOG_HISTORY_MAX_STRINGS)
#define DAP_LOG_HISTORY_M (DAP_LOG_HISTORY_MAX_STRINGS - 1)
#ifdef _WIN32
#define dap_sscanf __mingw_sscanf
#define dap_vsscanf __mingw_vsscanf
#define dap_scanf __mingw_scanf
#define dap_vscanf __mingw_vscanf
#define dap_fscanf __mingw_fscanf
#define dap_vfscanf __mingw_vfscanf
#define dap_sprintf __mingw_sprintf
#define dap_snprintf __mingw_snprintf
#define dap_printf __mingw_printf
#define dap_vprintf __mingw_vprintf
#define dap_fprintf __mingw_fprintf
#define dap_vfprintf __mingw_vfprintf
#define dap_vsprintf __mingw_vsprintf
#define dap_vsnprintf __mingw_vsnprintf
#define dap_asprintf __mingw_asprintf
#define dap_vasprintf __mingw_vasprintf
#else
#define dap_sscanf sscanf
#define dap_vsscanf vsscanf
#define dap_scanf scanf
#define dap_vscanf vscanf
#define dap_fscanf fscanf
#define dap_vfscanf vfscanf
#define dap_sprintf sprintf
#define dap_snprintf snprintf
#define dap_printf printf
#define dap_vprintf vprintf
#define dap_fprintf fprintf
#define dap_vfprintf vfprintf
#define dap_vsprintf vsprintf
#define dap_vsnprintf vsnprintf
#define dap_asprintf asprintf
#define dap_vasprintf vasprintf
#endif
/**
* @brief The log_level enum
*/
......@@ -83,7 +210,7 @@ typedef enum dap_log_level {
L_DEBUG = 0,
L_INFO = 1,
L_NOTICE = 2,
L_MESSAGE = 3,
L_MSG = 3,
L_DAP = 4,
L_WARNING = 5,
L_ATT = 6,
......@@ -105,6 +232,29 @@ typedef struct dap_log_str_s {
extern "C" {
#endif
extern uint16_t htoa_lut256[ 256 ];
/*
 * dap_htoa64( out, in, len ) — convert `len` bytes at `in` to ASCII hex,
 * two characters per byte, using the 256-entry uint16_t lookup table
 * htoa_lut256 (each entry packs both hex digits of one byte).
 * `out` must have room for 2*len bytes; 8 input bytes are consumed per
 * iteration via 64-bit loads.
 *
 * NOTE(review): `len` must be a nonzero multiple of 8 — `_len -= 8` on the
 * unsigned `_len` wraps around otherwise and the loop never terminates.
 * Buffers must also be suitably aligned for the uint64_t/uint16_t accesses.
 */
#define dap_htoa64( out, in, len ) \
{\
uintptr_t _len = len; \
uint16_t *__restrict _out = (uint16_t *__restrict)out; \
uint64_t *__restrict _in = (uint64_t *__restrict)in;\
\
while ( _len ) {\
uint64_t _val = *_in ++;\
_out[0] = htoa_lut256[ _val & 0x00000000000000FF ];\
_out[1] = htoa_lut256[ (_val & 0x000000000000FF00) >> 8 ];\
_out[2] = htoa_lut256[ (_val & 0x0000000000FF0000) >> 16 ];\
_out[3] = htoa_lut256[ (_val & 0x00000000FF000000) >> 24 ];\
_out[4] = htoa_lut256[ (_val & 0x000000FF00000000) >> 32 ];\
_out[5] = htoa_lut256[ (_val & 0x0000FF0000000000) >> 40 ];\
_out[6] = htoa_lut256[ (_val & 0x00FF000000000000) >> 48 ];\
_out[7] = htoa_lut256[ (_val & 0xFF00000000000000) >> 56 ];\
_out += 8;\
_len -= 8;\
}\
}
typedef enum {
DAP_ASCII_ALNUM = 1 << 0,
DAP_ASCII_ALPHA = 1 << 1,
......@@ -144,7 +294,9 @@ static const uint16_t s_ascii_table_data[256] = {
#define dap_ascii_isspace(c) (s_ascii_table_data[(unsigned char) (c)] & DAP_ASCII_SPACE) != 0
#define dap_ascii_isalpha(c) (s_ascii_table_data[(unsigned char) (c)] & DAP_ASCII_ALPHA) != 0
int dap_common_init( const char * a_log_file );
//int dap_common_init( const char * a_log_file );
int dap_common_init( const char *console_title, const char *a_log_file );
void dap_common_deinit(void);
// set max items in log list
......@@ -153,10 +305,11 @@ void dap_log_set_max_item(unsigned int a_max);
char *dap_log_get_item(time_t a_start_time, int a_limit);
void _log_it(const char * log_tag, enum dap_log_level, const char * format,...);
void _vlog_it(const char * log_tag, enum dap_log_level, const char * format, va_list ap );
#define log_it(_log_level,...) _log_it(LOG_TAG,_log_level,##__VA_ARGS__)
#define vlog_it(a_log_level,a_format,a_ap) _vlog_it(LOG_TAG,a_log_level,a_format,a_ap)
void _log_it( const char * log_tag, enum dap_log_level, const char * format,... );
void _vlog_it( const char * log_tag, enum dap_log_level, const char * format, va_list ap );
#define log_it(_log_level,...) _log_it( LOG_TAG, _log_level, ##__VA_ARGS__)
#define vlog_it( a_log_level, a_format, a_ap ) _vlog_it( LOG_TAG, a_log_level, a_format, a_ap )
const char * log_error(void);
void dap_log_level_set(enum dap_log_level ll);
......@@ -179,6 +332,8 @@ void dap_dump_hex(const void* data, size_t size);
size_t dap_hex2bin(uint8_t *a_out, const char *a_in, size_t a_len);
size_t dap_bin2hex(char *a_out, const void *a_in, size_t a_len);
void dap_digit_from_string(const char *num_str, uint8_t *raw, size_t raw_len);
void dap_digit_from_string2(const char *num_str, uint8_t *raw, size_t raw_len);
#ifdef __MINGW32__
int exec_silent(const char *a_cmd);
......
......@@ -3,7 +3,7 @@
* Dmitriy A. Gearasimov <kahovski@gmail.com>
* Anatolii Kurotych <akurotych@gmail.com>
* DeM Labs Inc. https://demlabs.net
* DeM Labs Open source community https://github.com/demlabsinc
* DeM Labs Open source community https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2019
* All rights reserved.
......
......@@ -2,7 +2,7 @@
* Authors:
* Aleksandr Lysikov <alexander.lysikov@demlabs.net>
* DeM Labs Inc. https://demlabs.net
* Kelvin Project https://github.com/kelvinblockchain
* Kelvin Project https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2019
* All rights reserved.
......
......@@ -2,7 +2,7 @@
* Authors:
* Dmitriy A. Gearasimov <gerasimov.dmitriy@demlabs.net>
* DeM Labs Inc. https://demlabs.net
* Kelvin Project https://github.com/kelvinblockchain
* Kelvin Project https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2018
* All rights reserved.
......
......@@ -32,6 +32,12 @@
#undef clamp
#define clamp(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))
#ifdef _WIN32
char *strptime( char *buff, const char *fmt, struct tm *tm );
#endif
size_t dap_strlen(const char *a_str);
// compare a_str1 and a_str2
int dap_strcmp(const char *a_str1, const char *a_str2);
......
// "License": Public Domain
// I, Mathias Panzenböck, place this file hereby into the public domain. Use it at your own risk for whatever you like.
// In case there are jurisdictions that don't support putting things in the public domain you can also consider it to
// be "dual licensed" under the BSD, MIT and Apache licenses, if you want to. This code is trivial anyway. Consider it
// an example on how to get the endian conversion functions on different platforms.
#ifndef PORTABLE_ENDIAN_H__
#define PORTABLE_ENDIAN_H__
#if (defined(_WIN16) || defined(_WIN32) || defined(_WIN64)) && !defined(__WINDOWS__)
# define __WINDOWS__
#endif
#if defined(__linux__) || defined(__CYGWIN__)
# include <endian.h>
#elif defined(__APPLE__)
# include <libkern/OSByteOrder.h>
# define htobe16(x) OSSwapHostToBigInt16(x)
# define htole16(x) OSSwapHostToLittleInt16(x)
# define be16toh(x) OSSwapBigToHostInt16(x)
# define le16toh(x) OSSwapLittleToHostInt16(x)
# define htobe32(x) OSSwapHostToBigInt32(x)
# define htole32(x) OSSwapHostToLittleInt32(x)
# define be32toh(x) OSSwapBigToHostInt32(x)
# define le32toh(x) OSSwapLittleToHostInt32(x)
# define htobe64(x) OSSwapHostToBigInt64(x)
# define htole64(x) OSSwapHostToLittleInt64(x)
# define be64toh(x) OSSwapBigToHostInt64(x)
# define le64toh(x) OSSwapLittleToHostInt64(x)
# define __BYTE_ORDER BYTE_ORDER
# define __BIG_ENDIAN BIG_ENDIAN
# define __LITTLE_ENDIAN LITTLE_ENDIAN
# define __PDP_ENDIAN PDP_ENDIAN
#elif defined(__OpenBSD__)
# include <sys/endian.h>
#elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
# include <sys/endian.h>
# define be16toh(x) betoh16(x)
# define le16toh(x) letoh16(x)
# define be32toh(x) betoh32(x)
# define le32toh(x) letoh32(x)
# define be64toh(x) betoh64(x)
# define le64toh(x) letoh64(x)
#elif defined(__WINDOWS__)
# include <windows.h>
# if BYTE_ORDER == LITTLE_ENDIAN
# if defined(_MSC_VER)
# include <stdlib.h>
# define htobe16(x) _byteswap_ushort(x)
# define htole16(x) (x)
# define be16toh(x) _byteswap_ushort(x)
# define le16toh(x) (x)
# define htobe32(x) _byteswap_ulong(x)
# define htole32(x) (x)
# define be32toh(x) _byteswap_ulong(x)
# define le32toh(x) (x)
# define htobe64(x) _byteswap_uint64(x)
# define htole64(x) (x)
# define be64toh(x) _byteswap_uint64(x)
# define le64toh(x) (x)
# elif defined(__GNUC__) || defined(__clang__)
# define htobe16(x) __builtin_bswap16(x)
# define htole16(x) (x)
# define be16toh(x) __builtin_bswap16(x)
# define le16toh(x) (x)
# define htobe32(x) __builtin_bswap32(x)
# define htole32(x) (x)
# define be32toh(x) __builtin_bswap32(x)
# define le32toh(x) (x)
# define htobe64(x) __builtin_bswap64(x)
# define htole64(x) (x)
# define be64toh(x) __builtin_bswap64(x)
# define le64toh(x) (x)
# else
# error platform not supported
# endif
# else
# error byte order not supported
# endif
# define __BYTE_ORDER BYTE_ORDER
# define __BIG_ENDIAN BIG_ENDIAN
# define __LITTLE_ENDIAN LITTLE_ENDIAN
# define __PDP_ENDIAN PDP_ENDIAN
#else
# error platform not supported
#endif
#endif
......@@ -335,9 +335,9 @@ void circular_buffer_print(circular_buffer_t cBuf, bool hex)
c = b[i];
}
if(hex)
sprintf(str+i*2, "%02X|",c);
dap_sprintf(str+i*2, "%02X|",c);
else
sprintf(str+i*2, "%c|",c);
dap_sprintf(str+i*2, "%c|",c);
}
printf("CircularBuffer: %s <size %zu dataSize:%zu>\n",str,circular_buffer_get_capacity(cBuf),circular_buffer_get_data_size(cBuf));
......
/*
* Authors:
* Dmitriy A. Gearasimov <kahovski@gmail.com>
* DeM Labs Inc. https://demlabs.net
* DeM Labs Open source community https://github.com/demlabsinc
* Copyright (c) 2017-2019
* All rights reserved.
This file is part of DAP (Deus Applications Prototypes) the open source project
DAP (Deus Applicaions Prototypes) is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DAP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with any DAP based project. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h> /* 'nanosleep' */
#include <unistd.h> /* 'pipe', 'read', 'write' */
#include <string.h>
#include <stdarg.h>
#include <assert.h>
#include <stdint.h>
#ifdef DAP_OS_ANDROID
#include <android/log.h>
#endif
#ifndef _WIN32
#include <pthread.h>
#include <syslog.h>
// Quick and dirty, I'm not sure but afair somewhere it was in UNIX systems too
#define min(a,b) (((a)<(b))?(a):(b))
#define max(a,b) (((a)>(b))?(a):(b))
#else // WIN32
#include <stdlib.h>
#include <windows.h>
#include <process.h>
#include <pthread.h>
///typedef HANDLE pthread_mutex_t;
#define popen _popen
#define pclose _pclose
#define pipe(pfds) _pipe(pfds, 4096, 0x8000)
#endif
#include "dap_common.h"
#include "dap_strfuncs.h"
#include "dap_string.h"
#include "dap_list.h"
#define LAST_ERROR_MAX 255
#define LOG_TAG "dap_common"
static char s_last_error[LAST_ERROR_MAX] = {0};
static enum dap_log_level dap_log_level = L_DEBUG;
static FILE *s_log_file = NULL;
static char log_tag_fmt_str[10];
static pthread_mutex_t s_list_logs_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t logh_total = 0; // log history size
static uint32_t logh_outindex = 0;
static uint8_t *log_buffer = NULL;
static uint8_t *temp_buffer = NULL;
static uint8_t *end_of_log_buffer = NULL;
static dap_log_str_t *log_history = NULL;
const char *log_level_tag[ 16 ] = {
" [DBG] [ ", // L_DEBUG = 0
" [INF] [ ", // L_INFO = 1,
" [ * ] [ ", // L_NOTICE = 2,
" [MSG] [ ", // L_MESSAGE = 3,
" [DAP] [ ", // L_DAP = 4,
" [WRN] [ ", // L_WARNING = 5,
" [ATT] [ ", // L_ATT = 6,
" [ERR] [ ", // L_ERROR = 7,
" [ ! ] [ ", // L_CRITICAL = 8,
" [---] [ ", // = 9
" [---] [ ", // = 10
" [---] [ ", // = 11
" [---] [ ", // = 12
" [---] [ ", // = 13
" [---] [ ", // = 14
" [---] [ ", // = 15
};
// Per-level ANSI colour prefixes, indexed by enum dap_log_level (used by _log_it()).
const char *ansi_seq_color[ 16 ] = {
"\x1b[0;37;40m", // L_DEBUG = 0
"\x1b[1;32;40m", // L_INFO = 1,
"\x1b[0;32;40m", // L_NOTICE = 2,
"\x1b[1;33;40m", // L_MSG = 3,
"\x1b[0;36;40m", // L_DAP = 4,
"\x1b[1;35;40m", // L_WARNING = 5,
"\x1b[1;36;40m", // L_ATT = 6,
"\x1b[1;31;40m", // L_ERROR = 7,
"\x1b[1;37;41m", // L_CRITICAL = 8,
"", // = 9
"", // = 10
"", // = 11
"", // = 12
"", // = 13
"", // = 14
"", // = 15
};
#ifdef _WIN32
OSVERSIONINFO win32_osvi;
bool bUseANSIEscapeSequences = false;
HANDLE hWin32ConOut = INVALID_HANDLE_VALUE;
WORD log_level_colors[ 16 ] = {
7, // L_DEBUG
10, // L_INFO
2, // L_NOTICE
11, // L_MESSAGE
9, // L_DAP
13, // L_WARNING
14, // L_ATT
12, // L_ERROR
(12 << 4) + 15, // L_CRITICAL
7,
7,
7,
7,
7,
7,
7
};
#endif
uint32_t ansi_seq_color_len[ 16 ];
/**
 * @brief dap_log_level_set  Set the global logging threshold.
 * @param[in] ll  Messages with a level below this are discarded by _log_it().
 */
void dap_log_level_set( enum dap_log_level ll )
{
    dap_log_level = ll;
}
/**
 * @brief dap_log_level_get  Report the current logging threshold.
 * @return The active dap_log_level value.
 */
enum dap_log_level dap_log_level_get( void )
{
    return dap_log_level;
}
/**
 * @brief dap_set_log_tag_width  Set the width of the log-tag field.
 * @param[in] width  Field width; at most 99 so that two digits plus the
 *                   surrounding "[%s]\t" fit the fixed log_tag_fmt_str buffer.
 *
 * Rebuilds log_tag_fmt_str as "[%<width>s]\t" with one bounded snprintf
 * instead of the previous strcpy/strcat + dap_itoa chain.
 */
void dap_set_log_tag_width(size_t width) {

    if (width > 99) {
        // fix: %zu is the correct conversion for size_t (was %zd)
        fprintf(stderr,"Can't set width %zu", width);
        return;
    }

    // construct new log_tag_fmt_str, e.g. width 8 -> "[%8s]\t"
    snprintf( log_tag_fmt_str, sizeof(log_tag_fmt_str), "[%%%us]\t", (unsigned)width );
}
/**
 * @brief dap_common_init  Initialise the logging subsystem.
 * @param[in] a_log_file  Path of a log file to append to, or NULL for console-only logging.
 * @return 0 on success, -1 on allocation or file-open failure.
 *
 * Allocates one contiguous region: DAP_LOG_HISTORY_BUFFER_SIZE bytes for the
 * in-memory history ring plus 65536 bytes of scratch space used by _log_it()
 * to format a message before it is copied into the ring.
 */
int dap_common_init( const char *a_log_file )
{
    srand( (unsigned int)time(NULL) );

    // init default log tag 8 width
    strcpy( log_tag_fmt_str, "[%8s]\t");

    log_buffer = (uint8_t *)malloc( DAP_LOG_HISTORY_BUFFER_SIZE + 65536 );
    if ( !log_buffer )
        goto err;

    // fix: the scratch area lives in the extra 65536 bytes AFTER the history
    // ring. It was previously set to log_buffer + 65536, which lies inside
    // the ring (history slots start at log_buffer), so _log_it()'s formatting
    // clobbered stored history strings.
    temp_buffer = log_buffer + DAP_LOG_HISTORY_BUFFER_SIZE;
    end_of_log_buffer = log_buffer + DAP_LOG_HISTORY_BUFFER_SIZE;

    log_history = (dap_log_str_t *)malloc( DAP_LOG_HISTORY_MAX_STRINGS * sizeof(dap_log_str_t) );
    if ( !log_history )
        goto err;

    // each history slot points at its fixed DAP_LOG_HISTORY_STR_SIZE window in the ring
    for ( uint32_t i = 0; i < DAP_LOG_HISTORY_MAX_STRINGS; ++ i ) {
        log_history[ i ].t = 0;
        log_history[ i ].str = log_buffer + DAP_LOG_HISTORY_STR_SIZE * i;
    }

    // precompute colour-prefix lengths so _log_it() can skip them cheaply
    for ( uint32_t i = 0; i < 16; ++ i )
        ansi_seq_color_len[ i ] = strlen( ansi_seq_color[i] );

#ifdef _WIN32
    memset( &win32_osvi, 0, sizeof(OSVERSIONINFO) );
    win32_osvi.dwOSVersionInfoSize = sizeof( OSVERSIONINFO );
    GetVersionEx( (OSVERSIONINFO *)&win32_osvi );
    // Windows 10+ consoles understand ANSI escape sequences natively
    bUseANSIEscapeSequences = (win32_osvi.dwMajorVersion >= 10);
    hWin32ConOut = GetStdHandle( STD_OUTPUT_HANDLE );
#endif

    if ( !a_log_file )
        return 0;

    s_log_file = fopen( a_log_file , "a" );
    if( s_log_file == NULL ) {
        fprintf( stderr, "Can't open log file %s to append\n", a_log_file );
        return -1;
    }

    return 0;

err:
    printf( "Fatal Error: Out of memory!\n" );
    dap_common_deinit( );
    return -1;
}
/**
* @brief dap_common_deinit Deinitialise
*/
void dap_common_deinit( )
{
if ( s_log_file )
fclose( s_log_file );
if( log_history )
free( log_history );
if( log_buffer )
free( log_buffer );
}
/**
 * @brief log_log  Append one formatted log line to the in-memory history ring.
 * @param[in] str  Formatted text (produced by _log_it(), colour prefix stripped).
 * @param[in] len  Number of bytes in str.
 * @param[in] t    Timestamp recorded for every slot the text occupies.
 *
 * The ring is log_buffer, divided into DAP_LOG_HISTORY_MAX_STRINGS fixed
 * DAP_LOG_HISTORY_STR_SIZE-byte slots; a long message spans several
 * consecutive slots. Protected by s_list_logs_mutex.
 */
void log_log( char *str, uint32_t len, time_t t )
{
pthread_mutex_lock( &s_list_logs_mutex );
// outer loop: one pass per contiguous region (wraps at end_of_log_buffer)
while( len ) {
uint8_t *out = log_history[ logh_outindex ].str;
uint32_t ilen = len;
// clip the copy at the physical end of the ring buffer
if ( out + len >= end_of_log_buffer )
ilen = end_of_log_buffer - out;
// NOTE(review): str is not advanced after this copy, so when the ring wraps
// the next pass re-copies the *start* of str instead of its tail — looks
// like a missing `str += ilen;`. Confirm before trusting history contents
// for messages that straddle the wrap point.
memcpy( out, str, ilen );
len -= ilen;
// stamp every slot covered by the copied region with its time and length
do {
log_history[ logh_outindex ].t = t;
if ( ilen >= DAP_LOG_HISTORY_STR_SIZE ) {
log_history[ logh_outindex ].len = DAP_LOG_HISTORY_STR_SIZE;
ilen -= DAP_LOG_HISTORY_STR_SIZE;
}
else {
log_history[ logh_outindex ].len = ilen;
ilen = 0;
}
// advance ring index via the power-of-two mask; saturate total at capacity
++ logh_outindex;
logh_outindex &= DAP_LOG_HISTORY_M;
if ( logh_total < DAP_LOG_HISTORY_MAX_STRINGS )
++ logh_total;
} while( ilen );
}
pthread_mutex_unlock( &s_list_logs_mutex );
return;
}
/**
 * @brief logh_since  Locate the oldest history slot with timestamp >= t.
 * @param[in] t  Threshold time.
 * @return Ring index of the first "new" entry, or 0xFFFFFFFF when no stored
 *         entry is as recent as t. Caller must hold s_list_logs_mutex.
 *
 * Scans backwards from the newest entry until it crosses the time boundary.
 */
uint32_t logh_since( time_t t )
{
uint32_t li = (logh_outindex - 1) & DAP_LOG_HISTORY_M; // newest entry
uint32_t count = logh_total;
uint32_t fi = 0; // index of the oldest stored entry
uint32_t si = 0;
if ( log_history[li].t < t ) // no new logs
return 0xFFFFFFFF;
// once the ring has wrapped, the oldest entry sits at the write index
if (logh_total >= DAP_LOG_HISTORY_MAX_STRINGS )
fi = logh_outindex;
if ( log_history[fi].t >= t ) // all logs is new
return fi;
// walk backwards to the newest entry that is older than t
do {
if ( log_history[li].t < t ) {
si = li;
break;
}
li = (li - 1) & DAP_LOG_HISTORY_M;
} while ( --count );
// the entry just after it is the first one at/after t
return (si + 1) & DAP_LOG_HISTORY_M;
}
/**
 * @brief dap_log_get_item  Copy history entries newer than a_start_time.
 * @param[in] a_start_time  Only entries with timestamp >= this are returned.
 * @param[in] a_limit       Maximum number of ring slots to return.
 * @return malloc'd NUL-terminated concatenation of the matching entries, or
 *         NULL when there is nothing new (or on allocation failure).
 *         Caller owns the returned buffer and must free() it.
 */
char *dap_log_get_item( time_t a_start_time, int a_limit )
{
uint32_t l_count;
uint32_t si;
char *res, *out;
pthread_mutex_lock( &s_list_logs_mutex );
l_count = logh_total;
if ( l_count > (uint32_t)a_limit )
l_count = a_limit;
if ( !l_count ) { // history is empty
pthread_mutex_unlock( &s_list_logs_mutex );
return NULL;
}
si = logh_since( a_start_time );
if ( si == 0xFFFFFFFF || log_history[ si ].t < a_start_time ) {// no new logs
pthread_mutex_unlock( &s_list_logs_mutex );
return NULL;
}
// worst case: every returned slot is full (DAP_LOG_HISTORY_STR_SIZE bytes) plus NUL
out = res = (char *)malloc( l_count * DAP_LOG_HISTORY_STR_SIZE + 1 );
if ( !res ) {
pthread_mutex_unlock( &s_list_logs_mutex );
return NULL;
}
// concatenate slots forward until the write index or an older entry is reached
do {
memcpy( out, log_history[ si ].str, log_history[ si ].len );
out += log_history[ si ].len;
si = (si + 1) & DAP_LOG_HISTORY_M;
if ( si == logh_outindex || log_history[ si ].t < a_start_time )
break;
} while ( --l_count );
*out = 0;
pthread_mutex_unlock( &s_list_logs_mutex );
return res;
}
#if 0
// save log to list
static void log_add_to_list(time_t a_t, const char *a_time_str, const char * a_log_tag, enum dap_log_level a_ll,
const char * a_format, va_list a_ap)
{
// pthread_mutex_lock(&s_list_logs_mutex);
// dap_string_t *l_string = dap_string_new("");
//
// dap_string_append_printf(l_string, "[%s]\t", a_time_str);
// l_string = dap_string_append(l_string, log_level_tag[a_ll] );
/**
if(a_ll == L_DEBUG) {
l_string = dap_string_append(l_string, "[DBG]\t");
} else if(a_ll == L_INFO) {
l_string = dap_string_append(l_string, "[INF]\t");
} else if(a_ll == L_NOTICE) {
l_string = dap_string_append(l_string, "[ * ]\t");
} else if(a_ll == L_WARNING) {
l_string = dap_string_append(l_string, "[WRN]\t");
} else if(a_ll == L_ERROR) {
l_string = dap_string_append(l_string, "[ERR]\t");
} else if(a_ll == L_CRITICAL) {
l_string = dap_string_append(l_string, "[!!!]\t");
}
**/
/**
if(a_log_tag != NULL) {
dap_string_append_printf(l_string, log_tag_fmt_str, a_log_tag);
}
dap_string_append_vprintf(l_string, a_format, a_ap);
dap_list_logs_item_t *l_item = DAP_NEW(dap_list_logs_item_t);
l_item->t = a_t;
l_item->str = dap_string_free(l_string, false);
s_list_logs = dap_list_append(s_list_logs, l_item);
// remove old items
unsigned int l_count = dap_list_length(s_list_logs);
if(l_count > s_max_items) {
// remove items from the beginning
for(unsigned int i = 0; i < l_count - s_max_items; i++) {
s_list_logs = dap_list_remove(s_list_logs, s_list_logs->data);
}
}
pthread_mutex_unlock(&s_list_logs_mutex);
**/
}
#endif
/**
 * @brief _log_it  Format a log line and fan it out to console, file and history.
 * @param[in] log_tag  Module tag inserted after the level tag (must be non-NULL).
 * @param[in] ll       Severity; messages below dap_log_level are dropped.
 * @param[in] fmt      printf-style format for the message body.
 *
 * Builds the line in the shared scratch area temp_buffer as
 *   <ANSI colour>[date-time]<level tag><log_tag>] <message>\n
 * then writes it (colour prefix stripped) to s_log_file and the history ring,
 * and (with colour) to the console. Serialised by a local static mutex.
 */
void _log_it( const char *log_tag, enum dap_log_level ll, const char *fmt,... )
{
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
uint8_t *buf0 = temp_buffer;
uint32_t len,
time_offset, // start of "[date]" — skips the ANSI colour prefix
tag_offset, // start of the 8-byte level tag
msg_offset; // start of the formatted message body
if ( ll < dap_log_level || ll >= 16 || !log_tag )
return;
time_t t = time( NULL );
pthread_mutex_lock( &mutex );
// colour escape first, so the console write can simply start at buf0
memcpy( buf0, ansi_seq_color[ll], ansi_seq_color_len[ll] );
time_offset = ansi_seq_color_len[ll];
struct tm *tmptime = localtime( &t );
len = strftime( (char *)(buf0 + time_offset), 65536, "[%x-%X]", tmptime );
tag_offset = time_offset + len;
// level tags are fixed 8-byte strings, e.g. " [ERR] [ "
memcpy( buf0 + tag_offset, log_level_tag[ll], 8 );
// NOTE(review): this second copy is immediately overwritten by the log_tag
// characters below (msg_offset is tag_offset + 8) — appears redundant.
memcpy( buf0 + tag_offset + 8, log_level_tag[ll], 8 );
msg_offset = tag_offset + 8;
while ( *log_tag )
buf0[ msg_offset ++ ] = *log_tag ++;
buf0[ msg_offset ++ ] = ']';
buf0[ msg_offset ++ ] = ' ';
// buf0[ msg_offset ++ ] = 9;
va_list va;
va_start( va, fmt );
// NOTE(review): vsprintf is unbounded — a message longer than the scratch
// area (64 KiB minus prefixes) overruns temp_buffer; consider vsnprintf.
len = vsprintf( (char * __restrict )(buf0 + msg_offset), fmt, va );
va_end( va );
len += msg_offset;
#ifdef DAP_OS_ANDROID
// NOTE(review): `buf2` is not declared anywhere — this branch cannot compile.
buf2[ len ] = 0;
__android_log_write( ANDROID_LOG_INFO, DAP_BRAND, buf0 + msg_offset );
#endif
buf0[ len++ ] = 10; // trailing '\n'
// file and history receive the line without the ANSI colour prefix
if ( s_log_file )
fwrite( buf0 + time_offset, len - time_offset, 1, s_log_file );
// buf0[ len++ ] = 0;
log_log( (char *)(buf0 + time_offset), len - time_offset, t );
#ifdef _WIN32
// if ( !bUseANSIEscapeSequences )
// NOTE(review): only the console colour attribute is set on the _WIN32 path;
// the text write below is commented out — confirm where the text is printed.
SetConsoleTextAttribute( hWin32ConOut, log_level_colors[ll] );
// printf( "%s", (char *)(buf0 + time_offset) );
#else
// memcpy( buf0, len,
// printf( "%s", (char *)buf0 );
fwrite( buf0, len, 1, stdout );
#endif
///stdout
// printf("\x1b[0m\n");
// "\x1b[0;37;40m", // L_DEBUG = 0
pthread_mutex_unlock( &mutex );
}
/**
 * @brief log_error  Return the last recorded error message.
 * @return Pointer to the static s_last_error buffer (never NULL).
 *
 * NOTE(review): nothing visible in this file ever writes s_last_error after
 * its zero-initialisation, so the returned string is currently always empty —
 * confirm whether callers still rely on this API.
 */
const char *log_error()
{
return s_last_error;
}
#if 1
#define INT_DIGITS 19		/* enough for 64 bit integer */

/**
 * @brief dap_itoa  Convert a signed int to its decimal string form.
 * @param[in] i  Value to convert.
 * @return Pointer to a static buffer holding the textual value; the buffer is
 *         overwritten by the next call (not thread-safe, like the original).
 */
char *dap_itoa(int i)
{
    /* Room for INT_DIGITS digits, a sign and the terminating NUL. */
    static char buf[INT_DIGITS + 2];

    snprintf( buf, sizeof(buf), "%d", i );
    return buf;
}
#endif
/**
 * @brief dap_time_to_str_rfc822  Convert time_t to an RFC822-style date/time string.
 * @param[out] out           Output buffer.
 * @param[in]  out_size_max  Maximum size of the output buffer.
 * @param[in]  t             UNIX time.
 * @return Length of the resulting string, or a negative value on failure.
 *
 * Note: %y yields a two-digit year, and the Windows branch omits the
 * timezone (%z) that the POSIX branch prints.
 */
int dap_time_to_str_rfc822(char * out, size_t out_size_max, time_t t)
{
struct tm *tmp;
tmp = localtime( &t );
if ( tmp == NULL ) {
log_it( L_ERROR, "Can't convert data from unix fromat to structured one" );
return -2;
}
int ret;
#ifndef _WIN32
ret = strftime( out, out_size_max, "%a, %d %b %y %T %z", tmp );
#else
ret = strftime( out, out_size_max, "%a, %d %b %y %H:%M:%S", tmp );
#endif
if ( !ret ) {
log_it( L_ERROR, "Can't print formatted time in string" );
return -1;
}
return ret;
}
#define BREAK_LATENCY 1
static int breaker_set[2] = { -1, -1 };
static int initialized = 0;
static struct timespec break_latency = { 0, BREAK_LATENCY * 1000 * 1000 };
/**
 * @brief get_select_breaker  Return the read end of the select-breaker pipe.
 * @return Read-end file descriptor, or -1 if the pipe cannot be created.
 *
 * The self-pipe is created lazily on the first call and reused afterwards.
 */
int get_select_breaker( )
{
    if ( initialized )
        return breaker_set[0];

    if ( pipe(breaker_set) < 0 )
        return -1;

    initialized = 1;
    return breaker_set[0];
}
/**
 * @brief send_select_break  Wake a select() blocked on the breaker pipe.
 * @return 0 on success, -1 if the pipe is not initialised or I/O fails.
 *
 * Writes one NUL byte into the write end (get_select_breaker() must have been
 * called first), sleeps BREAK_LATENCY ms to give the waiter time to run, then
 * reads the byte back so the pipe is left empty.
 */
int send_select_break( )
{
if ( !initialized )
return -1;
char buffer[1];
#ifndef _WIN32
if ( write(breaker_set[1], "\0", 1) <= 0 )
#else
if ( _write(breaker_set[1], "\0", 1) <= 0 )
#endif
return -1;
#ifndef _WIN32
nanosleep( &break_latency, NULL );
#else
Sleep( BREAK_LATENCY );
#endif
// drain the wake-up byte; verify it is the NUL we wrote
#ifndef _WIN32
if ( read(breaker_set[0], buffer, 1) <= 0 || buffer[0] != '\0' )
#else
if ( _read(breaker_set[0], buffer, 1) <= 0 || buffer[0] != '\0' )
#endif
return -1;
return 0;
}
#ifdef ANDROID1
static u_long myNextRandom = 1;
double atof(const char *nptr)
{
return (strtod(nptr, NULL));
}
int rand(void)
{
return (int)((myNextRandom = (1103515245 * myNextRandom) + 12345) % ((u_long)RAND_MAX + 1));
}
void srand(u_int seed)
{
myNextRandom = seed;
}
#endif
#if 0
/**
* @brief exec_with_ret Executes a command with result return
* @param[in] a_cmd Command
* @return Result
*/
char * exec_with_ret(const char * a_cmd)
{
FILE * fp;
size_t buf_len = 0;
char buf[4096] = {0};
fp= popen(a_cmd, "r");
if (!fp) {
goto FIN;
}
memset(buf,0,sizeof(buf));
fgets(buf,sizeof(buf)-1,fp);
pclose(fp);
buf_len=strlen(buf);
if(buf[buf_len-1] =='\n')buf[buf_len-1] ='\0';
FIN:
return strdup(buf);
}
/**
* @brief exec_with_ret_multistring performs a command with a result return in the form of a multistring
* @param[in] a_cmd Coomand
* @return Return
*/
char * exec_with_ret_multistring(const char * a_cmd)
{
FILE * fp;
size_t buf_len = 0;
char buf[4096] = {0};
fp= popen(a_cmd, "r");
if (!fp) {
goto FIN;
}
memset(buf,0,sizeof(buf));
char retbuf[4096] = {0};
while(fgets(buf,sizeof(buf)-1,fp)) {
strcat(retbuf, buf);
}
pclose(fp);
buf_len=strlen(retbuf);
if(retbuf[buf_len-1] =='\n')retbuf[buf_len-1] ='\0';
FIN:
return strdup(retbuf);
}
#endif
static const char l_possible_chars[]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
/**
 * @brief dap_random_string_fill  Fill a buffer with random characters.
 * @param[out] str     Destination buffer (NOT NUL-terminated here).
 * @param[in]  length  Number of characters to write.
 *
 * Characters are drawn from l_possible_chars ([A-Za-z0-9]) via rand().
 */
void dap_random_string_fill(char *str, size_t length) {
    const size_t l_alphabet = sizeof(l_possible_chars) - 1;   // exclude the NUL
    for ( char *p = str, *e = str + length; p < e; ++p )
        *p = l_possible_chars[ rand() % l_alphabet ];
}
/**
 * @brief dap_random_string_create_alloc  Generate a random string.
 * @param[in] a_length  Length of the random part, excluding the terminator.
 * @return Newly allocated NUL-terminated string of a_length characters drawn
 *         from l_possible_chars, or NULL on allocation failure.
 *         Caller releases it with DAP_DELETE().
 */
char * dap_random_string_create_alloc(size_t a_length)
{
    char * ret = DAP_NEW_SIZE(char, a_length+1);
    if ( !ret )                 // fix: the allocation result was never checked
        return NULL;

    size_t i;
    for(i=0; i<a_length; ++i) {
        int index = rand() % (sizeof(l_possible_chars)-1);
        ret[i] = l_possible_chars[index];
    }
    ret[a_length] = '\0';       // fix: room for the terminator was allocated
                                // (a_length+1) but it was never written
    return ret;
}
#if 0
#define MAX_PRINT_WIDTH 100

// Print 'count' copies of character 'c' on one line; count must stay below
// MAX_PRINT_WIDTH (asserted). NOTE(review): dead code (#if 0); relies on the
// static buffer's zero tail for NUL termination.
static void _printrepchar(char c, size_t count) {
    assert(count < MAX_PRINT_WIDTH &&
        "Too many characters");
    static char buff[MAX_PRINT_WIDTH];
    memset(buff, (int)c, count);
    printf("%s\n", buff);
}

/**
 * @brief The function displays a dump
 * @param[in] data The data dump you want to display
 * @param[in] size The size of the data whose dump you want to display
 *
 * Prints the buffer as canonical hex-dump rows: 16 hex bytes per line with a
 * gap every 8, followed by the printable-ASCII view ('.' for non-printable).
 */
void dap_dump_hex(const void* data, size_t size) {
    char ascii[17];
    size_t i, j;
    ascii[16] = '\0';
    for (i = 0; i < size; ++i) {
        printf("%02X ", ((const unsigned char*)data)[i]);
        // Collect the ASCII column for this 16-byte row.
        if (((const unsigned char*)data)[i] >= ' ' && ((const unsigned char*)data)[i] <= '~') {
            ascii[i % 16] = ((const char*)data)[i];
        } else {
            ascii[i % 16] = '.';
        }
        if ((i+1) % 8 == 0 || i+1 == size) {
            printf(" ");
            if ((i+1) % 16 == 0) {
                printf("| %s \n", ascii);
            } else if (i+1 == size) {
                // Last, partial row: pad the hex area so the ASCII column aligns.
                ascii[(i+1) % 16] = '\0';
                if ((i+1) % 16 <= 8) {
                    printf(" ");
                }
                for (j = (i+1) % 16; j < 16; ++j) {
                    printf("   ");
                }
                printf("| %s \n", ascii);
            }
        }
    }
    _printrepchar('-', 70);
}

// Zero n bytes of a_buf and return it (memset wrapper kept for legacy callers).
void *memzero(void *a_buf, size_t n)
{
    memset(a_buf,0,n);
    return a_buf;
}
#endif
/**
 * Convert binary data to its hexadecimal (binhex) text representation.
 *
 * @param a_out output buffer, must hold at least 2*a_len characters (no NUL is written)
 * @param a_in  input bytes
 * @param a_len number of input bytes to encode
 * @return number of input bytes encoded (a_len), or 0 if either pointer is NULL
 */
size_t dap_bin2hex(char *a_out, const void *a_in, size_t a_len)
{
    static char hex[] = "0123456789ABCDEF"; // nibble -> ASCII lookup
    if (!a_in || !a_out)
        return 0;
    const uint8_t *l_src = (const uint8_t *)a_in;
    for (size_t l_i = 0; l_i < a_len; ++l_i) {
        uint8_t l_byte = l_src[l_i];
        *a_out++ = hex[l_byte >> 4];
        *a_out++ = hex[l_byte & 0x0F];
    }
    return a_len;
}
// !!!!!!!!!!!!!!!!!!!
/**
 * Convert binhex encoded data back to binary data.
 *
 * @param a_out output buffer, at least a_len/2 bytes; may alias a_in (in-place decode)
 * @param a_in  hex characters '0'-'9', 'a'-'f', 'A'-'F' (no validation is performed)
 * @param a_len number of input characters; must be even
 * @return a_len on success, 0 if a pointer is NULL or a_len is odd
 */
size_t dap_hex2bin(uint8_t *a_out, const char *a_in, size_t a_len)
{
    if (!a_in || !a_out || (a_len & 1))
        return 0;
    for (size_t l_left = a_len; l_left; l_left -= 2) {
        char c = *a_in++;
        uint8_t l_hi = (c >= 'a') ? (uint8_t)(c - 'a' + 10)
                     : (c >= 'A') ? (uint8_t)(c - 'A' + 10)
                     : (uint8_t)(c - '0');
        c = *a_in++;
        uint8_t l_lo = (c >= 'a') ? (uint8_t)(c - 'a' + 10)
                     : (c >= 'A') ? (uint8_t)(c - 'A' + 10)
                     : (uint8_t)(c - '0');
        *a_out++ = (uint8_t)((l_hi << 4) | l_lo);
    }
    return a_len;
}
// !!!!!!!!!!!!!!!!!!!
/**
 * Parse a decimal or "0x"-prefixed hexadecimal number string into a
 * little-endian byte array.
 *
 * @param num_str input string; NULL is ignored
 * @param raw     output buffer, zero-filled then overwritten with up to 8 value bytes
 * @param raw_len size of the output buffer in bytes
 */
void dap_digit_from_string(const char *num_str, uint8_t *raw, size_t raw_len)
{
    if (!num_str)
        return;
    uint64_t l_val = strncasecmp(num_str, "0x", 2)
            ? strtoull(num_str, NULL, 10)
            : strtoull(num_str + 2, NULL, 16);
    // for LITTLE_ENDIAN (Intel), do nothing, otherwise swap bytes
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    l_val = le64toh(l_val);
#endif
    memset(raw, 0, raw_len);
    size_t l_copy = raw_len < sizeof(uint64_t) ? raw_len : sizeof(uint64_t);
    memcpy(raw, &l_val, l_copy);
}
#if 0
/*!
 * \brief Execute shell command silently (Windows-only implementation)
 * \param a_cmd command line, appended after "cmd.exe /c"
 * \return 0 if success, -1 otherwise
 * NOTE(review): dead code (#if 0). cmdline is 512 bytes and strcat is unbounded,
 * so a long a_cmd overflows it. 0x08000000 is CREATE_NO_WINDOW, 0xffffffff is INFINITE.
 */
int exec_silent(const char * a_cmd) {
    PROCESS_INFORMATION p_info;
    STARTUPINFOA s_info;
    memzero(&s_info, sizeof(s_info));
    memzero(&p_info, sizeof(p_info));
    s_info.cb = sizeof(s_info);
    char cmdline[512] = {'\0'};
    strcat(cmdline, "C:\\Windows\\System32\\cmd.exe /c ");
    strcat(cmdline, a_cmd);
    if (CreateProcessA(NULL, cmdline, NULL, NULL, FALSE, 0x08000000, NULL, NULL, &s_info, &p_info)) {
        WaitForSingleObject(p_info.hProcess, 0xffffffff);
        CloseHandle(p_info.hProcess);
        CloseHandle(p_info.hThread);
        return 0;
    }
    else {
        return -1;
    }
}
#endif
/*
* Authors:
* Dmitriy A. Gearasimov <kahovski@gmail.com>
* DeM Labs Inc. https://demlabs.net
* DeM Labs Open source community https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2019
* All rights reserved.
This file is part of DAP (Deus Applications Prototypes) the open source project
DAP (Deus Applications Prototypes) is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DAP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with any DAP based project. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h> /* 'nanosleep' */
#include <unistd.h> /* 'pipe', 'read', 'write' */
#include <string.h>
#include <stdarg.h>
#include <assert.h>
#include <stdint.h>
#ifdef DAP_OS_ANDROID
#include <android/log.h>
#endif
#ifndef _WIN32
#include <pthread.h>
#include <syslog.h>
#else // WIN32
#include <stdlib.h>
#include <windows.h>
#include <process.h>
#include <pthread.h>
#include "win32/dap_console_manager.h"
#define popen _popen
#define pclose _pclose
#define pipe(pfds) _pipe(pfds, 4096, 0x8000)
#endif
#include "dap_common.h"
#include "dap_strfuncs.h"
#include "dap_string.h"
#include "dap_list.h"
#include "dap_lut.h"
#define LAST_ERROR_MAX 255
#define LOG_TAG "dap_common"
static char s_last_error[LAST_ERROR_MAX] = {0};
static enum dap_log_level dap_log_level = L_DEBUG;
static FILE *s_log_file = NULL;
static char log_tag_fmt_str[10];
static pthread_mutex_t s_list_logs_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t logh_total = 0; // log history size
static uint32_t logh_outindex = 0;
static uint8_t *log_buffer = NULL;
static uint8_t *temp_buffer = NULL;
static uint8_t *end_of_log_buffer = NULL;
static dap_log_str_t *log_history = NULL;
static time_t g_start_time = 0;
// Fixed-width textual level tags, indexed by enum dap_log_level.
// _log_it() copies exactly 8 bytes of the selected entry into each log line.
const char *log_level_tag[ 16 ] = {
    " [DBG] [ ", // L_DEBUG     = 0
    " [INF] [ ", // L_INFO      = 1,
    " [ * ] [ ", // L_NOTICE    = 2,
    " [MSG] [ ", // L_MESSAGE   = 3,
    " [DAP] [ ", // L_DAP       = 4,
    " [WRN] [ ", // L_WARNING   = 5,
    " [ATT] [ ", // L_ATT       = 6,
    " [ERR] [ ", // L_ERROR     = 7,
    " [ ! ] [ ", // L_CRITICAL  = 8,
    " [---] [ ", // = 9
    " [---] [ ", // = 10
    " [---] [ ", // = 11
    " [---] [ ", // = 12
    " [---] [ ", // = 13
    " [---] [ ", // = 14
    " [---] [ ", // = 15
};
// ANSI "ESC[<attr>;<fg>;<bg>m" color prefixes per log level; used for console
// output only (file/history output skips them via time_offset in _log_it).
// NOTE(review): upstream comments had the L_INFO/L_NOTICE indices swapped;
// corrected here to match the enum order used by log_level_tag above.
const char *ansi_seq_color[ 16 ] = {
    "\x1b[0;37;40m", // L_DEBUG    = 0
    "\x1b[1;32;40m", // L_INFO     = 1,
    "\x1b[0;32;40m", // L_NOTICE   = 2,
    "\x1b[1;33;40m", // L_MESSAGE  = 3,
    "\x1b[0;36;40m", // L_DAP      = 4,
    "\x1b[1;35;40m", // L_WARNING  = 5,
    "\x1b[1;36;40m", // L_ATT      = 6,
    "\x1b[1;31;40m", // L_ERROR    = 7,
    "\x1b[1;37;41m", // L_CRITICAL = 8,
    "", // = 9
    "", // = 10
    "", // = 11
    "", // = 12
    "", // = 13
    "", // = 14
    "", // = 15
};
#ifdef _WIN32
// Windows console state: OS version probe (Win10+ consoles understand ANSI
// escapes) and the handle used by _log_it for colored output.
OSVERSIONINFO win32_osvi;
bool bUseANSIEscapeSequences = false;
HANDLE hWin32ConOut = INVALID_HANDLE_VALUE;

// Per-level attributes for SetConsoleTextAttribute; low nibble = foreground,
// high nibble = background (L_CRITICAL is bright white on red).
WORD log_level_colors[ 16 ] = {
    7,              // L_DEBUG
    10,             // L_INFO
    2,              // L_NOTICE
    11,             // L_MESSAGE
    9,              // L_DAP
    13,             // L_WARNING
    14,             // L_ATT
    12,             // L_ERROR
    (12 << 4) + 15, // L_CRITICAL
    7,
    7,
    7,
    7,
    7,
    7,
    7
};
#endif
uint32_t ansi_seq_color_len[ 16 ];
/**
 * @brief dap_log_level_set Set the minimum level a message must reach to be logged
 * @param[in] ll New logging threshold
 */
void dap_log_level_set( enum dap_log_level ll ) {
    dap_log_level = ll;
}

/**
 * @brief dap_log_level_get Read the current logging threshold
 * @return Current minimum log level
 */
enum dap_log_level dap_log_level_get( void ) {
    return dap_log_level;
}
/**
 * @brief dap_set_log_tag_width Set the printed width of the log-tag column
 * @param[in] width Field width for the tag; must not exceed 99
 *
 * Rebuilds the module-level format string log_tag_fmt_str ("[%<width>s]\t").
 * Replaces the old strcpy/strcat/dap_itoa chain with a single bounded
 * snprintf: the buffer is 10 bytes, the longest result "[%99s]\t" plus NUL
 * is 8 bytes, so the write always fits.
 */
void dap_set_log_tag_width(size_t width) {
    if (width > 99) {
        dap_fprintf(stderr,"Can't set width %zd", width);
        return;
    }
    snprintf(log_tag_fmt_str, sizeof(log_tag_fmt_str), "[%%%zus]\t", width);
}
/**
 * @brief dap_common_init Initialise the logging/runtime subsystem
 * @param[in] console_title Console window title (used on Windows only)
 * @param[in] a_log_file    Path of log file to append to, or NULL for console-only logging
 * @return 0 on success, -1 on allocation or file-open failure
 */
int dap_common_init( const char *console_title, const char *a_log_file )
{
    srand( (unsigned int)time(NULL) );
#ifdef _WIN32
    SetupConsole( console_title, L"Lucida Console", 12, 20 );
#endif
    g_start_time = time( NULL );

    // init default log tag 8 width
    strcpy( log_tag_fmt_str, "[%8s]\t");

    // Single allocation: history string area plus a 64 KiB scratch buffer
    // used by _log_it to assemble each line.
    log_buffer = (uint8_t *)malloc( DAP_LOG_HISTORY_BUFFER_SIZE + 65536 );
    if ( !log_buffer )
        goto err;

    // NOTE(review): assumes DAP_LOG_HISTORY_BUFFER_SIZE == 65536, so the
    // scratch area starts just past the history strings — confirm in the
    // header; otherwise temp_buffer overlaps the history ring below.
    temp_buffer = log_buffer + 65536;
    end_of_log_buffer = log_buffer + DAP_LOG_HISTORY_BUFFER_SIZE;

    log_history = (dap_log_str_t *)malloc( DAP_LOG_HISTORY_MAX_STRINGS * sizeof(dap_log_str_t) );
    if ( !log_history )
        goto err;

    // Pre-assign each ring slot a fixed string area inside log_buffer.
    for ( uint32_t i = 0; i < DAP_LOG_HISTORY_MAX_STRINGS; ++ i ) {
        log_history[ i ].t = 0;
        log_history[ i ].str = log_buffer + DAP_LOG_HISTORY_STR_SIZE * i;
    }

    // Cache ANSI prefix lengths so _log_it avoids a strlen per message.
    for ( uint32_t i = 0; i < 16; ++ i )
        ansi_seq_color_len[ i ] = strlen( ansi_seq_color[i] );

#ifdef _WIN32
    memset( &win32_osvi, 0, sizeof(OSVERSIONINFO) );
    win32_osvi.dwOSVersionInfoSize = sizeof( OSVERSIONINFO );
    GetVersionEx( (OSVERSIONINFO *)&win32_osvi );
    // Windows 10+ consoles accept ANSI escape sequences natively.
    bUseANSIEscapeSequences = (win32_osvi.dwMajorVersion >= 10);
    //if ( !bUseANSIEscapeSequences )
    hWin32ConOut = GetStdHandle( STD_OUTPUT_HANDLE );
#if 0
    printf( "Windows version %u.%u platformID %u \n",
            win32_osvi.dwMajorVersion,
            win32_osvi.dwMinorVersion,
            win32_osvi.dwPlatformId );
#endif
#endif

    if ( !a_log_file )
        return 0;

    s_log_file = fopen( a_log_file , "a" );
    if( s_log_file == NULL ) {
        dap_fprintf( stderr, "Can't open log file %s to append\n", a_log_file );
        return -1;
    }

    return 0;

err:
    printf( "Fatal Error: Out of memory!\n" );
    dap_common_deinit( );
    return -1;
}
/**
 * @brief dap_common_deinit Release resources acquired by dap_common_init()
 *
 * Closes the log file and frees the history buffers. Every pointer is reset
 * to NULL after release: dap_common_init()'s error path already calls this
 * function, so without the reset a later explicit deinit (or a retried init)
 * would double-fclose/double-free.
 */
void dap_common_deinit( )
{
    printf("dap_common_deinit( )\n");

    if ( s_log_file ) {
        fclose( s_log_file );
        s_log_file = NULL;
    }
    if( log_history ) {
        free( log_history );
        log_history = NULL;
    }
    if( log_buffer ) {
        free( log_buffer );
        log_buffer = NULL;
    }
}
/**
 * @brief log_log Append one formatted log line to the in-memory history ring
 * @param[in] str Line text (already formatted, with trailing newline)
 * @param[in] len Length of str in bytes
 * @param[in] t   Timestamp stored with every slot the line occupies
 *
 * Copies the line into the fixed-size slots of log_history; a line longer than
 * DAP_LOG_HISTORY_STR_SIZE spans consecutive slots, and a copy that would run
 * past end_of_log_buffer is clamped so the ring wraps to its start.
 * NOTE(review): 'str' is not advanced between outer iterations, so when a line
 * wraps the ring boundary the second chunk re-copies the line's beginning —
 * looks like a latent bug; confirm before relying on wrapped history content.
 */
void log_log( char *str, uint32_t len, time_t t )
{
    pthread_mutex_lock( &s_list_logs_mutex );

    while( len ) {

        uint8_t *out = log_history[ logh_outindex ].str;
        uint32_t ilen = len;

        // Clamp this copy at the physical end of the ring buffer.
        if ( out + len >= end_of_log_buffer )
            ilen = end_of_log_buffer - out;

        memcpy( out, str, ilen );
        len -= ilen;

        // Slice the copied chunk into fixed-size history slots.
        do {
            log_history[ logh_outindex ].t = t;
            if ( ilen >= DAP_LOG_HISTORY_STR_SIZE ) {
                log_history[ logh_outindex ].len = DAP_LOG_HISTORY_STR_SIZE;
                ilen -= DAP_LOG_HISTORY_STR_SIZE;
            }
            else {
                log_history[ logh_outindex ].len = ilen;
                ilen = 0;
            }
            ++ logh_outindex;
            logh_outindex &= DAP_LOG_HISTORY_M; // wrap the slot index
            if ( logh_total < DAP_LOG_HISTORY_MAX_STRINGS )
                ++ logh_total;
        } while( ilen );
    }

    pthread_mutex_unlock( &s_list_logs_mutex );
    return;
}
/**
 * @brief logh_since Find the oldest history slot whose timestamp is >= t
 * @param[in] t Threshold time
 * @return Ring index of the first matching slot, or 0xFFFFFFFF when even the
 *         newest entry is older than t
 *
 * Binary search over the time-ordered ring: bi marks the oldest entry (0 until
 * the ring has wrapped, logh_outindex afterwards) and si halves each step.
 * Assumes slot timestamps are non-decreasing in ring order (log_log appends
 * with the current time). Caller must hold s_list_logs_mutex.
 */
uint32_t logh_since( time_t t )
{
    uint32_t bi = 0;               // lower bound of the search window
    uint32_t si = logh_total >> 1; // halving step
    uint32_t li = (logh_outindex - 1) & DAP_LOG_HISTORY_M; // newest entry

    if ( log_history[li].t < t ) // no new logs
        return 0xFFFFFFFF;

    if (logh_total >= DAP_LOG_HISTORY_MAX_STRINGS )
        bi = logh_outindex; // ring wrapped: oldest entry sits at the write index

    if ( log_history[bi].t >= t ) // all logs is new
        return bi;

    while( si ) {
        if ( log_history[(bi + si) & DAP_LOG_HISTORY_M].t < t )
            bi += si;
        si >>= 1;
    }

    // si is 0 here; +1 steps past the last too-old entry.
    return (bi + si + 1) & DAP_LOG_HISTORY_M;
}
/**
uint32_t logh_since( time_t t )
{
uint32_t li = (logh_outindex - 1) & DAP_LOG_HISTORY_M;
uint32_t count = logh_total;
uint32_t fi = 0;
uint32_t si = 0;
if ( log_history[li].t < t ) // no new logs
return 0xFFFFFFFF;
if (logh_total >= DAP_LOG_HISTORY_MAX_STRINGS )
fi = logh_outindex;
if ( log_history[fi].t >= t ) // all logs is new
return fi;
do {
if ( log_history[li].t < t ) {
si = li;
break;
}
li = (li - 1) & DAP_LOG_HISTORY_M;
} while ( --count );
return (si + 1) & DAP_LOG_HISTORY_M;
}
**/
/*
 * Get logs from list
 */
/**
 * @brief dap_log_get_item Collect history lines newer than (now - a_time) seconds
 * @param[in] a_time  Look-back window in seconds; values beyond "now" mean "everything"
 * @param[in] a_limit Maximum number of history slots to return
 * @return Newly malloc'ed NUL-terminated concatenation of matching lines, or
 *         NULL when there are none (or on allocation failure); caller frees
 */
char *dap_log_get_item( time_t a_time, int a_limit )
{
    uint32_t l_count;
    uint32_t si;
    char *res, *out;
    time_t a_start_time;

    // Translate the relative window into an absolute threshold timestamp.
    a_start_time = time( NULL );
    if ( a_time > a_start_time )
        a_start_time = 0;
    else
        a_start_time -= a_time;

    pthread_mutex_lock( &s_list_logs_mutex );

    l_count = logh_total;
    if ( l_count > (uint32_t)a_limit )
        l_count = a_limit;

    if ( !l_count ) {
        pthread_mutex_unlock( &s_list_logs_mutex );
        return NULL;
    }

    si = logh_since( a_start_time );
    if ( si == 0xFFFFFFFF || log_history[ si ].t < a_start_time ) { // no new logs
        pthread_mutex_unlock( &s_list_logs_mutex );
        return NULL;
    }

    // Worst case: every returned slot is full-size, plus the terminating NUL.
    out = res = (char *)malloc( l_count * DAP_LOG_HISTORY_STR_SIZE + 1 );
    if ( !res ) {
        pthread_mutex_unlock( &s_list_logs_mutex );
        return NULL;
    }

    // Walk the ring from the first new slot up to the write position or an older slot.
    do {
        memcpy( out, log_history[ si ].str, log_history[ si ].len );
        out += log_history[ si ].len;
        si = (si + 1) & DAP_LOG_HISTORY_M;
        if ( si == logh_outindex || log_history[ si ].t < a_start_time )
            break;
    } while ( --l_count );

    *out = 0;

    pthread_mutex_unlock( &s_list_logs_mutex );
    return res;
}
#if 0
// save log to list
// NOTE(review): legacy history writer, superseded by log_log()/dap_log_get_item();
// the entire body is commented out, so the function is a no-op. Kept for reference.
static void log_add_to_list(time_t a_t, const char *a_time_str, const char * a_log_tag, enum dap_log_level a_ll,
        const char * a_format, va_list a_ap)
{
//    pthread_mutex_lock(&s_list_logs_mutex);
//    dap_string_t *l_string = dap_string_new("");
//
//    dap_string_append_printf(l_string, "[%s]\t", a_time_str);
//    l_string = dap_string_append(l_string, log_level_tag[a_ll] );
/**
    if(a_ll == L_DEBUG) {
        l_string = dap_string_append(l_string, "[DBG]\t");
    } else if(a_ll == L_INFO) {
        l_string = dap_string_append(l_string, "[INF]\t");
    } else if(a_ll == L_NOTICE) {
        l_string = dap_string_append(l_string, "[ * ]\t");
    } else if(a_ll == L_WARNING) {
        l_string = dap_string_append(l_string, "[WRN]\t");
    } else if(a_ll == L_ERROR) {
        l_string = dap_string_append(l_string, "[ERR]\t");
    } else if(a_ll == L_CRITICAL) {
        l_string = dap_string_append(l_string, "[!!!]\t");
    }
**/
/**
    if(a_log_tag != NULL) {
        dap_string_append_printf(l_string, log_tag_fmt_str, a_log_tag);
    }

    dap_string_append_vprintf(l_string, a_format, a_ap);

    dap_list_logs_item_t *l_item = DAP_NEW(dap_list_logs_item_t);
    l_item->t = a_t;
    l_item->str = dap_string_free(l_string, false);
    s_list_logs = dap_list_append(s_list_logs, l_item);

    // remove old items
    unsigned int l_count = dap_list_length(s_list_logs);
    if(l_count > s_max_items) {
        // remove items from the beginning
        for(unsigned int i = 0; i < l_count - s_max_items; i++) {
            s_list_logs = dap_list_remove(s_list_logs, s_list_logs->data);
        }
    }
    pthread_mutex_unlock(&s_list_logs_mutex);
**/
}
#endif
/**
 * @brief _log_it Format a log message and dispatch it to console, file and history
 * @param[in] log_tag Module tag, printed after the level tag
 * @param[in] ll      Log level; messages below the global threshold are dropped
 * @param[in] fmt     printf-style format string, followed by its arguments
 *
 * Builds one line in the shared temp_buffer:
 *   <ANSI color>[date-time]<8-char level tag><tag>] <message>\n
 * then sends it to the log file (if open), the in-memory history ring and the
 * console. The shared buffer is protected by a function-local static mutex.
 *
 * Fixes vs. upstream: the Android branch wrote to an undeclared 'buf2'
 * (a compile error whenever DAP_OS_ANDROID is defined) — it now terminates
 * buf0 instead; a dead second memcpy of the level tag (all of whose bytes were
 * immediately overwritten or fell beyond the emitted length) was removed.
 */
void _log_it( const char *log_tag, enum dap_log_level ll, const char *fmt,... )
{
    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

    uint8_t *buf0 = temp_buffer;
    uint32_t len, tmp,
            time_offset,
            tag_offset,
            msg_offset;

    if ( ll < dap_log_level || ll >= 16 || !log_tag )
        return;

    time_t t = time( NULL );

    pthread_mutex_lock( &mutex );

    // ANSI color prefix: console output starts at buf0, file/history output
    // starts at buf0 + time_offset, i.e. without the color escape.
    memcpy( buf0, ansi_seq_color[ll], ansi_seq_color_len[ll] );
    time_offset = ansi_seq_color_len[ll];

    struct tm *tmptime = localtime( &t );
    len = strftime( (char *)(buf0 + time_offset), 65536, "[%x-%X]", tmptime );
    tag_offset = time_offset + len;

    // Fixed 8-byte level tag, e.g. " [ERR] [".
    memcpy( buf0 + tag_offset, log_level_tag[ll], 8 );
    msg_offset = tag_offset + 8;

    // Module tag, closed with "] ".
    while ( *log_tag )
        buf0[ msg_offset ++ ] = *log_tag ++;
    buf0[ msg_offset ++ ] = ']';
    buf0[ msg_offset ++ ] = ' ';

    va_list va;
    va_start( va, fmt );
    len = dap_vsprintf( (char * __restrict )(buf0 + msg_offset), fmt, va );
    va_end( va );

    len += msg_offset;

#ifdef DAP_OS_ANDROID
    buf0[ len ] = 0; // the Android logger needs a NUL-terminated C string
    __android_log_write( ANDROID_LOG_INFO, DAP_BRAND, (char *)(buf0 + msg_offset) );
#endif

    buf0[ len++ ] = 10; // trailing newline

    if ( s_log_file )
        fwrite( buf0 + time_offset, len - time_offset, 1, s_log_file );

    // History keeps the line without the ANSI color prefix.
    log_log( (char *)(buf0 + time_offset), len - time_offset, t );

#ifdef _WIN32
    SetConsoleTextAttribute( hWin32ConOut, log_level_colors[ll] );
    WriteFile( hWin32ConOut, buf0 + time_offset, len - time_offset, (LPDWORD)&tmp, NULL );
#else
    fwrite( buf0, len, 1, stdout );
#endif

    pthread_mutex_unlock( &mutex );
}
/**
 * @brief log_error Return the last recorded error message
 * @return Pointer to the module-level error string (may be empty)
 */
const char *log_error()
{
    return &s_last_error[0];
}
#if 1
#define INT_DIGITS 19   /* enough for 64 bit integer */

/**
 * @brief dap_itoa Convert a signed integer to its decimal string representation
 * @param[in] i Value to convert
 * @return Pointer into a static buffer holding the digits; overwritten by the
 *         next call and therefore not thread-safe
 */
char *dap_itoa(int i)
{
    /* Static scratch: INT_DIGITS digits, optional '-', terminating '\0'
       (the last byte stays zero from static initialisation). */
    static char buf[INT_DIGITS + 2];
    char *p = buf + INT_DIGITS + 1; /* points to terminating '\0' */

    if (i >= 0) {
        do {
            *--p = (char)('0' + (i % 10));
            i /= 10;
        } while (i);
    } else {
        /* Negative path: i % 10 is <= 0 here, so '0' - (i % 10) yields the
           digit; working with negative remainders also handles INT_MIN. */
        do {
            *--p = (char)('0' - (i % 10));
            i /= 10;
        } while (i);
        *--p = '-';
    }
    return p;
}
#endif
/**
 * @brief dap_time_to_str_rfc822 Convert time_t to a string with an RFC822-style date and time
 * @param[out] out          Output buffer
 * @param[in]  out_size_max Maximum size of output buffer
 * @param[in]  t            UNIX time
 * @return Length of resulting string if ok, or lesser than zero if not
 * NOTE(review): uses "%y" (two-digit year) and the Windows variant drops the
 * "%z" timezone — both deviate from strict RFC 822/2822; confirm consumers
 * before changing the format strings.
 */
int dap_time_to_str_rfc822(char * out, size_t out_size_max, time_t t)
{
    struct tm *tmp;
    tmp = localtime( &t );
    if ( tmp == NULL ) {
        log_it( L_ERROR, "Can't convert data from unix fromat to structured one" );
        return -2;
    }

    int ret;
#ifndef _WIN32
    ret = strftime( out, out_size_max, "%a, %d %b %y %T %z", tmp );
#else
    // Windows strftime lacks %T/%z; spell out hours:minutes:seconds instead.
    ret = strftime( out, out_size_max, "%a, %d %b %y %H:%M:%S", tmp );
#endif
    if ( !ret ) {
        log_it( L_ERROR, "Can't print formatted time in string" );
        return -1;
    }

    return ret;
}
#define BREAK_LATENCY   1

// Self-pipe used to interrupt a blocking select(): [0] = read end, [1] = write end.
static int breaker_set[2] = { -1, -1 };
static int initialized = 0;
#ifndef _WIN32
static struct timespec break_latency = { 0, BREAK_LATENCY * 1000 * 1000 };
#endif

/**
 * @brief get_select_breaker Lazily create the self-pipe used to break select()
 * @return Read-end file descriptor of the pipe, or -1 if pipe creation failed
 */
int get_select_breaker( )
{
    if ( initialized )
        return breaker_set[0];
    if ( pipe(breaker_set) < 0 )
        return -1;
    initialized = 1;
    return breaker_set[0];
}
/**
 * @brief send_select_break Wake a select() call blocked on the breaker pipe
 * @return 0 on success, -1 if the pipe is not initialised or I/O failed
 *
 * Writes one NUL byte into the write end, sleeps BREAK_LATENCY ms so the
 * blocked thread can observe readability, then drains the byte back out to
 * leave the pipe empty for the next break.
 */
int send_select_break( )
{
    if ( !initialized )
        return -1;

    char buffer[1];

#ifndef _WIN32
    if ( write(breaker_set[1], "\0", 1) <= 0 )
#else
    if ( _write(breaker_set[1], "\0", 1) <= 0 )
#endif
        return -1;

#ifndef _WIN32
    nanosleep( &break_latency, NULL );
#else
    Sleep( BREAK_LATENCY );
#endif

#ifndef _WIN32
    if ( read(breaker_set[0], buffer, 1) <= 0 || buffer[0] != '\0' )
#else
    if ( _read(breaker_set[0], buffer, 1) <= 0 || buffer[0] != '\0' )
#endif
        return -1;

    return 0;
}
#ifdef ANDROID1
// Fallback libc replacements for old Android builds that lack them.
// NOTE(review): only compiled when ANDROID1 is defined; nothing in view defines it.
static u_long myNextRandom = 1; // PRNG state shared by rand()/srand()

// atof() shim implemented on top of strtod().
double atof(const char *nptr)
{
    return (strtod(nptr, NULL));
}

// Minimal linear-congruential rand(): x = 1103515245*x + 12345, reduced to [0, RAND_MAX].
int rand(void)
{
    return (int)((myNextRandom = (1103515245 * myNextRandom) + 12345) % ((u_long)RAND_MAX + 1));
}

// Seed the LCG state used by rand().
void srand(u_int seed)
{
    myNextRandom = seed;
}
#endif
#if 0
/**
 * @brief exec_with_ret Executes a shell command and returns its first output line
 * @param[in] a_cmd Command
 * @return Newly allocated copy of the first output line (trailing '\n' stripped),
 *         or a copy of an empty string if popen() failed; caller frees
 * NOTE(review): dead code (#if 0). If the command prints nothing, buf_len is 0
 * and buf[buf_len-1] indexes out of bounds — fix before re-enabling.
 */
char * exec_with_ret(const char * a_cmd)
{
    FILE * fp;
    size_t buf_len = 0;
    char buf[4096] = {0};
    fp= popen(a_cmd, "r");
    if (!fp) {
        goto FIN;
    }
    memset(buf,0,sizeof(buf));
    fgets(buf,sizeof(buf)-1,fp); // only the first line is kept
    pclose(fp);
    buf_len=strlen(buf);
    if(buf[buf_len-1] =='\n')buf[buf_len-1] ='\0'; // strip trailing newline
FIN:
    return strdup(buf);
}

/**
 * @brief exec_with_ret_multistring Executes a command and returns its whole output
 * @param[in] a_cmd Command
 * @return Newly allocated copy of all output lines concatenated, or "" on popen() failure
 * NOTE(review): dead code (#if 0). retbuf is 4096 bytes and strcat is unbounded,
 * so longer output overflows it; retbuf[buf_len-1] also underflows on empty output.
 */
char * exec_with_ret_multistring(const char * a_cmd)
{
    FILE * fp;
    size_t buf_len = 0;
    char buf[4096] = {0};
    fp= popen(a_cmd, "r");
    if (!fp) {
        goto FIN;
    }
    memset(buf,0,sizeof(buf));
    char retbuf[4096] = {0};
    while(fgets(buf,sizeof(buf)-1,fp)) {
        strcat(retbuf, buf); // accumulate every line
    }
    pclose(fp);
    buf_len=strlen(retbuf);
    if(retbuf[buf_len-1] =='\n')retbuf[buf_len-1] ='\0'; // strip trailing newline
FIN:
    return strdup(retbuf);
}
#endif
// Alphanumeric alphabet for the random-string helpers; sizeof-1 drops the NUL.
static const char l_possible_chars[]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";

/**
 * @brief dap_random_string_fill Fill a caller-supplied buffer with random alphanumeric characters
 * @param[out] str    Destination array (no NUL terminator is written)
 * @param[in]  length Number of characters to generate
 */
void dap_random_string_fill(char *str, size_t length) {
    const size_t l_alphabet_size = sizeof(l_possible_chars) - 1;
    size_t l_pos = 0;
    while (l_pos < length) {
        str[l_pos] = l_possible_chars[rand() % l_alphabet_size];
        ++l_pos;
    }
}
/**
 * @brief dap_random_string_create_alloc Allocate and fill a random alphanumeric string
 * @param[in] a_length Number of random characters to generate
 * @return Newly allocated buffer of a_length characters; caller owns it
 * NOTE(review): the buffer is a_length+1 bytes but no NUL terminator is written —
 * confirm callers rely on the raw characters only before terminating it here.
 */
char * dap_random_string_create_alloc(size_t a_length)
{
    char *l_str = DAP_NEW_SIZE(char, a_length + 1);
    for (size_t l_i = 0; l_i < a_length; ++l_i)
        l_str[l_i] = l_possible_chars[rand() % (sizeof(l_possible_chars) - 1)];
    return l_str;
}
#if 0
#define MAX_PRINT_WIDTH 100

// Print 'count' copies of character 'c' on one line; count must stay below
// MAX_PRINT_WIDTH (asserted). NOTE(review): dead code (#if 0); relies on the
// static buffer's zero tail for NUL termination.
static void _printrepchar(char c, size_t count) {
    assert(count < MAX_PRINT_WIDTH &&
        "Too many characters");
    static char buff[MAX_PRINT_WIDTH];
    memset(buff, (int)c, count);
    printf("%s\n", buff);
}

/**
 * @brief The function displays a dump
 * @param[in] data The data dump you want to display
 * @param[in] size The size of the data whose dump you want to display
 *
 * Prints the buffer as canonical hex-dump rows: 16 hex bytes per line with a
 * gap every 8, followed by the printable-ASCII view ('.' for non-printable).
 */
void dap_dump_hex(const void* data, size_t size) {
    char ascii[17];
    size_t i, j;
    ascii[16] = '\0';
    for (i = 0; i < size; ++i) {
        printf("%02X ", ((const unsigned char*)data)[i]);
        // Collect the ASCII column for this 16-byte row.
        if (((const unsigned char*)data)[i] >= ' ' && ((const unsigned char*)data)[i] <= '~') {
            ascii[i % 16] = ((const char*)data)[i];
        } else {
            ascii[i % 16] = '.';
        }
        if ((i+1) % 8 == 0 || i+1 == size) {
            printf(" ");
            if ((i+1) % 16 == 0) {
                printf("| %s \n", ascii);
            } else if (i+1 == size) {
                // Last, partial row: pad the hex area so the ASCII column aligns.
                ascii[(i+1) % 16] = '\0';
                if ((i+1) % 16 <= 8) {
                    printf(" ");
                }
                for (j = (i+1) % 16; j < 16; ++j) {
                    printf("   ");
                }
                printf("| %s \n", ascii);
            }
        }
    }
    _printrepchar('-', 70);
}

// Zero n bytes of a_buf and return it (memset wrapper kept for legacy callers).
void *memzero(void *a_buf, size_t n)
{
    memset(a_buf,0,n);
    return a_buf;
}
#endif
/**
 * Convert binary data to its hexadecimal (binhex) text representation.
 *
 * @param a_out output buffer, must hold at least 2*a_len characters (no NUL is written)
 * @param a_in  input bytes
 * @param a_len number of input bytes to encode
 * @return number of input bytes encoded (a_len), or 0 if either pointer is NULL
 */
size_t dap_bin2hex(char *a_out, const void *a_in, size_t a_len)
{
    static char hex[] = "0123456789ABCDEF"; // nibble -> ASCII lookup
    if (!a_in || !a_out)
        return 0;
    const uint8_t *l_src = (const uint8_t *)a_in;
    for (size_t l_i = 0; l_i < a_len; ++l_i) {
        uint8_t l_byte = l_src[l_i];
        *a_out++ = hex[l_byte >> 4];
        *a_out++ = hex[l_byte & 0x0F];
    }
    return a_len;
}
// !!!!!!!!!!!!!!!!!!!
/**
 * Convert binhex encoded data back to binary data.
 *
 * @param a_out output buffer, at least a_len/2 bytes; may alias a_in (in-place decode)
 * @param a_in  hex characters '0'-'9', 'a'-'f', 'A'-'F' (no validation is performed)
 * @param a_len number of input characters; must be even
 * @return a_len on success, 0 if a pointer is NULL or a_len is odd
 */
size_t dap_hex2bin(uint8_t *a_out, const char *a_in, size_t a_len)
{
    if (!a_in || !a_out || (a_len & 1))
        return 0;
    for (size_t l_left = a_len; l_left; l_left -= 2) {
        char c = *a_in++;
        uint8_t l_hi = (c >= 'a') ? (uint8_t)(c - 'a' + 10)
                     : (c >= 'A') ? (uint8_t)(c - 'A' + 10)
                     : (uint8_t)(c - '0');
        c = *a_in++;
        uint8_t l_lo = (c >= 'a') ? (uint8_t)(c - 'a' + 10)
                     : (c >= 'A') ? (uint8_t)(c - 'A' + 10)
                     : (uint8_t)(c - '0');
        *a_out++ = (uint8_t)((l_hi << 4) | l_lo);
    }
    return a_len;
}
// !!!!!!!!!!!!!!!!!!!
/**
 * Parse a decimal or "0x"-prefixed hexadecimal number string into a
 * little-endian byte array.
 *
 * @param num_str input string; NULL is ignored
 * @param raw     output buffer, zero-filled then overwritten with up to 8 value bytes
 * @param raw_len size of the output buffer in bytes
 */
void dap_digit_from_string(const char *num_str, uint8_t *raw, size_t raw_len)
{
    if (!num_str)
        return;
    uint64_t l_val = strncasecmp(num_str, "0x", 2)
            ? strtoull(num_str, NULL, 10)
            : strtoull(num_str + 2, NULL, 16);
    // for LITTLE_ENDIAN (Intel), do nothing, otherwise swap bytes
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    l_val = le64toh(l_val);
#endif
    memset(raw, 0, raw_len);
    size_t l_copy = raw_len < sizeof(uint64_t) ? raw_len : sizeof(uint64_t);
    memcpy(raw, &l_val, l_copy);
}
typedef union {
    uint16_t addrs[4]; // four 16-bit groups of a node address, least-significant first
    uint64_t addr;
} node_addr_t;

/**
 * Parse either a "0x"-prefixed hexadecimal number or a node address of the
 * form "XXXX::XXXX::XXXX::XXXX" into a little-endian byte array.
 *
 * @param num_str input string; NULL is ignored
 * @param raw     output buffer, zero-filled then overwritten with up to 8 value bytes
 * @param raw_len size of the output buffer in bytes
 *
 * Fix vs. upstream: 'val' was uninitialised, so when sscanf() matched fewer
 * than four "::" groups the unmatched bytes copied into raw were garbage;
 * it is now zero-initialised.
 */
void dap_digit_from_string2(const char *num_str, uint8_t *raw, size_t raw_len)
{
    if(!num_str)
        return;

    uint64_t val = 0; // zero so partially matched node addresses stay deterministic

    if(!strncasecmp(num_str, "0x", 2)) {
        val = strtoull(num_str + 2, NULL, 16);
    } else {
        node_addr_t *nodeaddr = (node_addr_t *)&val;
        sscanf( num_str, "%hx::%hx::%hx::%hx", &nodeaddr->addrs[3], &nodeaddr->addrs[2], &nodeaddr->addrs[1], &nodeaddr->addrs[0] );
    }

    // for LITTLE_ENDIAN (Intel), do nothing, otherwise swap bytes
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    val = le64toh(val);
#endif
    memset(raw, 0, raw_len);
    memcpy(raw, &val, raw_len < sizeof(uint64_t) ? raw_len : sizeof(uint64_t));
}
#if 0
/*!
 * \brief Execute shell command silently (Windows-only implementation)
 * \param a_cmd command line, appended after "cmd.exe /c"
 * \return 0 if success, -1 otherwise
 * NOTE(review): dead code (#if 0). cmdline is 512 bytes and strcat is unbounded,
 * so a long a_cmd overflows it. 0x08000000 is CREATE_NO_WINDOW, 0xffffffff is INFINITE.
 */
int exec_silent(const char * a_cmd) {
    PROCESS_INFORMATION p_info;
    STARTUPINFOA s_info;
    memzero(&s_info, sizeof(s_info));
    memzero(&p_info, sizeof(p_info));
    s_info.cb = sizeof(s_info);
    char cmdline[512] = {'\0'};
    strcat(cmdline, "C:\\Windows\\System32\\cmd.exe /c ");
    strcat(cmdline, a_cmd);
    if (CreateProcessA(NULL, cmdline, NULL, NULL, FALSE, 0x08000000, NULL, NULL, &s_info, &p_info)) {
        WaitForSingleObject(p_info.hProcess, 0xffffffff);
        CloseHandle(p_info.hProcess);
        CloseHandle(p_info.hThread);
        return 0;
    }
    else {
        return -1;
    }
}
#endif
......@@ -2,7 +2,7 @@
* Authors:
* Aleksandr Lysikov <alexander.lysikov@demlabs.net>
* DeM Labs Inc. https://demlabs.net
* Kelvin Project https://github.com/kelvinblockchain
* Kelvin Project https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2018
* All rights reserved.
......
// Byte -> two-ASCII-hex-digit lookup table: entry i packs the two characters
// of i's uppercase hex form into one uint16_t with the first digit in the low
// byte, so storing the uint16_t on a little-endian host emits the digits in
// print order (e.g. entry 31 is 0x4631 -> bytes '1','F' -> "1F").
// NOTE(review): assumes a little-endian consumer — confirm before use on BE.
DAP_ALIGNED(16) uint16_t htoa_lut256[ 256 ] = {
    0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930, 0x4130, 0x4230, 0x4330, 0x4430, 0x4530,
    0x4630, 0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931, 0x4131, 0x4231, 0x4331, 0x4431,
    0x4531, 0x4631, 0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932, 0x4132, 0x4232, 0x4332,
    0x4432, 0x4532, 0x4632, 0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933, 0x4133, 0x4233,
    0x4333, 0x4433, 0x4533, 0x4633, 0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934, 0x4134,
    0x4234, 0x4334, 0x4434, 0x4534, 0x4634, 0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935,
    0x4135, 0x4235, 0x4335, 0x4435, 0x4535, 0x4635, 0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836,
    0x3936, 0x4136, 0x4236, 0x4336, 0x4436, 0x4536, 0x4636, 0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737,
    0x3837, 0x3937, 0x4137, 0x4237, 0x4337, 0x4437, 0x4537, 0x4637, 0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638,
    0x3738, 0x3838, 0x3938, 0x4138, 0x4238, 0x4338, 0x4438, 0x4538, 0x4638, 0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539,
    0x3639, 0x3739, 0x3839, 0x3939, 0x4139, 0x4239, 0x4339, 0x4439, 0x4539, 0x4639, 0x3041, 0x3141, 0x3241, 0x3341, 0x3441,
    0x3541, 0x3641, 0x3741, 0x3841, 0x3941, 0x4141, 0x4241, 0x4341, 0x4441, 0x4541, 0x4641, 0x3042, 0x3142, 0x3242, 0x3342,
    0x3442, 0x3542, 0x3642, 0x3742, 0x3842, 0x3942, 0x4142, 0x4242, 0x4342, 0x4442, 0x4542, 0x4642, 0x3043, 0x3143, 0x3243,
    0x3343, 0x3443, 0x3543, 0x3643, 0x3743, 0x3843, 0x3943, 0x4143, 0x4243, 0x4343, 0x4443, 0x4543, 0x4643, 0x3044, 0x3144,
    0x3244, 0x3344, 0x3444, 0x3544, 0x3644, 0x3744, 0x3844, 0x3944, 0x4144, 0x4244, 0x4344, 0x4444, 0x4544, 0x4644, 0x3045,
    0x3145, 0x3245, 0x3345, 0x3445, 0x3545, 0x3645, 0x3745, 0x3845, 0x3945, 0x4145, 0x4245, 0x4345, 0x4445, 0x4545, 0x4645,
    0x3046, 0x3146, 0x3246, 0x3346, 0x3446, 0x3546, 0x3646, 0x3746, 0x3846, 0x3946, 0x4146, 0x4246, 0x4346, 0x4446, 0x4546,
    0x4646
};
......@@ -2,7 +2,7 @@
* Authors:
* Dmitriy A. Gearasimov <gerasimov.dmitriy@demlabs.net>
* DeM Labs Inc. https://demlabs.net
* Kelvin Project https://github.com/kelvinblockchain
* Kelvin Project https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2018
* All rights reserved.
......
/* DAP String Functions */
#ifdef _WIN32
#define _CRT_SECURE_NO_WARNINGS
//#define _CRT_SECURE_NO_WARNINGS
#include <windows.h>
#endif
#include <stddef.h>
......@@ -14,7 +14,7 @@
#include "dap_strfuncs.h"
#ifdef _WIN32
char *strndup(char *str, int len) {
static inline char *strndup(char *str, int len) {
char *buf = (char*)malloc(len + 1);
memcpy(buf, str, len);
buf[len] = 0;
......@@ -22,6 +22,22 @@ char *strndup(char *str, int len) {
}
#endif
#ifdef _WIN32
// Minimal strptime() replacement for Windows builds.
// NOTE(review): ignores 'fmt' and always parses "YY.MM.DD_HH.MM.SS"; the
// parsed fields are stored raw (tm_mon is not normalised to 0-based and
// tm_year is set to the calendar year rather than years-since-1900 as
// struct tm requires) — confirm callers expect this layout before changing.
char *strptime( char *buff, const char *fmt, struct tm *tm )
{
    uint32_t len = strlen( buff );
    dap_sscanf( buff,"%u.%u.%u_%u.%u.%u",&tm->tm_year, &tm->tm_mon, &tm->tm_mday, &tm->tm_hour, &tm->tm_min, &tm->tm_sec );
    tm->tm_year += 2000; // two-digit year on input, e.g. "21" -> 2021
    return buff + len;   // always reports the whole string as consumed
}
#endif
/**
* dap_strlen:
* @a_str: (nullable): the string
......@@ -113,7 +129,7 @@ char* dap_strdup(const char *a_str)
char* dap_strdup_vprintf(const char *a_format, va_list a_args)
{
char *l_string = NULL;
int len = vasprintf(&l_string, a_format, a_args);
int len = dap_vasprintf(&l_string, a_format, a_args);
if(len < 0)
l_string = NULL;
return l_string;
......
......@@ -833,7 +833,7 @@ void dap_string_append_vprintf(dap_string_t *string, const char *format, va_list
dap_return_if_fail(string != NULL);
dap_return_if_fail(format != NULL);
len = vasprintf(&buf, format, args);
len = dap_vasprintf(&buf, format, args);
if(len >= 0) {
dap_string_maybe_expand(string, len);
......
......@@ -2,7 +2,7 @@
* Authors:
* Anton Isaikin <anton.isaikin@demlabs.net>
* DeM Labs Inc. https://demlabs.net
* DeM Labs Open source community https://github.com/demlabsinc
* DeM Labs Open source community https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2019
* All rights reserved.
......
/* rpmalloc.c - Memory allocator - Public Domain - 2016 Mattias Jansson
*
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
* The latest source code is always available at
*
* https://github.com/mjansson/rpmalloc
*
* This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
*
*/
#include "rpmalloc.h"
/// Build time configurable limits
#ifndef HEAP_ARRAY_SIZE
//! Size of heap hashmap
#define HEAP_ARRAY_SIZE 47
#endif
#ifndef ENABLE_THREAD_CACHE
//! Enable per-thread cache
#define ENABLE_THREAD_CACHE 1
#endif
#ifndef ENABLE_GLOBAL_CACHE
// ---- Compile-time configuration ----
// Each ENABLE_*/DISABLE_* switch may be predefined by the build system; values below are defaults.
//! Enable global cache shared between all threads, requires thread cache
#define ENABLE_GLOBAL_CACHE 1
#endif
#ifndef ENABLE_VALIDATE_ARGS
//! Enable validation of args to public entry points
#define ENABLE_VALIDATE_ARGS 0
#endif
#ifndef ENABLE_STATISTICS
//! Enable statistics collection
#define ENABLE_STATISTICS 0
#endif
#ifndef ENABLE_ASSERTS
//! Enable asserts
#define ENABLE_ASSERTS 0
#endif
#ifndef ENABLE_OVERRIDE
//! Override standard library malloc/free and new/delete entry points
#define ENABLE_OVERRIDE 0
#endif
#ifndef ENABLE_PRELOAD
//! Support preloading
#define ENABLE_PRELOAD 0
#endif
#ifndef DISABLE_UNMAP
//! Disable unmapping memory pages
#define DISABLE_UNMAP 0
#endif
#ifndef DEFAULT_SPAN_MAP_COUNT
//! Default number of spans to map in call to map more virtual memory (default values yield 4MiB here)
#define DEFAULT_SPAN_MAP_COUNT 64
#endif
#if ENABLE_THREAD_CACHE
#ifndef ENABLE_UNLIMITED_CACHE
//! Unlimited thread and global cache
#define ENABLE_UNLIMITED_CACHE 0
#endif
#ifndef ENABLE_UNLIMITED_THREAD_CACHE
//! Unlimited cache disables any thread cache limitations
#define ENABLE_UNLIMITED_THREAD_CACHE ENABLE_UNLIMITED_CACHE
#endif
#if !ENABLE_UNLIMITED_THREAD_CACHE
#ifndef THREAD_CACHE_MULTIPLIER
//! Multiplier for thread cache (cache limit will be span release count multiplied by this value)
#define THREAD_CACHE_MULTIPLIER 16
#endif
#ifndef ENABLE_ADAPTIVE_THREAD_CACHE
//! Enable adaptive size of per-thread cache (still bounded by THREAD_CACHE_MULTIPLIER hard limit)
#define ENABLE_ADAPTIVE_THREAD_CACHE 0
#endif
#endif
#endif
#if ENABLE_GLOBAL_CACHE && ENABLE_THREAD_CACHE
#ifndef ENABLE_UNLIMITED_GLOBAL_CACHE
//! Unlimited cache disables any global cache limitations
#define ENABLE_UNLIMITED_GLOBAL_CACHE ENABLE_UNLIMITED_CACHE
#endif
#if !ENABLE_UNLIMITED_GLOBAL_CACHE
//! Multiplier for global cache (cache limit will be span release count multiplied by this value)
#define GLOBAL_CACHE_MULTIPLIER (THREAD_CACHE_MULTIPLIER * 6)
#endif
#else
// The global cache only makes sense on top of the thread cache; force it off otherwise
#  undef ENABLE_GLOBAL_CACHE
#  define ENABLE_GLOBAL_CACHE 0
#endif
#if !ENABLE_THREAD_CACHE || ENABLE_UNLIMITED_THREAD_CACHE
// Adaptive sizing only applies to a bounded thread cache
#  undef ENABLE_ADAPTIVE_THREAD_CACHE
#  define ENABLE_ADAPTIVE_THREAD_CACHE 0
#endif
#if DISABLE_UNMAP && !ENABLE_GLOBAL_CACHE
#  error Must use global cache if unmap is disabled
#endif
/// Platform and arch specifics
#if defined(_MSC_VER) && !defined(__clang__)
# define FORCEINLINE inline __forceinline
# define _Static_assert static_assert
#else
# define FORCEINLINE inline __attribute__((__always_inline__))
#endif
#if PLATFORM_WINDOWS
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# if ENABLE_VALIDATE_ARGS
# include <Intsafe.h>
# endif
#else
# include <unistd.h>
# include <stdio.h>
# include <stdlib.h>
# if defined(__APPLE__)
# include <mach/mach_vm.h>
# include <pthread.h>
# endif
# if defined(__HAIKU__)
# include <OS.h>
# include <pthread.h>
# endif
#endif
#include <stdint.h>
#include <string.h>
#if ENABLE_ASSERTS
# undef NDEBUG
# if defined(_MSC_VER) && !defined(_DEBUG)
# define _DEBUG
# endif
# include <assert.h>
#else
# undef assert
# define assert(x) do {} while(0)
#endif
#if ENABLE_STATISTICS
# include <stdio.h>
#endif
/// Atomic access abstraction
// Loads/stores/adds use relaxed ordering; callers insert explicit acquire/release
// fences (or rely on the CAS ordering) where publication matters.
#if defined(_MSC_VER) && !defined(__clang__)
typedef volatile long atomic32_t;
typedef volatile long long atomic64_t;
typedef volatile void* atomicptr_t;
// NOTE(review): fences are no-ops and load/store are plain volatile accesses on this path;
// this appears to rely on MSVC volatile semantics and x86/x64 store ordering -- confirm for ARM
#define atomic_thread_fence_acquire()
#define atomic_thread_fence_release()
static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return *src; }
static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { *dst = val; }
static FORCEINLINE int32_t atomic_incr32(atomic32_t* val) { return (int32_t)_InterlockedExchangeAdd(val, 1) + 1; }
static FORCEINLINE int32_t atomic_add32(atomic32_t* val, int32_t add) { return (int32_t)_InterlockedExchangeAdd(val, add) + add; }
static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)*src; }
static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { *dst = val; }
// Pointer-width compare-and-swap: returns 1 on success, 0 on failure
# if defined(__LLP64__) || defined(__LP64__) || defined(_WIN64)
static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return (_InterlockedCompareExchange64((volatile long long*)dst, (long long)val, (long long)ref) == (long long)ref) ? 1 : 0; }
#else
static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return (_InterlockedCompareExchange((volatile long*)dst, (long)val, (long)ref) == (long)ref) ? 1 : 0; }
#endif
// No branch-prediction hints available on MSVC
#define EXPECTED(x) (x)
#define UNEXPECTED(x) (x)
#else
#include <stdatomic.h>
typedef volatile _Atomic(int32_t) atomic32_t;
typedef volatile _Atomic(int64_t) atomic64_t;
typedef volatile _Atomic(void*) atomicptr_t;
#define atomic_thread_fence_acquire() atomic_thread_fence(memory_order_acquire)
#define atomic_thread_fence_release() atomic_thread_fence(memory_order_release)
static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { atomic_store_explicit(dst, val, memory_order_relaxed); }
static FORCEINLINE int32_t atomic_incr32(atomic32_t* val) { return atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1; }
static FORCEINLINE int32_t atomic_add32(atomic32_t* val, int32_t add) { return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add; }
static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { atomic_store_explicit(dst, val, memory_order_relaxed); }
// Weak CAS (may fail spuriously; all callers retry in a loop): release on success, acquire on failure
static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return atomic_compare_exchange_weak_explicit(dst, &ref, val, memory_order_release, memory_order_acquire); }
#define EXPECTED(x) __builtin_expect((x), 1)
#define UNEXPECTED(x) __builtin_expect((x), 0)
#endif
/// Preconfigured limits and sizes
//! Granularity of a small allocation block
#define SMALL_GRANULARITY 16
//! Small granularity shift count
#define SMALL_GRANULARITY_SHIFT 4
//! Number of small block size classes
#define SMALL_CLASS_COUNT 65
//! Maximum size of a small block
#define SMALL_SIZE_LIMIT (SMALL_GRANULARITY * (SMALL_CLASS_COUNT - 1))
//! Granularity of a medium allocation block
#define MEDIUM_GRANULARITY 512
//! Medium granularity shift count
#define MEDIUM_GRANULARITY_SHIFT 9
//! Number of medium block size classes
#define MEDIUM_CLASS_COUNT 61
//! Total number of small + medium size classes
#define SIZE_CLASS_COUNT (SMALL_CLASS_COUNT + MEDIUM_CLASS_COUNT)
//! Number of large block size classes
#define LARGE_CLASS_COUNT 32
//! Maximum size of a medium block
#define MEDIUM_SIZE_LIMIT (SMALL_SIZE_LIMIT + (MEDIUM_GRANULARITY * MEDIUM_CLASS_COUNT))
//! Maximum size of a large block
#define LARGE_SIZE_LIMIT ((LARGE_CLASS_COUNT * _memory_span_size) - SPAN_HEADER_SIZE)
//! Size of a span header (must be a multiple of SMALL_GRANULARITY)
#define SPAN_HEADER_SIZE 96
#if ENABLE_VALIDATE_ARGS
//! Maximum allocation size to avoid integer overflow
#undef MAX_ALLOC_SIZE
#define MAX_ALLOC_SIZE (((size_t)-1) - _memory_span_size)
#endif
// Byte-granular pointer arithmetic helpers
#define pointer_offset(ptr, ofs) (void*)((char*)(ptr) + (ptrdiff_t)(ofs))
#define pointer_diff(first, second) (ptrdiff_t)((const char*)(first) - (const char*)(second))
//! Sentinel pointer value, distinct from any valid pointer and from null
#define INVALID_POINTER ((void*)((uintptr_t)-1))
/// Data types
//! A memory heap, per thread
typedef struct heap_t heap_t;
//! Heap spans per size class
typedef struct heap_class_t heap_class_t;
//! Span of memory pages
typedef struct span_t span_t;
//! Span list
typedef struct span_list_t span_list_t;
//! Span active data
typedef struct span_active_t span_active_t;
//! Size class definition
typedef struct size_class_t size_class_t;
//! Global cache
typedef struct global_cache_t global_cache_t;
// Span flag bits (stored in span_t.flags; MASTER|SUBSPAN together marks an unmapped master)
//! Flag indicating span is the first (master) span of a split superspan
#define SPAN_FLAG_MASTER 1U
//! Flag indicating span is a secondary (sub) span of a split superspan
#define SPAN_FLAG_SUBSPAN 2U
//! Flag indicating span has blocks with increased alignment
#define SPAN_FLAG_ALIGNED_BLOCKS 4U
#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
// Per-heap, per-span-count usage tracking; the current/high pair drives the
// adaptive thread cache, the remainder is statistics-only.
struct span_use_t {
	//! Current number of spans used (actually used, not in cache)
	uint32_t current;
	//! High water mark of spans used
	uint32_t high;
#if ENABLE_STATISTICS
	//! Number of spans transitioned to global cache
	uint32_t spans_to_global;
	//! Number of spans transitioned from global cache
	uint32_t spans_from_global;
	//! Number of spans transitioned to thread cache
	uint32_t spans_to_cache;
	//! Number of spans transitioned from thread cache
	uint32_t spans_from_cache;
	//! Number of spans transitioned to reserved state
	uint32_t spans_to_reserved;
	//! Number of spans transitioned from reserved state
	uint32_t spans_from_reserved;
	//! Number of raw memory map calls
	uint32_t spans_map_calls;
#endif
};
typedef struct span_use_t span_use_t;
#endif
#if ENABLE_STATISTICS
// Per-heap, per-size-class allocation statistics (only compiled in with ENABLE_STATISTICS)
struct size_class_use_t {
	//! Current number of allocations
	atomic32_t alloc_current;
	//! Peak number of allocations
	int32_t alloc_peak;
	//! Total number of allocations
	int32_t alloc_total;
	//! Total number of frees
	atomic32_t free_total;
	//! Number of spans transitioned to cache
	uint32_t spans_to_cache;
	//! Number of spans transitioned from cache
	uint32_t spans_from_cache;
	//! Number of spans transitioned from reserved state
	uint32_t spans_from_reserved;
	//! Number of spans mapped
	uint32_t spans_map_calls;
};
typedef struct size_class_use_t size_class_use_t;
#endif
// Lifecycle of a span used for small/medium blocks:
// ACTIVE (currently allocated from) -> FULL (no free blocks) -> PARTIAL (some blocks freed)
typedef enum span_state_t {
	SPAN_STATE_ACTIVE = 0,
	SPAN_STATE_PARTIAL,
	SPAN_STATE_FULL
} span_state_t;
//A span can either represent a single span of memory pages with size declared by span_map_count configuration variable,
//or a set of spans in a continuous region, a super span. Any reference to the term "span" usually refers to both a single
//span or a super span. A super span can further be divided into multiple spans (or, again, super spans), where the first
//(super)span is the master and subsequent (super)spans are subspans. The master span keeps track of how many subspans
//that are still alive and mapped in virtual memory, and once all subspans and master have been unmapped the entire
//superspan region is released and unmapped (on Windows for example, the entire superspan range has to be released
//in the same call to release the virtual memory range, but individual subranges can be decommitted individually
//to reduce physical memory use).
struct span_t {
	//! Free list
	void* free_list;
	//! State
	uint32_t state;
	//! Used count when not active (not including deferred free list)
	uint32_t used_count;
	//! Block count
	uint32_t block_count;
	//! Size class
	uint32_t size_class;
	//! Index of last block initialized in free list
	uint32_t free_list_limit;
	//! Span list size when part of a cache list, or size of deferred free list when partial/full
	uint32_t list_size;
	//! Deferred free list
	atomicptr_t free_list_deferred;
	//! Size of a block
	uint32_t block_size;
	//! Flags and counters
	uint32_t flags;
	//! Number of spans
	uint32_t span_count;
	//! Total span counter for master spans, distance for subspans
	uint32_t total_spans_or_distance;
	//! Remaining span counter, for master spans
	atomic32_t remaining_spans;
	//! Alignment offset
	uint32_t align_offset;
	//! Owning heap
	heap_t* heap;
	//! Next span
	span_t* next;
	//! Previous span
	span_t* prev;
};
// The span header lives at the start of every span; it must fit in SPAN_HEADER_SIZE bytes
_Static_assert(sizeof(span_t) <= SPAN_HEADER_SIZE, "span size mismatch");
struct heap_class_t {
	//! Free list of active span
	void* free_list;
	//! Double linked list of partially used spans with free blocks for each size class.
	//  Current active span is at head of list. Previous span pointer in head points to tail span of list.
	span_t* partial_span;
};
// Per-thread heap: all allocation state for one owning thread
struct heap_t {
	//! Active and semi-used span data per size class
	heap_class_t span_class[SIZE_CLASS_COUNT];
#if ENABLE_THREAD_CACHE
	//! List of free spans (single linked list)
	span_t* span_cache[LARGE_CLASS_COUNT];
	//! List of deferred free spans of class 0 (single linked list)
	atomicptr_t span_cache_deferred;
#endif
#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
	//! Current and high water mark of spans used per span count
	span_use_t span_use[LARGE_CLASS_COUNT];
#endif
	//! Mapped but unused spans
	span_t* span_reserve;
	//! Master span for mapped but unused spans
	span_t* span_reserve_master;
	//! Number of mapped but unused spans
	size_t spans_reserved;
	//! Next heap in id list
	heap_t* next_heap;
	//! Next heap in orphan list
	heap_t* next_orphan;
	//! Memory pages alignment offset
	size_t align_offset;
	//! Heap ID
	int32_t id;
#if ENABLE_STATISTICS
	//! Number of bytes transitioned thread -> global
	size_t thread_to_global;
	//! Number of bytes transitioned global -> thread
	size_t global_to_thread;
	//! Allocation stats per size class
	size_class_use_t size_class_use[SIZE_CLASS_COUNT + 1];
#endif
};
struct size_class_t {
	//! Size of blocks in this class
	uint32_t block_size;
	//! Number of blocks in each chunk
	uint16_t block_count;
	//! Class index this class is merged with
	uint16_t class_idx;
};
// Size class entries are kept at exactly 8 bytes for compact table layout
_Static_assert(sizeof(size_class_t) == 8, "Size class size mismatch");
// Lock-free global span cache; the low bits of `cache` carry an ABA counter
struct global_cache_t {
	//! Cache list pointer
	atomicptr_t cache;
	//! Cache size
	atomic32_t size;
	//! ABA counter
	atomic32_t counter;
};
/// Global data
//! Initialized flag
static int _rpmalloc_initialized;
//! Configuration
static rpmalloc_config_t _memory_config;
//! Memory page size
static size_t _memory_page_size;
//! Shift to divide by page size
static size_t _memory_page_size_shift;
//! Granularity at which memory pages are mapped by OS
static size_t _memory_map_granularity;
#if RPMALLOC_CONFIGURABLE
//! Size of a span of memory pages
static size_t _memory_span_size;
//! Shift to divide by span size
static size_t _memory_span_size_shift;
//! Mask to get to start of a memory span
static uintptr_t _memory_span_mask;
#else
//! Hardwired span size (64KiB)
#define _memory_span_size (64 * 1024)
#define _memory_span_size_shift 16
#define _memory_span_mask (~((uintptr_t)(_memory_span_size - 1)))
#endif
//! Number of spans to map in each map call
static size_t _memory_span_map_count;
//! Number of spans to release from thread cache to global cache (single spans)
static size_t _memory_span_release_count;
//! Number of spans to release from thread cache to global cache (large multiple spans)
static size_t _memory_span_release_count_large;
//! Global size classes
static size_class_t _memory_size_class[SIZE_CLASS_COUNT];
//! Run-time size limit of medium blocks
static size_t _memory_medium_size_limit;
//! Heap ID counter
static atomic32_t _memory_heap_id;
//! Huge page support
static int _memory_huge_pages;
#if ENABLE_GLOBAL_CACHE
//! Global span cache
static global_cache_t _memory_span_cache[LARGE_CLASS_COUNT];
#endif
//! All heaps
static atomicptr_t _memory_heaps[HEAP_ARRAY_SIZE];
//! Orphaned heaps
static atomicptr_t _memory_orphan_heaps;
//! Running orphan counter to avoid ABA issues in linked list
static atomic32_t _memory_orphan_counter;
#if ENABLE_STATISTICS
//! Active heap count
static atomic32_t _memory_active_heaps;
//! Number of currently mapped memory pages
static atomic32_t _mapped_pages;
//! Peak number of concurrently mapped memory pages
static int32_t _mapped_pages_peak;
//! Number of currently unused spans
static atomic32_t _reserved_spans;
//! Running counter of total number of mapped memory pages since start
static atomic32_t _mapped_total;
//! Running counter of total number of unmapped memory pages since start
static atomic32_t _unmapped_total;
//! Number of currently mapped memory pages in OS calls
static atomic32_t _mapped_pages_os;
//! Number of currently allocated pages in huge allocations
static atomic32_t _huge_pages_current;
//! Peak number of currently allocated pages in huge allocations
static int32_t _huge_pages_peak;
#endif
//! Current thread heap
// When preloading on macOS/Haiku, TLS via compiler keywords is unreliable, so pthread keys are used
#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
static pthread_key_t _memory_thread_heap;
#else
#  ifdef _MSC_VER
#  define _Thread_local __declspec(thread)
#  define TLS_MODEL
#  else
// initial-exec TLS model avoids the dynamic TLS lookup overhead
// NOTE(review): initial-exec is not valid for dlopen-ed shared libraries -- confirm for such builds
#  define TLS_MODEL __attribute__((tls_model("initial-exec")))
#  if !defined(__clang__) && defined(__GNUC__)
#  define _Thread_local __thread
#  endif
#  endif
static _Thread_local heap_t* _memory_thread_heap TLS_MODEL;
#endif
//! Get the calling thread's heap without triggering initialization (may return 0)
static inline heap_t*
get_thread_heap_raw(void) {
#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
	return pthread_getspecific(_memory_thread_heap);
#else
	return _memory_thread_heap;
#endif
}
//! Get the current thread heap
// With ENABLE_PRELOAD the allocator may be entered before initialization, so a
// missing heap triggers rpmalloc_initialize() and a second lookup.
static inline heap_t*
get_thread_heap(void) {
	heap_t* heap = get_thread_heap_raw();
#if ENABLE_PRELOAD
	if (EXPECTED(heap != 0))
		return heap;
	rpmalloc_initialize();
	return get_thread_heap_raw();
#else
	return heap;
#endif
}
//! Set the current thread heap (thread-local; each thread owns exactly one slot)
static void
set_thread_heap(heap_t* heap) {
#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
	pthread_setspecific(_memory_thread_heap, heap);
#else
	_memory_thread_heap = heap;
#endif
}
//! Default implementation to map more virtual memory
static void*
_memory_map_os(size_t size, size_t* offset);
//! Default implementation to unmap virtual memory
static void
_memory_unmap_os(void* address, size_t size, size_t offset, size_t release);
//! Lookup a memory heap from heap ID
static heap_t*
_memory_heap_lookup(int32_t id) {
uint32_t list_idx = id % HEAP_ARRAY_SIZE;
heap_t* heap = atomic_load_ptr(&_memory_heaps[list_idx]);
while (heap && (heap->id != id))
heap = heap->next_heap;
return heap;
}
#if ENABLE_STATISTICS
// Statistics helpers; every macro below compiles to a no-op when ENABLE_STATISTICS is 0,
// so call sites never need their own #if guards.
# define _memory_statistics_inc(counter, value) counter += value
# define _memory_statistics_add(atomic_counter, value) atomic_add32(atomic_counter, (int32_t)(value))
# define _memory_statistics_add_peak(atomic_counter, value, peak) do { int32_t _cur_count = atomic_add32(atomic_counter, (int32_t)(value)); if (_cur_count > (peak)) peak = _cur_count; } while (0)
# define _memory_statistics_sub(atomic_counter, value) atomic_add32(atomic_counter, -(int32_t)(value))
# define _memory_statistics_inc_alloc(heap, class_idx) do { \
	int32_t alloc_current = atomic_incr32(&heap->size_class_use[class_idx].alloc_current); \
	if (alloc_current > heap->size_class_use[class_idx].alloc_peak) \
		heap->size_class_use[class_idx].alloc_peak = alloc_current; \
	heap->size_class_use[class_idx].alloc_total++; \
} while(0)
# define _memory_statistics_inc_free(heap, class_idx) do { \
	atomic_add32(&heap->size_class_use[class_idx].alloc_current, -1); \
	atomic_incr32(&heap->size_class_use[class_idx].free_total); \
} while(0)
#else
# define _memory_statistics_inc(counter, value) do {} while(0)
# define _memory_statistics_add(atomic_counter, value) do {} while(0)
# define _memory_statistics_add_peak(atomic_counter, value, peak) do {} while (0)
# define _memory_statistics_sub(atomic_counter, value) do {} while(0)
# define _memory_statistics_inc_alloc(heap, class_idx) do {} while(0)
# define _memory_statistics_inc_free(heap, class_idx) do {} while(0)
#endif
static void
_memory_heap_cache_insert(heap_t* heap, span_t* span);
//! Map more virtual memory
// size must be a non-zero multiple of the page size; *offset receives the alignment
// offset that must later be passed back to _memory_unmap for the same region.
static void*
_memory_map(size_t size, size_t* offset) {
	assert(!(size % _memory_page_size));
	assert(size >= _memory_page_size);
	//Mapping statistics are tracked in page units
	_memory_statistics_add_peak(&_mapped_pages, (size >> _memory_page_size_shift), _mapped_pages_peak);
	_memory_statistics_add(&_mapped_total, (size >> _memory_page_size_shift));
	return _memory_config.memory_map(size, offset);
}
//! Unmap virtual memory
// release == 0 means "decommit only, keep address range reserved"; a non-zero release
// is the total size of the original mapping and frees the whole range.
static void
_memory_unmap(void* address, size_t size, size_t offset, size_t release) {
	assert(!release || (release >= size));
	assert(!release || (release >= _memory_page_size));
	if (release) {
		assert(!(release % _memory_page_size));
		//Only a full release changes the mapped-page statistics
		_memory_statistics_sub(&_mapped_pages, (release >> _memory_page_size_shift));
		_memory_statistics_add(&_unmapped_total, (release >> _memory_page_size_shift));
	}
	_memory_config.memory_unmap(address, size, offset, release);
}
//! Declare the span to be a subspan and store distance from master span and span count
// A span that IS the master only has its span_count refreshed; its flags are untouched.
static void
_memory_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, size_t span_count) {
	assert((subspan != master) || (subspan->flags & SPAN_FLAG_MASTER));
	subspan->span_count = (uint32_t)span_count;
	if (subspan == master)
		return;
	//Distance from the master, measured in whole spans
	const uint32_t distance = (uint32_t)((uintptr_t)pointer_diff(subspan, master) >> _memory_span_size_shift);
	subspan->flags = SPAN_FLAG_SUBSPAN;
	subspan->total_spans_or_distance = distance;
	subspan->align_offset = 0;
}
//! Use reserved spans to fulfill a memory map request (reserve size must be checked by caller)
static span_t*
_memory_map_from_reserve(heap_t* heap, size_t span_count) {
	//Carve the requested spans off the front of the heap reserve
	span_t* reserved = heap->span_reserve;
	heap->spans_reserved -= span_count;
	heap->span_reserve = pointer_offset(reserved, span_count * _memory_span_size);
	_memory_span_mark_as_subspan_unless_master(heap->span_reserve_master, reserved, span_count);
	if (span_count <= LARGE_CLASS_COUNT)
		_memory_statistics_inc(heap->span_use[span_count - 1].spans_from_reserved, 1);
	return reserved;
}
//! Get the aligned number of spans to map in based on wanted count, configured mapping granularity and the page size
static size_t
_memory_map_align_span_count(size_t span_count) {
	//Never map fewer spans than the configured mapping granularity
	size_t request_count = span_count;
	if (request_count < _memory_span_map_count)
		request_count = _memory_span_map_count;
	//With huge pages (page > span) the request must also be page aligned:
	//round up to the next multiple of the span map count
	if ((_memory_page_size > _memory_span_size) && ((request_count * _memory_span_size) % _memory_page_size))
		request_count += _memory_span_map_count - (request_count % _memory_span_map_count);
	return request_count;
}
//! Store the given spans as reserve in the given heap
// The master is remembered so later consumers can be tagged as its subspans.
static void
_memory_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve, size_t reserve_span_count) {
	heap->spans_reserved = reserve_span_count;
	heap->span_reserve = reserve;
	heap->span_reserve_master = master;
}
//! Setup a newly mapped span as a master span
// remaining_spans counts live (sub)spans; the whole region unmaps when it reaches zero.
static void
_memory_span_initialize(span_t* span, size_t total_span_count, size_t span_count, size_t align_offset) {
	span->flags = SPAN_FLAG_MASTER;
	span->align_offset = (uint32_t)align_offset;
	span->span_count = (uint32_t)span_count;
	span->total_spans_or_distance = (uint32_t)total_span_count;
	atomic_store32(&span->remaining_spans, (int32_t)total_span_count);
}
//! Map an aligned set of spans, taking configured mapping granularity and the page size into account
static span_t*
_memory_map_aligned_span_count(heap_t* heap, size_t span_count) {
	//If we already have some, but not enough, reserved spans, release those to heap cache and map a new
	//full set of spans. Otherwise we would waste memory if page size > span size (huge pages)
	size_t aligned_span_count = _memory_map_align_span_count(span_count);
	size_t align_offset = 0;
	span_t* span = _memory_map(aligned_span_count * _memory_span_size, &align_offset);
	if (!span)
		return 0;
	//The newly mapped region becomes a master span covering the full aligned count
	_memory_span_initialize(span, aligned_span_count, span_count, align_offset);
	_memory_statistics_add(&_reserved_spans, aligned_span_count);
	if (span_count <= LARGE_CLASS_COUNT)
		_memory_statistics_inc(heap->span_use[span_count - 1].spans_map_calls, 1);
	if (aligned_span_count > span_count) {
		//Surplus spans become the new heap reserve; flush any previous reserve to the cache first
		if (heap->spans_reserved) {
			_memory_span_mark_as_subspan_unless_master(heap->span_reserve_master, heap->span_reserve, heap->spans_reserved);
			_memory_heap_cache_insert(heap, heap->span_reserve);
		}
		_memory_heap_set_reserved_spans(heap, span, pointer_offset(span, span_count * _memory_span_size), aligned_span_count - span_count);
	}
	return span;
}
//! Map in memory pages for the given number of spans (or use previously reserved pages)
static span_t*
_memory_map_spans(heap_t* heap, size_t span_count) {
	//Prefer previously reserved spans; otherwise map fresh memory
	return (span_count <= heap->spans_reserved) ?
		_memory_map_from_reserve(heap, span_count) :
		_memory_map_aligned_span_count(heap, span_count);
}
//! Unmap memory pages for the given number of spans (or mark as unused if no partial unmappings)
// Subspans can be decommitted individually when spans are page-sized or larger; the master
// span's memory is only fully released once every subspan (and the master itself) is gone.
static void
_memory_unmap_span(span_t* span) {
	assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN));
	assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN));
	int is_master = !!(span->flags & SPAN_FLAG_MASTER);
	//For subspans, total_spans_or_distance is the distance (in spans) back to the master
	span_t* master = is_master ? span : (pointer_offset(span, -(int32_t)(span->total_spans_or_distance * _memory_span_size)));
	assert(is_master || (span->flags & SPAN_FLAG_SUBSPAN));
	assert(master->flags & SPAN_FLAG_MASTER);
	size_t span_count = span->span_count;
	if (!is_master) {
		//Directly unmap subspans (unless huge pages, in which case we defer and unmap entire page range with master)
		assert(span->align_offset == 0);
		if (_memory_span_size >= _memory_page_size) {
			_memory_unmap(span, span_count * _memory_span_size, 0, 0);
			_memory_statistics_sub(&_reserved_spans, span_count);
		}
	} else {
		//Special double flag to denote an unmapped master
		//It must be kept in memory since span header must be used
		span->flags |= SPAN_FLAG_MASTER | SPAN_FLAG_SUBSPAN;
	}
	if (atomic_add32(&master->remaining_spans, -(int32_t)span_count) <= 0) {
		//Everything unmapped, unmap the master span with release flag to unmap the entire range of the super span
		assert(!!(master->flags & SPAN_FLAG_MASTER) && !!(master->flags & SPAN_FLAG_SUBSPAN));
		size_t unmap_count = master->span_count;
		if (_memory_span_size < _memory_page_size)
			unmap_count = master->total_spans_or_distance;
		_memory_statistics_sub(&_reserved_spans, unmap_count);
		_memory_unmap(master, unmap_count * _memory_span_size, master->align_offset, master->total_spans_or_distance * _memory_span_size);
	}
}
#if ENABLE_THREAD_CACHE
//! Unmap a single linked list of spans
// The element count is stored only in the head span, so it is read before the head is freed.
static void
_memory_unmap_span_list(span_t* span) {
	size_t remaining = span->list_size;
	while (remaining--) {
		//Grab the link before the span's memory goes away
		span_t* next_span = span->next;
		_memory_unmap_span(span);
		span = next_span;
	}
	assert(!span);
}
//! Add span to head of single linked span list
// The head span tracks the list length; returns the new length.
static size_t
_memory_span_list_push(span_t** head, span_t* span) {
	span_t* old_head = *head;
	span->next = old_head;
	span->list_size = old_head ? (old_head->list_size + 1) : 1;
	*head = span;
	return span->list_size;
}
//! Remove span from head of single linked span list, returns the popped span
// The remaining list (if any) inherits a decremented list_size in its new head.
static span_t*
_memory_span_list_pop(span_t** head) {
	span_t* popped = *head;
	span_t* new_head = 0;
	if (popped->list_size > 1) {
		assert(popped->next);
		new_head = popped->next;
		assert(new_head);
		new_head->list_size = popped->list_size - 1;
	}
	*head = new_head;
	return popped;
}
//! Split a single linked span list
// Keeps the first `limit` spans (minimum 2) in the input list and returns the
// head of the remainder, or 0 when the list is not longer than the limit.
static span_t*
_memory_span_list_split(span_t* span, size_t limit) {
	if (limit < 2)
		limit = 2;
	if (span->list_size <= limit)
		return 0;
	//Walk to the limit-th span; everything after it is split off
	uint32_t kept = 1;
	span_t* tail = span;
	span_t* rest = span->next;
	for (; kept < limit; ++kept) {
		tail = rest;
		rest = rest->next;
	}
	tail->next = 0;
	assert(rest);
	rest->list_size = span->list_size - kept;
	span->list_size = kept;
	span->prev = 0;
	return rest;
}
#endif
//! Add a span to partial span double linked list at the head
// Convention: the head span's prev pointer always points at the tail span.
static void
_memory_span_partial_list_add(span_t** head, span_t* span) {
	span_t* old_head = *head;
	*head = span;
	if (!old_head) {
		//Empty list: the new span is both head and tail
		span->next = 0;
		span->prev = span;
		return;
	}
	span->next = old_head;
	span->prev = old_head->prev;	//inherit the tail pointer
	old_head->prev = span;
}
//! Add a span to partial span double linked list at the tail
// Convention: the head span's prev pointer always points at the tail span.
static void
_memory_span_partial_list_add_tail(span_t** head, span_t* span) {
	span->next = 0;
	span_t* first = *head;
	if (!first) {
		span->prev = span;
		*head = span;
		return;
	}
	span_t* old_tail = first->prev;
	old_tail->next = span;
	span->prev = old_tail;
	first->prev = span;	//head->prev tracks the new tail
}
//! Pop head span from partial span double linked list
// The tail pointer stored in the old head's prev is carried over to the new head.
static void
_memory_span_partial_list_pop_head(span_t** head) {
	span_t* old_head = *head;
	span_t* new_head = old_head->next;
	*head = new_head;
	if (new_head)
		new_head->prev = old_head->prev;
}
//! Remove a span from partial span double linked list
static void
_memory_span_partial_list_remove(span_t** head, span_t* span) {
	if (UNEXPECTED(*head == span)) {
		_memory_span_partial_list_pop_head(head);
		return;
	}
	span_t* after = span->next;
	span_t* before = span->prev;
	before->next = after;
	if (EXPECTED(after != 0))
		after->prev = before;
	else
		(*head)->prev = before;	//removed span was the tail; refresh head's tail pointer
}
#if ENABLE_GLOBAL_CACHE
//! Insert the given list of memory page spans in the global cache
// Lock-free push: the cache pointer's low bits (below the span alignment) hold a
// running counter to defeat ABA; the span list is linked through span->prev.
static void
_memory_cache_insert(global_cache_t* cache, span_t* span, size_t cache_limit) {
	assert((span->list_size == 1) || (span->next != 0));
	int32_t list_size = (int32_t)span->list_size;
	//Unmap if cache has reached the limit
	if (atomic_add32(&cache->size, list_size) > (int32_t)cache_limit) {
#if !ENABLE_UNLIMITED_GLOBAL_CACHE
		_memory_unmap_span_list(span);
		atomic_add32(&cache->size, -list_size);
		return;
#endif
	}
	void* current_cache, *new_cache;
	do {
		current_cache = atomic_load_ptr(&cache->cache);
		//Link this list to the previous cache head (mask strips the ABA counter bits)
		span->prev = (void*)((uintptr_t)current_cache & _memory_span_mask);
		new_cache = (void*)((uintptr_t)span | ((uintptr_t)atomic_incr32(&cache->counter) & ~_memory_span_mask));
	} while (!atomic_cas_ptr(&cache->cache, new_cache, current_cache));
}
//! Extract a number of memory page spans from the global cache
// Lock-free pop of one cached span list; returns 0 when the cache is empty.
static span_t*
_memory_cache_extract(global_cache_t* cache) {
	uintptr_t span_ptr;
	do {
		void* global_span = atomic_load_ptr(&cache->cache);
		//Mask strips the ABA counter bits to recover the span pointer
		span_ptr = (uintptr_t)global_span & _memory_span_mask;
		if (span_ptr) {
			span_t* span = (void*)span_ptr;
			//By accessing the span ptr before it is swapped out of list we assume that a contending thread
			//does not manage to traverse the span to being unmapped before we access it
			void* new_cache = (void*)((uintptr_t)span->prev | ((uintptr_t)atomic_incr32(&cache->counter) & ~_memory_span_mask));
			if (atomic_cas_ptr(&cache->cache, new_cache, global_span)) {
				atomic_add32(&cache->size, -(int32_t)span->list_size);
				return span;
			}
		}
	} while (span_ptr);
	return 0;
}
//! Finalize a global cache, only valid from allocator finalization (not thread safe)
// Unmaps every cached span list and resets the cache to its empty state.
static void
_memory_cache_finalize(global_cache_t* cache) {
	//Strip the ABA counter bits to recover the first list head
	span_t* list = (void*)((uintptr_t)atomic_load_ptr(&cache->cache) & _memory_span_mask);
	while (list) {
		//Cached lists are chained through prev; mask off the counter bits before following
		span_t* next_list = (void*)((uintptr_t)list->prev & _memory_span_mask);
		atomic_add32(&cache->size, -(int32_t)list->list_size);
		_memory_unmap_span_list(list);
		list = next_list;
	}
	assert(!atomic_load32(&cache->size));
	atomic_store_ptr(&cache->cache, 0);
	atomic_store32(&cache->size, 0);
}
//! Insert the given list of memory page spans in the global cache
// The cache bucket is selected by span count; the limit scales with the
// configured release counts unless the cache is unlimited.
static void
_memory_global_cache_insert(span_t* span) {
	size_t span_count = span->span_count;
#if ENABLE_UNLIMITED_GLOBAL_CACHE
	_memory_cache_insert(&_memory_span_cache[span_count - 1], span, 0);
#else
	const size_t cache_limit = (GLOBAL_CACHE_MULTIPLIER * ((span_count == 1) ? _memory_span_release_count : _memory_span_release_count_large));
	_memory_cache_insert(&_memory_span_cache[span_count - 1], span, cache_limit);
#endif
}
//! Extract a number of memory page spans from the global cache for large blocks
// Pops one cached list from the bucket matching the requested span count (0 when empty).
static span_t*
_memory_global_cache_extract(size_t span_count) {
	span_t* extracted = _memory_cache_extract(&_memory_span_cache[span_count - 1]);
	assert(!extracted || (extracted->span_count == span_count));
	return extracted;
}
#endif
#if ENABLE_THREAD_CACHE
//! Adopt the deferred span cache list
// Other threads push freed class-0 spans onto heap->span_cache_deferred; the owning
// thread atomically swaps the whole list out and moves it into its regular cache.
static void
_memory_heap_cache_adopt_deferred(heap_t* heap) {
	atomic_thread_fence_acquire();
	span_t* span = atomic_load_ptr(&heap->span_cache_deferred);
	if (!span)
		return;
	//CAS the list head to 0 to take ownership of the entire chain
	do {
		span = atomic_load_ptr(&heap->span_cache_deferred);
	} while (!atomic_cas_ptr(&heap->span_cache_deferred, 0, span));
	while (span) {
		span_t* next_span = span->next;
		_memory_span_list_push(&heap->span_cache[0], span);
#if ENABLE_STATISTICS
		heap->size_class_use[span->size_class].spans_to_cache++;
#endif
		span = next_span;
	}
}
#endif
//! Insert a single span into thread heap cache, releasing to global cache if overflow
// Without a thread cache the span is unmapped immediately. With one, the cache is
// allowed to grow to release_count * THREAD_CACHE_MULTIPLIER before the overflow
// (release_count spans) is pushed to the global cache or unmapped.
static void
_memory_heap_cache_insert(heap_t* heap, span_t* span) {
#if ENABLE_THREAD_CACHE
	size_t span_count = span->span_count;
	size_t idx = span_count - 1;
	_memory_statistics_inc(heap->span_use[idx].spans_to_cache, 1);
	//Single-span cache also absorbs cross-thread deferred frees first
	if (!idx)
		_memory_heap_cache_adopt_deferred(heap);
#if ENABLE_UNLIMITED_THREAD_CACHE
	_memory_span_list_push(&heap->span_cache[idx], span);
#else
	const size_t release_count = (!idx ? _memory_span_release_count : _memory_span_release_count_large);
	size_t current_cache_size = _memory_span_list_push(&heap->span_cache[idx], span);
	if (current_cache_size <= release_count)
		return;
	const size_t hard_limit = release_count * THREAD_CACHE_MULTIPLIER;
	if (current_cache_size <= hard_limit) {
#if ENABLE_ADAPTIVE_THREAD_CACHE
		//Require 25% of high water mark to remain in cache (and at least 1, if use is 0)
		const size_t high_mark = heap->span_use[idx].high;
		const size_t min_limit = (high_mark >> 2) + release_count + 1;
		if (current_cache_size < min_limit)
			return;
#else
		return;
#endif
	}
	//Split off release_count spans; the remainder stays in the thread cache
	heap->span_cache[idx] = _memory_span_list_split(span, release_count);
	assert(span->list_size == release_count);
#if ENABLE_STATISTICS
	heap->thread_to_global += (size_t)span->list_size * span_count * _memory_span_size;
	heap->span_use[idx].spans_to_global += span->list_size;
#endif
#if ENABLE_GLOBAL_CACHE
	_memory_global_cache_insert(span);
#else
	_memory_unmap_span_list(span);
#endif
#endif
#else
	(void)sizeof(heap);
	_memory_unmap_span(span);
#endif
}
//! Extract the given number of spans from the different cache levels
// Thread-cache level only: returns a cached span or 0 (always 0 without a thread cache).
static span_t*
_memory_heap_thread_cache_extract(heap_t* heap, size_t span_count) {
#if ENABLE_THREAD_CACHE
	size_t idx = span_count - 1;
	//Single-span requests may be satisfiable by cross-thread deferred frees
	if (!idx)
		_memory_heap_cache_adopt_deferred(heap);
	if (heap->span_cache[idx]) {
#if ENABLE_STATISTICS
		heap->span_use[idx].spans_from_cache++;
#endif
		return _memory_span_list_pop(&heap->span_cache[idx]);
	}
#endif
	return 0;
}
//! Satisfy a span request from the heap's reserve, or return 0 when it cannot
static span_t*
_memory_heap_reserved_extract(heap_t* heap, size_t span_count) {
	return (heap->spans_reserved >= span_count) ? _memory_map_spans(heap, span_count) : 0;
}
//! Extract a span list from the global cache into the thread cache, then pop one span
// Returns 0 when the global cache is disabled or empty. (ENABLE_GLOBAL_CACHE implies
// ENABLE_THREAD_CACHE, so heap->span_cache is always available on this path.)
static span_t*
_memory_heap_global_cache_extract(heap_t* heap, size_t span_count) {
#if ENABLE_GLOBAL_CACHE
	//Step 3: Extract from global cache
	size_t idx = span_count - 1;
	heap->span_cache[idx] = _memory_global_cache_extract(span_count);
	if (heap->span_cache[idx]) {
#if ENABLE_STATISTICS
		heap->global_to_thread += (size_t)heap->span_cache[idx]->list_size * span_count * _memory_span_size;
		heap->span_use[idx].spans_from_global += heap->span_cache[idx]->list_size;
#endif
		return _memory_span_list_pop(&heap->span_cache[idx]);
	}
#endif
	return 0;
}
//! Extract a span of the given span count for the heap, trying in order:
//! thread cache, heap reserved region, global cache, and finally mapping
//! new virtual memory. class_idx is used only for statistics.
static span_t*
_memory_heap_extract_new_span(heap_t* heap, size_t span_count, uint32_t class_idx) {
	(void)sizeof(class_idx);
#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
	//Track current span usage for this span-count class and its high water mark
	uint32_t use_idx = (uint32_t)span_count - 1;
	++heap->span_use[use_idx].current;
	if (heap->span_use[use_idx].current > heap->span_use[use_idx].high)
		heap->span_use[use_idx].high = heap->span_use[use_idx].current;
#endif
	//Step 1: thread-local cache
	span_t* span = _memory_heap_thread_cache_extract(heap, span_count);
	if (EXPECTED(span != 0)) {
		_memory_statistics_inc(heap->size_class_use[class_idx].spans_from_cache, 1);
		return span;
	}
	//Step 2: heap's reserved span region
	span = _memory_heap_reserved_extract(heap, span_count);
	if (EXPECTED(span != 0)) {
		_memory_statistics_inc(heap->size_class_use[class_idx].spans_from_reserved, 1);
		return span;
	}
	//Step 3: global cache
	span = _memory_heap_global_cache_extract(heap, span_count);
	if (EXPECTED(span != 0)) {
		_memory_statistics_inc(heap->size_class_use[class_idx].spans_from_cache, 1);
		return span;
	}
	//Final fallback, map in more virtual memory
	span = _memory_map_spans(heap, span_count);
	_memory_statistics_inc(heap->size_class_use[class_idx].spans_map_calls, 1);
	return span;
}
//! Move the span to the heap thread cache. The span must not be the class's
//! current active span; partial spans are unlinked from the class list first.
static void
_memory_span_release_to_cache(heap_t* heap, span_t* span) {
	heap_class_t* heap_class = heap->span_class + span->size_class;
	assert(heap_class->partial_span != span);
	//Partial spans sit on the class's partial list and must be unlinked first
	if (span->state == SPAN_STATE_PARTIAL)
		_memory_span_partial_list_remove(&heap_class->partial_span, span);
#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
	if (heap->span_use[0].current)
		--heap->span_use[0].current;
	_memory_statistics_inc(heap->span_use[0].spans_to_cache, 1);
	_memory_statistics_inc(heap->size_class_use[span->size_class].spans_to_cache, 1);
#endif
	_memory_heap_cache_insert(heap, span);
}
//! Build a singly linked free list over at most one system memory page worth
//! of blocks, handing out the first block as allocated (via first_block).
//! Returns the number of blocks consumed from the span: the reserved first
//! block plus every block linked into *list.
static uint32_t
free_list_partial_init(void** list, void** first_block, void* page_start, void* block_start,
                       uint32_t block_count, uint32_t block_size) {
	assert(block_count);
	*first_block = block_start;
	if (block_count <= 1) {
		//Only the reserved block exists; the free list stays empty
		*list = 0;
		return block_count;
	}
	void* free_block = pointer_offset(block_start, block_size);
	void* block_end = pointer_offset(block_start, block_size * block_count);
	//For blocks smaller than half a page, stop linking at the next page
	//boundary so only one page of memory is touched up front
	if (block_size < (_memory_page_size >> 1)) {
		void* page_end = pointer_offset(page_start, _memory_page_size);
		if (page_end < block_end)
			block_end = page_end;
	}
	*list = free_block;
	//Reserved first block plus the list head already account for two blocks
	uint32_t linked_count = 2;
	void* next_block = pointer_offset(free_block, block_size);
	while (next_block < block_end) {
		*((void**)free_block) = next_block;
		free_block = next_block;
		++linked_count;
		next_block = pointer_offset(next_block, block_size);
	}
	//Terminate the list
	*((void**)free_block) = 0;
	return linked_count;
}
//! Initialize an unused span (from cache or mapped) to be new active span
//! for the given size class, and return its first block to the caller.
static void*
_memory_span_set_new_active(heap_t* heap, heap_class_t* heap_class, span_t* span, uint32_t class_idx) {
	assert(span->span_count == 1);
	size_class_t* size_class = _memory_size_class + class_idx;
	span->size_class = class_idx;
	span->heap = heap;
	span->flags &= ~SPAN_FLAG_ALIGNED_BLOCKS;
	span->block_count = size_class->block_count;
	span->block_size = size_class->block_size;
	span->state = SPAN_STATE_ACTIVE;
	span->free_list = 0;
	//Setup free list. Only initialize one system page worth of free blocks in list
	void* block;
	span->free_list_limit = free_list_partial_init(&heap_class->free_list, &block,
		span, pointer_offset(span, SPAN_HEADER_SIZE), size_class->block_count, size_class->block_size);
	//Reset the deferred list and publish the initialized span with a release fence
	atomic_store_ptr(&span->free_list_deferred, 0);
	span->list_size = 0;
	atomic_thread_fence_release();
	_memory_span_partial_list_add(&heap_class->partial_span, span);
	//The first (reserved) block goes straight to the caller
	return block;
}
//! Promote a partially used span (from heap used list) to be new active span
static void
_memory_span_set_partial_active(heap_class_t* heap_class, span_t* span) {
	assert(span->state == SPAN_STATE_PARTIAL);
	assert(span->block_count == _memory_size_class[span->size_class].block_count);
	//Move data to heap size class and set span as active
	heap_class->free_list = span->free_list;
	span->state = SPAN_STATE_ACTIVE;
	span->free_list = 0;
	//A partial span always carries at least one free block to hand out
	assert(heap_class->free_list);
}
//! Mark span as full (from active). The span is removed from the class's
//! partial list and becomes free floating until a block is freed back.
static void
_memory_span_set_active_full(heap_class_t* heap_class, span_t* span) {
	assert(span->state == SPAN_STATE_ACTIVE);
	//The active span is always the head of the partial list
	assert(span == heap_class->partial_span);
	_memory_span_partial_list_pop_head(&heap_class->partial_span);
	span->used_count = span->block_count;
	span->state = SPAN_STATE_FULL;
	span->free_list = 0;
}
//! Return a formerly full span to partial state and requeue it so its freed
//! blocks become allocatable again.
static void
_memory_span_set_full_partial(heap_t* heap, span_t* span) {
	assert(span->state == SPAN_STATE_FULL);
	heap_class_t* owner_class = heap->span_class + span->size_class;
	span->state = SPAN_STATE_PARTIAL;
	//Re-enter at the tail so the head (active span) position is preserved
	_memory_span_partial_list_add_tail(&owner_class->partial_span, span);
}
//! Atomically take ownership of a span's deferred free list and return it.
//! INVALID_POINTER acts as a lock marker in free_list_deferred.
static void*
_memory_span_extract_deferred(span_t* span) {
	void* free_list;
	//Spin until the list is not currently locked by another thread and we
	//succeed in swapping it to the locked (INVALID_POINTER) state
	do {
		free_list = atomic_load_ptr(&span->free_list_deferred);
	} while ((free_list == INVALID_POINTER) || !atomic_cas_ptr(&span->free_list_deferred, INVALID_POINTER, free_list));
	//We own the list now; reset the count and unlock with an empty list
	span->list_size = 0;
	atomic_store_ptr(&span->free_list_deferred, 0);
	atomic_thread_fence_release();
	return free_list;
}
//! Detach and return the head block of a free list, advancing the list head
//! to the next block (stored in the first word of the head block).
static void*
free_list_pop(void** list) {
	void* head = *list;
	void* next = *((void**)head);
	*list = next;
	return head;
}
//! Allocate a small/medium sized memory block from the given heap when the
//! class's fast-path free list is empty. Tries, in order: the active span's
//! own free list, lazily linking more of the active span's blocks, the active
//! span's deferred free list, promoting a partial span, and finally pulling a
//! fresh span from the cache hierarchy.
static void*
_memory_allocate_from_heap_fallback(heap_t* heap, uint32_t class_idx) {
	heap_class_t* heap_class = &heap->span_class[class_idx];
	void* block;
	span_t* active_span = heap_class->partial_span;
	if (EXPECTED(active_span != 0)) {
		assert(active_span->state == SPAN_STATE_ACTIVE);
		assert(active_span->block_count == _memory_size_class[active_span->size_class].block_count);
		//Swap in free list if not empty
		if (active_span->free_list) {
			heap_class->free_list = active_span->free_list;
			active_span->free_list = 0;
			return free_list_pop(&heap_class->free_list);
		}
		//If the span did not fully initialize free list, link up another page worth of blocks
		if (active_span->free_list_limit < active_span->block_count) {
			void* block_start = pointer_offset(active_span, SPAN_HEADER_SIZE + (active_span->free_list_limit * active_span->block_size));
			active_span->free_list_limit += free_list_partial_init(&heap_class->free_list, &block,
				(void*)((uintptr_t)block_start & ~(_memory_page_size - 1)), block_start,
				active_span->block_count - active_span->free_list_limit, active_span->block_size);
			return block;
		}
		//Swap in deferred free list (blocks freed by other threads)
		atomic_thread_fence_acquire();
		if (atomic_load_ptr(&active_span->free_list_deferred)) {
			heap_class->free_list = _memory_span_extract_deferred(active_span);
			return free_list_pop(&heap_class->free_list);
		}
		//If the active span is fully allocated, mark span as free floating (fully allocated and not part of any list)
		assert(!heap_class->free_list);
		assert(active_span->free_list_limit >= active_span->block_count);
		_memory_span_set_active_full(heap_class, active_span);
	}
	assert(!heap_class->free_list);
	//Try promoting a semi-used span to active
	active_span = heap_class->partial_span;
	if (EXPECTED(active_span != 0)) {
		_memory_span_set_partial_active(heap_class, active_span);
		return free_list_pop(&heap_class->free_list);
	}
	assert(!heap_class->free_list);
	assert(!heap_class->partial_span);
	//Find a span in one of the cache levels
	active_span = _memory_heap_extract_new_span(heap, 1, class_idx);
	//Mark span as owned by this heap and set base data, return first block
	return _memory_span_set_new_active(heap, heap_class, active_span, class_idx);
}
//! Allocate a small sized memory block (size <= SMALL_SIZE_LIMIT) from the
//! given heap. Each small size maps to its own class by granularity rounding.
static void*
_memory_allocate_small(heap_t* heap, size_t size) {
	const uint32_t class_idx = (uint32_t)((size + (SMALL_GRANULARITY - 1)) >> SMALL_GRANULARITY_SHIFT);
	_memory_statistics_inc_alloc(heap, class_idx);
	heap_class_t* heap_class = &heap->span_class[class_idx];
	//Fast path: pop straight off the class's thread-local free list
	if (EXPECTED(heap_class->free_list != 0))
		return free_list_pop(&heap_class->free_list);
	return _memory_allocate_from_heap_fallback(heap, class_idx);
}
//! Allocate a medium sized memory block from the given heap. The base class
//! index is looked up once more through the size class table to account for
//! merged classes.
static void*
_memory_allocate_medium(heap_t* heap, size_t size) {
	const uint32_t base_idx = (uint32_t)(SMALL_CLASS_COUNT + ((size - (SMALL_SIZE_LIMIT + 1)) >> MEDIUM_GRANULARITY_SHIFT));
	const uint32_t class_idx = _memory_size_class[base_idx].class_idx;
	_memory_statistics_inc_alloc(heap, class_idx);
	heap_class_t* heap_class = &heap->span_class[class_idx];
	//Fast path: pop straight off the class's thread-local free list
	if (EXPECTED(heap_class->free_list != 0))
		return free_list_pop(&heap_class->free_list);
	return _memory_allocate_from_heap_fallback(heap, class_idx);
}
//! Allocate a large sized memory block from the given heap
static void*
_memory_allocate_large(heap_t* heap, size_t size) {
	//Calculate number of needed max sized spans (including header)
	//Since this function is never called if size > LARGE_SIZE_LIMIT
	//the span_count is guaranteed to be <= LARGE_CLASS_COUNT
	size += SPAN_HEADER_SIZE;
	size_t span_count = size >> _memory_span_size_shift;
	//Round up if the size is not an exact multiple of the span size
	if (size & (_memory_span_size - 1))
		++span_count;
	size_t idx = span_count - 1;
#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
	//Track current usage and high water mark for this span-count class
	++heap->span_use[idx].current;
	if (heap->span_use[idx].current > heap->span_use[idx].high)
		heap->span_use[idx].high = heap->span_use[idx].current;
#endif
	//Find a span in one of the cache levels
	span_t* span = _memory_heap_extract_new_span(heap, span_count, SIZE_CLASS_COUNT);
	//Mark span as owned by this heap and set base data
	assert(span->span_count == span_count);
	//Large spans encode their span count as a size class above SIZE_CLASS_COUNT
	span->size_class = (uint32_t)(SIZE_CLASS_COUNT + idx);
	span->heap = heap;
	atomic_thread_fence_release();
	return pointer_offset(span, SPAN_HEADER_SIZE);
}
//! Serve an allocation too large for the span machinery by mapping memory
//! pages directly from the OS. Returns 0 on mapping failure.
static void*
_memory_allocate_huge(size_t size) {
	size += SPAN_HEADER_SIZE;
	//Round the request (including header) up to a whole number of pages
	size_t page_count = size >> _memory_page_size_shift;
	if (size & (_memory_page_size - 1))
		++page_count;
	size_t align_offset = 0;
	span_t* mapped = _memory_map(page_count * _memory_page_size, &align_offset);
	if (!mapped)
		return 0;
	//A huge span flags itself with size class -1 and stores its page count
	//in span_count
	mapped->size_class = (uint32_t)-1;
	mapped->span_count = (uint32_t)page_count;
	mapped->align_offset = (uint32_t)align_offset;
	_memory_statistics_add_peak(&_huge_pages_current, page_count, _huge_pages_peak);
	return pointer_offset(mapped, SPAN_HEADER_SIZE);
}
//! Allocate a block larger than the medium size limit: route to the large
//! span path, or map pages directly when even that limit is exceeded.
static void*
_memory_allocate_oversized(heap_t* heap, size_t size) {
	if (size > LARGE_SIZE_LIMIT)
		return _memory_allocate_huge(size);
	return _memory_allocate_large(heap, size);
}
//! Allocate a block of the given size from the heap, dispatching on the
//! size tier: small, medium, or oversized (large/huge).
static void*
_memory_allocate(heap_t* heap, size_t size) {
	if (EXPECTED(size <= SMALL_SIZE_LIMIT))
		return _memory_allocate_small(heap, size);
	if (size <= _memory_medium_size_limit)
		return _memory_allocate_medium(heap, size);
	return _memory_allocate_oversized(heap, size);
}
//! Allocate a new heap, preferring to adopt an orphaned heap left behind by
//! a finished thread before mapping fresh memory.
static heap_t*
_memory_allocate_heap(void) {
	void* raw_heap;
	void* next_raw_heap;
	uintptr_t orphan_counter;
	heap_t* heap;
	heap_t* next_heap;
	//Try getting an orphaned heap
	atomic_thread_fence_acquire();
	do {
		raw_heap = atomic_load_ptr(&_memory_orphan_heaps);
		//Low 9 bits of the list pointer carry an ABA counter; mask them off
		//to recover the actual heap address
		heap = (void*)((uintptr_t)raw_heap & ~(uintptr_t)0x1FF);
		if (!heap)
			break;
		next_heap = heap->next_orphan;
		orphan_counter = (uintptr_t)atomic_incr32(&_memory_orphan_counter);
		next_raw_heap = (void*)((uintptr_t)next_heap | (orphan_counter & (uintptr_t)0x1FF));
	} while (!atomic_cas_ptr(&_memory_orphan_heaps, next_raw_heap, raw_heap));
	if (!heap) {
		//Map in pages for a new heap
		size_t align_offset = 0;
		heap = _memory_map((1 + (sizeof(heap_t) >> _memory_page_size_shift)) * _memory_page_size, &align_offset);
		if (!heap)
			return heap;
		memset(heap, 0, sizeof(heap_t));
		heap->align_offset = align_offset;
		//Get a new heap ID; retry until a nonzero, currently unused ID is found
		do {
			heap->id = atomic_incr32(&_memory_heap_id);
			if (_memory_heap_lookup(heap->id))
				heap->id = 0;
		} while (!heap->id);
		//Link in heap in heap ID map
		size_t list_idx = heap->id % HEAP_ARRAY_SIZE;
		do {
			next_heap = atomic_load_ptr(&_memory_heaps[list_idx]);
			heap->next_heap = next_heap;
		} while (!atomic_cas_ptr(&_memory_heaps[list_idx], heap, next_heap));
	}
	return heap;
}
//! Deallocate the given small/medium memory block in the current thread local heap
static void
_memory_deallocate_direct(span_t* span, void* block) {
	assert(span->heap == get_thread_heap_raw());
	uint32_t state = span->state;
	//Add block to free list
	*((void**)block) = span->free_list;
	span->free_list = block;
	//Active spans are managed through the heap class lists; no bookkeeping needed
	if (UNEXPECTED(state == SPAN_STATE_ACTIVE))
		return;
	uint32_t used = --span->used_count;
	uint32_t free = span->list_size;
	//If every used block has been returned, the span can go to the heap cache
	if (UNEXPECTED(used == free))
		_memory_span_release_to_cache(span->heap, span);
	//A full span regaining a free block re-enters the partial list
	else if (UNEXPECTED(state == SPAN_STATE_FULL))
		_memory_span_set_full_partial(span->heap, span);
}
//! Put the block in the deferred free list of the owning span (cross-thread free)
static void
_memory_deallocate_defer(span_t* span, void* block) {
	atomic_thread_fence_acquire();
	if (span->state == SPAN_STATE_FULL) {
		if ((span->list_size + 1) == span->block_count) {
			//Span will be completely freed by deferred deallocations, no other thread can
			//currently touch it. Safe to move to owner heap deferred cache
			span_t* last_head;
			heap_t* heap = span->heap;
			do {
				last_head = atomic_load_ptr(&heap->span_cache_deferred);
				span->next = last_head;
			} while (!atomic_cas_ptr(&heap->span_cache_deferred, span, last_head));
			return;
		}
	}
	//Lock the deferred list by swapping in INVALID_POINTER, link the block in
	//front of the old head, then publish the block as new head (which also
	//releases the lock)
	void* free_list;
	do {
		atomic_thread_fence_acquire();
		free_list = atomic_load_ptr(&span->free_list_deferred);
		*((void**)block) = free_list;
	} while ((free_list == INVALID_POINTER) || !atomic_cas_ptr(&span->free_list_deferred, INVALID_POINTER, free_list));
	++span->list_size;
	atomic_store_ptr(&span->free_list_deferred, block);
}
//! Free a small/medium block: realign aligned-allocation pointers to the
//! block start, then free directly on the owning thread or defer otherwise.
static void
_memory_deallocate_small_or_medium(span_t* span, void* p) {
	_memory_statistics_inc_free(span->heap, span->size_class);
	if (span->flags & SPAN_FLAG_ALIGNED_BLOCKS) {
		//An aligned allocation may point inside a block; snap back to its start
		void* start = pointer_offset(span, SPAN_HEADER_SIZE);
		uint32_t offset = (uint32_t)pointer_diff(p, start);
		p = pointer_offset(p, -(int32_t)(offset % span->block_size));
	}
	//Cross-thread frees must go through the span's deferred list
	if (span->heap != get_thread_heap_raw())
		_memory_deallocate_defer(span, p);
	else
		_memory_deallocate_direct(span, p);
}
//! Deallocate the given large memory block to the current heap
static void
_memory_deallocate_large(span_t* span) {
	//Decrease counter
	//Large spans encode their span count in the size class above SIZE_CLASS_COUNT
	assert(span->span_count == ((size_t)span->size_class - SIZE_CLASS_COUNT + 1));
	assert(span->size_class >= SIZE_CLASS_COUNT);
	assert(span->size_class - SIZE_CLASS_COUNT < LARGE_CLASS_COUNT);
	//A span is either a master or a subspan, never both
	assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN));
	assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN));
	//Large blocks can always be deallocated and transferred between heaps
	//Investigate if it is better to defer large spans as well through span_cache_deferred,
	//possibly with some heuristics to pick either scheme at runtime per deallocation
	heap_t* heap = get_thread_heap();
#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
	size_t idx = span->span_count - 1;
	if (heap->span_use[idx].current)
		--heap->span_use[idx].current;
#endif
	if ((span->span_count > 1) && !heap->spans_reserved) {
		//Keep the multi-span region as this heap's reserved spans instead of caching it
		heap->span_reserve = span;
		heap->spans_reserved = span->span_count;
		if (span->flags & SPAN_FLAG_MASTER) {
			heap->span_reserve_master = span;
		} else { //SPAN_FLAG_SUBSPAN
			//Locate the master span from the stored distance (in spans)
			uint32_t distance = span->total_spans_or_distance;
			span_t* master = pointer_offset(span, -(int32_t)(distance * _memory_span_size));
			heap->span_reserve_master = master;
			assert(master->flags & SPAN_FLAG_MASTER);
			assert(atomic_load32(&master->remaining_spans) >= (int32_t)span->span_count);
		}
		_memory_statistics_inc(heap->span_use[idx].spans_to_reserved, 1);
	} else {
		//Insert into cache list
		_memory_heap_cache_insert(heap, span);
	}
}
//! Deallocate the given huge span by unmapping its pages back to the OS.
static void
_memory_deallocate_huge(span_t* span) {
	//Huge spans record their page count in span_count
	size_t page_count = span->span_count;
	size_t byte_size = page_count * _memory_page_size;
	_memory_unmap(span, byte_size, span->align_offset, byte_size);
	_memory_statistics_sub(&_huge_pages_current, page_count);
}
//! Route a freed pointer to the small/medium, huge, or large deallocation
//! path based on the owning span's size class.
static void
_memory_deallocate(void* p) {
	//The owning span header sits at the span-aligned address at or below p
	span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
	if (UNEXPECTED(!span))
		return;
	if (EXPECTED(span->size_class < SIZE_CLASS_COUNT)) {
		_memory_deallocate_small_or_medium(span, p);
	} else if (span->size_class == (uint32_t)-1) {
		//Size class -1 marks a directly mapped huge allocation
		_memory_deallocate_huge(span);
	} else {
		_memory_deallocate_large(span);
	}
}
//! Reallocate the given block to the given size. If oldsize is zero it is
//! recomputed from the owning span. flags may contain RPMALLOC_NO_PRESERVE
//! to skip copying old contents when a new block must be allocated.
static void*
_memory_reallocate(void* p, size_t size, size_t oldsize, unsigned int flags) {
	if (p) {
		//Grab the span using guaranteed span alignment
		span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
		if (span->heap) {
			if (span->size_class < SIZE_CLASS_COUNT) {
				//Small/medium sized block
				assert(span->span_count == 1);
				void* blocks_start = pointer_offset(span, SPAN_HEADER_SIZE);
				uint32_t block_offset = (uint32_t)pointer_diff(p, blocks_start);
				uint32_t block_idx = block_offset / span->block_size;
				void* block = pointer_offset(blocks_start, block_idx * span->block_size);
				if (!oldsize)
					oldsize = span->block_size - (uint32_t)pointer_diff(p, block);
				if ((size_t)span->block_size >= size) {
					//Still fits in block, never mind trying to save memory, but preserve data if alignment changed
					if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
						memmove(block, p, oldsize);
					return block;
				}
			} else {
				//Large block
				size_t total_size = size + SPAN_HEADER_SIZE;
				size_t num_spans = total_size >> _memory_span_size_shift;
				//BUGFIX: round up on the span SIZE remainder. The previous code
				//tested (total_size & (_memory_span_mask - 1)); the mask is
				//~(span_size - 1), so mask-1 has nearly all bits set and the
				//test fired for almost any size, miscomputing num_spans.
				//Compare the correct form in _memory_allocate_large.
				if (total_size & (_memory_span_size - 1))
					++num_spans;
				size_t current_spans = (span->size_class - SIZE_CLASS_COUNT) + 1;
				assert(current_spans == span->span_count);
				void* block = pointer_offset(span, SPAN_HEADER_SIZE);
				if (!oldsize)
					oldsize = (current_spans * _memory_span_size) - (size_t)pointer_diff(p, block);
				//Reuse if it fits and would not waste more than half the region
				if ((current_spans >= num_spans) && (num_spans >= (current_spans / 2))) {
					//Still fits in block, never mind trying to save memory, but preserve data if alignment changed
					if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
						memmove(block, p, oldsize);
					return block;
				}
			}
		} else {
			//Oversized block, page count is stored in span_count
			size_t total_size = size + SPAN_HEADER_SIZE;
			size_t num_pages = total_size >> _memory_page_size_shift;
			if (total_size & (_memory_page_size - 1))
				++num_pages;
			size_t current_pages = span->span_count;
			void* block = pointer_offset(span, SPAN_HEADER_SIZE);
			if (!oldsize)
				oldsize = (current_pages * _memory_page_size) - (size_t)pointer_diff(p, block);
			//Reuse if it fits and would not waste more than half the region
			if ((current_pages >= num_pages) && (num_pages >= (current_pages / 2))) {
				//Still fits in block, never mind trying to save memory, but preserve data if alignment changed
				if ((p != block) && !(flags & RPMALLOC_NO_PRESERVE))
					memmove(block, p, oldsize);
				return block;
			}
		}
	}
	//Size is greater than block size, need to allocate a new block and deallocate the old
	heap_t* heap = get_thread_heap();
	//Avoid hysteresis by overallocating if increase is small (below 37%)
	size_t lower_bound = oldsize + (oldsize >> 2) + (oldsize >> 3);
	void* block = _memory_allocate(heap, (size > lower_bound) ? size : ((size > oldsize) ? lower_bound : size));
	if (p && block) {
		//BUGFIX: only copy and free the old block when the new allocation
		//succeeded; on failure return 0 and leave p valid (realloc contract)
		if (!(flags & RPMALLOC_NO_PRESERVE))
			memcpy(block, p, oldsize < size ? oldsize : size);
		_memory_deallocate(p);
	}
	return block;
}
//! Compute the number of usable bytes in the block containing p, measured
//! from p to the end of the block/span/pages.
static size_t
_memory_usable_size(void* p) {
	//Span alignment guarantees the header is found by masking the pointer
	span_t* span = (void*)((uintptr_t)p & _memory_span_mask);
	if (!span->heap) {
		//Huge allocation: page count is stored in span_count
		size_t page_count = span->span_count;
		return (page_count * _memory_page_size) - (size_t)pointer_diff(p, span);
	}
	if (span->size_class < SIZE_CLASS_COUNT) {
		//Small/medium: distance from p to the end of its containing block
		void* start = pointer_offset(span, SPAN_HEADER_SIZE);
		return span->block_size - ((size_t)pointer_diff(p, start) % span->block_size);
	}
	//Large: span count is derived from the size class
	size_t span_count = (span->size_class - SIZE_CLASS_COUNT) + 1;
	return (span_count * _memory_span_size) - (size_t)pointer_diff(p, span);
}
//! Finalize the size class at iclass: compute how many blocks fit in a span,
//! then fold immediately preceding classes into this one while they would
//! hold the same block count (class merging).
static void
_memory_adjust_size_class(size_t iclass) {
	size_t block_size = _memory_size_class[iclass].block_size;
	size_t block_count = (_memory_span_size - SPAN_HEADER_SIZE) / block_size;
	_memory_size_class[iclass].block_count = (uint16_t)block_count;
	_memory_size_class[iclass].class_idx = (uint16_t)iclass;
	//Walk backwards, merging while block counts match
	for (size_t prevclass = iclass; prevclass > 0;) {
		--prevclass;
		if (_memory_size_class[prevclass].block_count != _memory_size_class[iclass].block_count)
			break;
		memcpy(_memory_size_class + prevclass, _memory_size_class + iclass, sizeof(_memory_size_class[iclass]));
	}
}
#if defined(_MSC_VER) && !defined(__clang__) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
#include <fibersapi.h>
//Fiber-local-storage key used to detect thread exit on MSVC static builds
static DWORD fls_key;
//! FLS destructor: finalize the exiting thread's heap if one was assigned
//! (value is the heap pointer stored via FlsSetValue at thread init)
static void NTAPI
rp_thread_destructor(void* value) {
	if (value)
		rpmalloc_thread_finalize();
}
#endif
#if PLATFORM_POSIX
# include <sys/mman.h>
# include <sched.h>
# ifdef __FreeBSD__
# include <sys/sysctl.h>
# define MAP_HUGETLB MAP_ALIGNED_SUPER
# endif
# ifndef MAP_UNINITIALIZED
# define MAP_UNINITIALIZED 0
# endif
#endif
#include <errno.h>
//! Initialize the allocator with default configuration. Idempotent: if
//! already initialized, only ensures the calling thread has a heap.
extern inline int
rpmalloc_initialize(void) {
	if (!_rpmalloc_initialized) {
		memset(&_memory_config, 0, sizeof(rpmalloc_config_t));
		return rpmalloc_initialize_config(0);
	}
	rpmalloc_thread_initialize();
	return 0;
}
//! Initialize the allocator with the given configuration (or defaults when
//! config is 0): resolve mapping callbacks, detect or clamp page and span
//! sizes, optionally enable huge pages per platform, reset global state and
//! build the small/medium size class tables. Returns 0 on success.
int
rpmalloc_initialize_config(const rpmalloc_config_t* config) {
	//Idempotent: a repeated call only ensures this thread has a heap
	if (_rpmalloc_initialized) {
		rpmalloc_thread_initialize();
		return 0;
	}
	_rpmalloc_initialized = 1;
	if (config)
		memcpy(&_memory_config, config, sizeof(rpmalloc_config_t));
	//Fall back to the OS mapping primitives when the caller supplies none
	if (!_memory_config.memory_map || !_memory_config.memory_unmap) {
		_memory_config.memory_map = _memory_map_os;
		_memory_config.memory_unmap = _memory_unmap_os;
	}
#if RPMALLOC_CONFIGURABLE
	_memory_page_size = _memory_config.page_size;
#else
	_memory_page_size = 0;
#endif
	_memory_huge_pages = 0;
	_memory_map_granularity = _memory_page_size;
	//Page size 0 means: detect from the operating system
	if (!_memory_page_size) {
#if PLATFORM_WINDOWS
		SYSTEM_INFO system_info;
		memset(&system_info, 0, sizeof(system_info));
		GetSystemInfo(&system_info);
		_memory_page_size = system_info.dwPageSize;
		_memory_map_granularity = system_info.dwAllocationGranularity;
		if (config && config->enable_huge_pages) {
			//Large pages on Windows require acquiring SeLockMemoryPrivilege
			HANDLE token = 0;
			size_t large_page_minimum = GetLargePageMinimum();
			if (large_page_minimum)
				OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
			if (token) {
				LUID luid;
				if (LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &luid)) {
					TOKEN_PRIVILEGES token_privileges;
					memset(&token_privileges, 0, sizeof(token_privileges));
					token_privileges.PrivilegeCount = 1;
					token_privileges.Privileges[0].Luid = luid;
					token_privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
					if (AdjustTokenPrivileges(token, FALSE, &token_privileges, 0, 0, 0)) {
						//AdjustTokenPrivileges can succeed without assigning
						//the privilege; GetLastError distinguishes the cases
						DWORD err = GetLastError();
						if (err == ERROR_SUCCESS) {
							_memory_huge_pages = 1;
							_memory_page_size = large_page_minimum;
							_memory_map_granularity = large_page_minimum;
						}
					}
				}
				CloseHandle(token);
			}
		}
#else
		_memory_page_size = (size_t)sysconf(_SC_PAGESIZE);
		_memory_map_granularity = _memory_page_size;
		if (config && config->enable_huge_pages) {
#if defined(__linux__)
			//Linux: read the configured huge page size from /proc/meminfo
			size_t huge_page_size = 0;
			FILE* meminfo = fopen("/proc/meminfo", "r");
			if (meminfo) {
				char line[128];
				while (!huge_page_size && fgets(line, sizeof(line) - 1, meminfo)) {
					line[sizeof(line) - 1] = 0;
					if (strstr(line, "Hugepagesize:"))
						huge_page_size = (size_t)strtol(line + 13, 0, 10) * 1024;
				}
				fclose(meminfo);
			}
			if (huge_page_size) {
				_memory_huge_pages = 1;
				_memory_page_size = huge_page_size;
				_memory_map_granularity = huge_page_size;
			}
#elif defined(__FreeBSD__)
			int rc;
			size_t sz = sizeof(rc);
			//FreeBSD: superpages are usable when pmap page promotion is enabled
			if (sysctlbyname("vm.pmap.pg_ps_enabled", &rc, &sz, NULL, 0) == 0 && rc == 1) {
				_memory_huge_pages = 1;
				_memory_page_size = 2 * 1024 * 1024;
				_memory_map_granularity = _memory_page_size;
			}
#elif defined(__APPLE__)
			_memory_huge_pages = 1;
			_memory_page_size = 2 * 1024 * 1024;
			_memory_map_granularity = _memory_page_size;
#endif
		}
#endif
	} else {
		//Explicit page size supplied; take the huge page flag from the config
		if (config && config->enable_huge_pages)
			_memory_huge_pages = 1;
	}
	//The ABA counter in heap orphan list is tied to using 512 (bitmask 0x1FF)
	if (_memory_page_size < 512)
		_memory_page_size = 512;
	if (_memory_page_size > (64 * 1024 * 1024))
		_memory_page_size = (64 * 1024 * 1024);
	//Round the page size down to a power of two and remember its bit shift
	_memory_page_size_shift = 0;
	size_t page_size_bit = _memory_page_size;
	while (page_size_bit != 1) {
		++_memory_page_size_shift;
		page_size_bit >>= 1;
	}
	_memory_page_size = ((size_t)1 << _memory_page_size_shift);
#if RPMALLOC_CONFIGURABLE
	//Clamp span size to [4KiB, 256KiB] and round up to a power of two
	size_t span_size = _memory_config.span_size;
	if (!span_size)
		span_size = (64 * 1024);
	if (span_size > (256 * 1024))
		span_size = (256 * 1024);
	_memory_span_size = 4096;
	_memory_span_size_shift = 12;
	while (_memory_span_size < span_size) {
		_memory_span_size <<= 1;
		++_memory_span_size_shift;
	}
	_memory_span_mask = ~(uintptr_t)(_memory_span_size - 1);
#endif
	//Spans mapped per system call: at least a page, and page-size aligned
	_memory_span_map_count = ( _memory_config.span_map_count ? _memory_config.span_map_count : DEFAULT_SPAN_MAP_COUNT);
	if ((_memory_span_size * _memory_span_map_count) < _memory_page_size)
		_memory_span_map_count = (_memory_page_size / _memory_span_size);
	if ((_memory_page_size >= _memory_span_size) && ((_memory_span_map_count * _memory_span_size) % _memory_page_size))
		_memory_span_map_count = (_memory_page_size / _memory_span_size);
	//Publish the effective configuration back to the caller-visible struct
	_memory_config.page_size = _memory_page_size;
	_memory_config.span_size = _memory_span_size;
	_memory_config.span_map_count = _memory_span_map_count;
	_memory_config.enable_huge_pages = _memory_huge_pages;
	_memory_span_release_count = (_memory_span_map_count > 4 ? ((_memory_span_map_count < 64) ? _memory_span_map_count : 64) : 4);
	_memory_span_release_count_large = (_memory_span_release_count > 8 ? (_memory_span_release_count / 4) : 2);
#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
	if (pthread_key_create(&_memory_thread_heap, 0))
		return -1;
#endif
#if defined(_MSC_VER) && !defined(__clang__) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
	//FLS destructor detects thread exit on MSVC static builds
	fls_key = FlsAlloc(&rp_thread_destructor);
#endif
	atomic_store32(&_memory_heap_id, 0);
	atomic_store32(&_memory_orphan_counter, 0);
#if ENABLE_STATISTICS
	atomic_store32(&_memory_active_heaps, 0);
	atomic_store32(&_reserved_spans, 0);
	atomic_store32(&_mapped_pages, 0);
	_mapped_pages_peak = 0;
	atomic_store32(&_mapped_total, 0);
	atomic_store32(&_unmapped_total, 0);
	atomic_store32(&_mapped_pages_os, 0);
	atomic_store32(&_huge_pages_current, 0);
	_huge_pages_peak = 0;
#endif
	//Setup all small and medium size classes
	size_t iclass = 0;
	_memory_size_class[iclass].block_size = SMALL_GRANULARITY;
	_memory_adjust_size_class(iclass);
	for (iclass = 1; iclass < SMALL_CLASS_COUNT; ++iclass) {
		size_t size = iclass * SMALL_GRANULARITY;
		_memory_size_class[iclass].block_size = (uint32_t)size;
		_memory_adjust_size_class(iclass);
	}
	//At least two blocks per span, then fall back to large allocations
	_memory_medium_size_limit = (_memory_span_size - SPAN_HEADER_SIZE) >> 1;
	if (_memory_medium_size_limit > MEDIUM_SIZE_LIMIT)
		_memory_medium_size_limit = MEDIUM_SIZE_LIMIT;
	for (iclass = 0; iclass < MEDIUM_CLASS_COUNT; ++iclass) {
		size_t size = SMALL_SIZE_LIMIT + ((iclass + 1) * MEDIUM_GRANULARITY);
		if (size > _memory_medium_size_limit)
			break;
		_memory_size_class[SMALL_CLASS_COUNT + iclass].block_size = (uint32_t)size;
		_memory_adjust_size_class(SMALL_CLASS_COUNT + iclass);
	}
	for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx)
		atomic_store_ptr(&_memory_heaps[list_idx], 0);
	//Initialize this thread
	rpmalloc_thread_initialize();
	return 0;
}
//! Finalize the allocator: return every heap's spans and caches, unmap all
//! heap structures and global caches, and reset the initialized flag.
void
rpmalloc_finalize(void) {
	atomic_thread_fence_acquire();
	rpmalloc_thread_finalize();
	//Free all thread caches
	for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
		heap_t* heap = atomic_load_ptr(&_memory_heaps[list_idx]);
		while (heap) {
			//Return unused reserved spans before tearing down the heap
			if (heap->spans_reserved) {
				span_t* span = _memory_map_spans(heap, heap->spans_reserved);
				_memory_unmap_span(span);
			}
			for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
				heap_class_t* heap_class = heap->span_class + iclass;
				span_t* span = heap_class->partial_span;
				while (span) {
					span_t* next = span->next;
					if (span->state == SPAN_STATE_ACTIVE) {
						//Count free blocks on both the heap class list and the
						//span's own list to determine whether all blocks that
						//were handed out have been returned
						uint32_t used_blocks = span->block_count;
						if (span->free_list_limit < span->block_count)
							used_blocks = span->free_list_limit;
						uint32_t free_blocks = 0;
						void* block = heap_class->free_list;
						while (block) {
							++free_blocks;
							block = *((void**)block);
						}
						block = span->free_list;
						while (block) {
							++free_blocks;
							block = *((void**)block);
						}
						if (used_blocks == (free_blocks + span->list_size))
							_memory_heap_cache_insert(heap, span);
					} else {
						if (span->used_count == span->list_size)
							_memory_heap_cache_insert(heap, span);
					}
					span = next;
				}
			}
#if ENABLE_THREAD_CACHE
			//Free span caches (other thread might have deferred after the thread using this heap finalized)
			_memory_heap_cache_adopt_deferred(heap);
			for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
				if (heap->span_cache[iclass])
					_memory_unmap_span_list(heap->span_cache[iclass]);
			}
#endif
			//Unmap the heap structure itself (same size as mapped at creation)
			heap_t* next_heap = heap->next_heap;
			size_t heap_size = (1 + (sizeof(heap_t) >> _memory_page_size_shift)) * _memory_page_size;
			_memory_unmap(heap, heap_size, heap->align_offset, heap_size);
			heap = next_heap;
		}
	}
#if ENABLE_GLOBAL_CACHE
	//Free global caches
	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass)
		_memory_cache_finalize(&_memory_span_cache[iclass]);
#endif
	atomic_store_ptr(&_memory_orphan_heaps, 0);
	atomic_thread_fence_release();
#if ENABLE_STATISTICS
	//If you hit these asserts you probably have memory leaks or double frees in your code
	assert(!atomic_load32(&_mapped_pages));
	assert(!atomic_load32(&_reserved_spans));
	assert(!atomic_load32(&_mapped_pages_os));
#endif
#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD
	pthread_key_delete(_memory_thread_heap);
#endif
#if defined(_MSC_VER) && !defined(__clang__) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
	FlsFree(fls_key);
#endif
	_rpmalloc_initialized = 0;
}
//! Initialize thread, assign heap. Safe to call repeatedly; a heap is only
//! allocated the first time for each thread.
extern inline void
rpmalloc_thread_initialize(void) {
	if (!get_thread_heap_raw()) {
		heap_t* heap = _memory_allocate_heap();
		if (heap) {
			atomic_thread_fence_acquire();
#if ENABLE_STATISTICS
			atomic_incr32(&_memory_active_heaps);
#endif
			set_thread_heap(heap);
#if defined(_MSC_VER) && !defined(__clang__) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
			//Register the heap in FLS so rp_thread_destructor runs at thread exit
			FlsSetValue(fls_key, heap);
#endif
		}
	}
}
//! Finalize thread, orphan heap. Thread caches are flushed to the global
//! cache (or unmapped) and the heap is pushed on the lock-free orphan list
//! for adoption by a future thread.
void
rpmalloc_thread_finalize(void) {
	heap_t* heap = get_thread_heap_raw();
	if (!heap)
		return;
	//Release thread cache spans back to global cache
#if ENABLE_THREAD_CACHE
	_memory_heap_cache_adopt_deferred(heap);
	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
		span_t* span = heap->span_cache[iclass];
#if ENABLE_GLOBAL_CACHE
		//Push the cached spans to the global cache in release_count sized batches
		while (span) {
			assert(span->span_count == (iclass + 1));
			size_t release_count = (!iclass ? _memory_span_release_count : _memory_span_release_count_large);
			span_t* next = _memory_span_list_split(span, (uint32_t)release_count);
#if ENABLE_STATISTICS
			heap->thread_to_global += (size_t)span->list_size * span->span_count * _memory_span_size;
			heap->span_use[iclass].spans_to_global += span->list_size;
#endif
			_memory_global_cache_insert(span);
			span = next;
		}
#else
		if (span)
			_memory_unmap_span_list(span);
#endif
		heap->span_cache[iclass] = 0;
	}
#endif
	//Orphan the heap: push onto the lock-free orphan list with an ABA counter
	//packed into the low 9 bits of the pointer (heaps are >= 512 byte aligned)
	void* raw_heap;
	uintptr_t orphan_counter;
	heap_t* last_heap;
	do {
		last_heap = atomic_load_ptr(&_memory_orphan_heaps);
		heap->next_orphan = (void*)((uintptr_t)last_heap & ~(uintptr_t)0x1FF);
		orphan_counter = (uintptr_t)atomic_incr32(&_memory_orphan_counter);
		raw_heap = (void*)((uintptr_t)heap | (orphan_counter & (uintptr_t)0x1FF));
	} while (!atomic_cas_ptr(&_memory_orphan_heaps, raw_heap, last_heap));
	set_thread_heap(0);
#if ENABLE_STATISTICS
	atomic_add32(&_memory_active_heaps, -1);
	assert(atomic_load32(&_memory_active_heaps) >= 0);
#endif
}
//! Report whether the calling thread has been initialized (has a heap attached).
int
rpmalloc_is_thread_initialized(void) {
	return get_thread_heap_raw() ? 1 : 0;
}
//! Return a read-only pointer to the active allocator configuration.
const rpmalloc_config_t*
rpmalloc_config(void) {
	return &_memory_config;
}
//! Map new pages to virtual memory
// size must be a multiple of the page size. When mapping span-sized or larger
// regions, extra padding is requested so the result can be aligned to the span
// size; the distance skipped is stored in *offset (encoded as bytes >> 3) so
// _memory_unmap_os can reconstruct the original mapping base.
static void*
_memory_map_os(size_t size, size_t* offset) {
	//Either size is a heap (a single page) or a (multiple) span - we only need to align spans, and only if larger than map granularity
	size_t padding = ((size >= _memory_span_size) && (_memory_span_size > _memory_map_granularity)) ? _memory_span_size : 0;
	assert(size >= _memory_page_size);
#if PLATFORM_WINDOWS
	//Ok to MEM_COMMIT - according to MSDN, "actual physical pages are not allocated unless/until the virtual addresses are actually accessed"
	void* ptr = VirtualAlloc(0, size + padding, (_memory_huge_pages ? MEM_LARGE_PAGES : 0) | MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
	if (!ptr) {
		assert(!"Failed to map virtual memory block");
		return 0;
	}
#else
	int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED;
#  if defined(__APPLE__)
	// macOS passes the superpage request through the fd argument
	void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, (_memory_huge_pages ? VM_FLAGS_SUPERPAGE_SIZE_2MB : -1), 0);
#  elif defined(MAP_HUGETLB)
	void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, (_memory_huge_pages ? MAP_HUGETLB : 0) | flags, -1, 0);
#  else
	void* ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);
#  endif
	if ((ptr == MAP_FAILED) || !ptr) {
		assert("Failed to map virtual memory block" == 0);
		return 0;
	}
#endif
#if ENABLE_STATISTICS
	atomic_add32(&_mapped_pages_os, (int32_t)((size + padding) >> _memory_page_size_shift));
#endif
	if (padding) {
		// Advance to the next span-size boundary and record how far we moved
		size_t final_padding = padding - ((uintptr_t)ptr & ~_memory_span_mask);
		assert(final_padding <= _memory_span_size);
		assert(final_padding <= padding);
		assert(!(final_padding % 8));
		ptr = pointer_offset(ptr, final_padding);
		*offset = final_padding >> 3;
	}
	assert((size < _memory_span_size) || !((uintptr_t)ptr & ~_memory_span_mask));
	return ptr;
}
//! Unmap pages from virtual memory
// offset is the alignment offset recorded by _memory_map_os (bytes >> 3) and is
// only meaningful when release is non-zero (full release of the mapping).
// release == 0 means a partial decommit of a subset of the mapped range.
static void
_memory_unmap_os(void* address, size_t size, size_t offset, size_t release) {
	assert(release || (offset == 0));
	assert(!release || (release >= _memory_page_size));
	assert(size >= _memory_page_size);
	if (release && offset) {
		// Undo the alignment shift applied at map time to get the real base
		offset <<= 3;
		address = pointer_offset(address, -(int32_t)offset);
#if PLATFORM_POSIX
		//Padding is always one span size
		release += _memory_span_size;
#endif
	}
#if !DISABLE_UNMAP
#if PLATFORM_WINDOWS
	if (!VirtualFree(address, release ? 0 : size, release ? MEM_RELEASE : MEM_DECOMMIT)) {
		assert(!"Failed to unmap virtual memory block");
	}
#else
	if (release) {
		if (munmap(address, release)) {
			assert("Failed to unmap virtual memory block" == 0);
		}
	}
	else {
		// Partial decommit: prefer MADV_FREE semantics, fall back to DONTNEED
#if defined(POSIX_MADV_FREE)
		if (posix_madvise(address, size, POSIX_MADV_FREE))
#endif
		if (posix_madvise(address, size, POSIX_MADV_DONTNEED)) {
			assert("Failed to madvise virtual memory block as free" == 0);
		}
	}
#endif
#endif
#if ENABLE_STATISTICS
	if (release)
		atomic_add32(&_mapped_pages_os, -(int32_t)(release >> _memory_page_size_shift));
#endif
}
// Extern interface
//! Allocate a block of at least the given size from the calling thread's heap.
extern inline RPMALLOC_ALLOCATOR void*
rpmalloc(size_t size) {
#if ENABLE_VALIDATE_ARGS
	if (size >= MAX_ALLOC_SIZE) {
		errno = EINVAL;
		return 0;
	}
#endif
	return _memory_allocate(get_thread_heap(), size);
}
//! Free the given memory block by handing it back to the allocator core.
extern inline void
rpfree(void* ptr) {
	_memory_deallocate(ptr);
}
//! Allocate a zero-initialized block of at least num * size bytes.
// Fix: guard the memset with a null check - _memory_allocate can fail, and
// memset(NULL, 0, n) is undefined behavior. On failure NULL is returned.
extern inline RPMALLOC_ALLOCATOR void*
rpcalloc(size_t num, size_t size) {
	size_t total;
#if ENABLE_VALIDATE_ARGS
#if PLATFORM_WINDOWS
	int err = SizeTMult(num, size, &total);
	if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
		errno = EINVAL;
		return 0;
	}
#else
	int err = __builtin_umull_overflow(num, size, &total);
	if (err || (total >= MAX_ALLOC_SIZE)) {
		errno = EINVAL;
		return 0;
	}
#endif
#else
	total = num * size;
#endif
	heap_t* heap = get_thread_heap();
	void* block = _memory_allocate(heap, total);
	if (block)
		memset(block, 0, total);
	return block;
}
//! Resize the given block to at least the given size, preserving contents.
extern inline RPMALLOC_ALLOCATOR void*
rprealloc(void* ptr, size_t size) {
#if ENABLE_VALIDATE_ARGS
	if (size >= MAX_ALLOC_SIZE) {
		errno = EINVAL;
		return ptr;
	}
#endif
	void* block = _memory_reallocate(ptr, size, 0, 0);
	return block;
}
//! Reallocate the given block to at least the given size and alignment.
// flags may contain RPMALLOC_NO_PRESERVE to skip copying the old contents.
// Fix: check the result of rpaligned_alloc before using it - the original
// dereferenced a span pointer derived from a possibly-NULL block (and passed
// NULL to memcpy). On failure NULL is returned and, per the realloc contract,
// the original block is left intact.
extern RPMALLOC_ALLOCATOR void*
rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize,
                  unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
	if ((size + alignment < size) || (alignment > _memory_page_size)) {
		errno = EINVAL;
		return 0;
	}
#endif
	void* block;
	if (alignment > 32) {
		size_t usablesize = _memory_usable_size(ptr);
		// Reuse in place if the block is large enough, not wastefully large,
		// and already satisfies the requested alignment
		if ((usablesize >= size) && (size >= (usablesize / 2)) && !((uintptr_t)ptr & (alignment - 1)))
			return ptr;
		block = rpaligned_alloc(alignment, size);
		if (!block)
			return 0;
		if (ptr) {
			if (!oldsize)
				oldsize = usablesize;
			if (!(flags & RPMALLOC_NO_PRESERVE))
				memcpy(block, ptr, oldsize < size ? oldsize : size);
			rpfree(ptr);
		}
		//Mark as having aligned blocks
		span_t* span = (span_t*)((uintptr_t)block & _memory_span_mask);
		span->flags |= SPAN_FLAG_ALIGNED_BLOCKS;
	} else {
		block = _memory_reallocate(ptr, size, oldsize, flags);
	}
	return block;
}
//! Allocate a block of at least the given size with the given alignment.
// Alignment must be a power of two and strictly less than the span size.
// Fix: in the sub-page-alignment fast path the result of rpmalloc was used
// without a null check, so an out-of-memory condition dereferenced a span
// pointer computed from NULL. Now NULL is returned on failure.
extern RPMALLOC_ALLOCATOR void*
rpaligned_alloc(size_t alignment, size_t size) {
	if (alignment <= 16)
		return rpmalloc(size);
#if ENABLE_VALIDATE_ARGS
	if ((size + alignment) < size) {
		errno = EINVAL;
		return 0;
	}
	if (alignment & (alignment - 1)) {
		errno = EINVAL;
		return 0;
	}
#endif
	void* ptr = 0;
	size_t align_mask = alignment - 1;
	if (alignment < _memory_page_size) {
		// Over-allocate and shift forward to the next aligned address
		ptr = rpmalloc(size + alignment);
		if (!ptr)
			return 0;
		if ((uintptr_t)ptr & align_mask)
			ptr = (void*)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
		//Mark as having aligned blocks
		span_t* span = (span_t*)((uintptr_t)ptr & _memory_span_mask);
		span->flags |= SPAN_FLAG_ALIGNED_BLOCKS;
		return ptr;
	}
	// Fallback to mapping new pages for this request. Since pointers passed
	// to rpfree must be able to reach the start of the span by bitmasking of
	// the address with the span size, the returned aligned pointer from this
	// function must be with a span size of the start of the mapped area.
	// In worst case this requires us to loop and map pages until we get a
	// suitable memory address. It also means we can never align to span size
	// or greater, since the span header will push alignment more than one
	// span size away from span start (thus causing pointer mask to give us
	// an invalid span start on free)
	if (alignment & align_mask) {
		errno = EINVAL;
		return 0;
	}
	if (alignment >= _memory_span_size) {
		errno = EINVAL;
		return 0;
	}
	size_t extra_pages = alignment / _memory_page_size;
	// Since each span has a header, we will at least need one extra memory page
	size_t num_pages = 1 + (size / _memory_page_size);
	if (size & (_memory_page_size - 1))
		++num_pages;
	if (extra_pages > num_pages)
		num_pages = 1 + extra_pages;
	size_t original_pages = num_pages;
	size_t limit_pages = (_memory_span_size / _memory_page_size) * 2;
	if (limit_pages < (original_pages * 2))
		limit_pages = original_pages * 2;
	size_t mapped_size, align_offset;
	span_t* span;
retry:
	align_offset = 0;
	mapped_size = num_pages * _memory_page_size;
	span = _memory_map(mapped_size, &align_offset);
	if (!span) {
		errno = ENOMEM;
		return 0;
	}
	ptr = pointer_offset(span, SPAN_HEADER_SIZE);
	if ((uintptr_t)ptr & align_mask)
		ptr = (void*)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
	// Reject the mapping if the aligned pointer escapes the span, the request
	// does not fit, or the span mask would not recover the span start on free
	if (((size_t)pointer_diff(ptr, span) >= _memory_span_size) ||
	        (pointer_offset(ptr, size) > pointer_offset(span, mapped_size)) ||
	        (((uintptr_t)ptr & _memory_span_mask) != (uintptr_t)span)) {
		_memory_unmap(span, mapped_size, align_offset, mapped_size);
		++num_pages;
		if (num_pages > limit_pages) {
			errno = EINVAL;
			return 0;
		}
		goto retry;
	}
	//Store page count in span_count
	span->size_class = (uint32_t)-1;
	span->span_count = (uint32_t)num_pages;
	span->align_offset = (uint32_t)align_offset;
	_memory_statistics_add_peak(&_huge_pages_current, num_pages, _huge_pages_peak);
	return ptr;
}
//! memalign-style entry point; thin wrapper over rpaligned_alloc.
extern inline RPMALLOC_ALLOCATOR void*
rpmemalign(size_t alignment, size_t size) {
	return rpaligned_alloc(alignment, size);
}
//! posix_memalign-style entry point. Stores the allocation in *memptr and
// returns 0 on success, EINVAL if memptr is null, or ENOMEM on failure.
extern inline int
rpposix_memalign(void **memptr, size_t alignment, size_t size) {
	if (!memptr)
		return EINVAL;
	*memptr = rpaligned_alloc(alignment, size);
	return *memptr ? 0 : ENOMEM;
}
//! Query the usable size of the given block; 0 for a null pointer.
extern inline size_t
rpmalloc_usable_size(void* ptr) {
	if (!ptr)
		return 0;
	return _memory_usable_size(ptr);
}
//! Perform deferred deallocations for the calling thread.
// Currently a no-op; kept for API compatibility with the public header.
extern inline void
rpmalloc_thread_collect(void) {
}
//! Populate per-thread statistics for the calling thread's heap.
// Zeroes *stats first; leaves it zeroed if the thread has no heap.
// Fix: sizecache and spancache are aggregate byte totals across all size
// classes, so they must accumulate with += - the original plain assignment
// overwrote the total on every loop iteration, reporting only the last
// class/span visited.
void
rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats) {
	memset(stats, 0, sizeof(rpmalloc_thread_statistics_t));
	heap_t* heap = get_thread_heap_raw();
	if (!heap)
		return;
	for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
		size_class_t* size_class = _memory_size_class + iclass;
		heap_class_t* heap_class = heap->span_class + iclass;
		span_t* span = heap_class->partial_span;
		while (span) {
			atomic_thread_fence_acquire();
			size_t free_count = span->list_size;
			// A partially used span also contributes its not-yet-used blocks
			if (span->state == SPAN_STATE_PARTIAL)
				free_count += (size_class->block_count - span->used_count);
			stats->sizecache += free_count * size_class->block_size;
			span = span->next;
		}
	}
#if ENABLE_THREAD_CACHE
	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
		if (heap->span_cache[iclass])
			stats->spancache += (size_t)heap->span_cache[iclass]->list_size * (iclass + 1) * _memory_span_size;
		span_t* deferred_list = !iclass ? atomic_load_ptr(&heap->span_cache_deferred) : 0;
		//TODO: Incorrect, for deferred lists the size is NOT stored in list_size
		if (deferred_list)
			stats->spancache += (size_t)deferred_list->list_size * (iclass + 1) * _memory_span_size;
	}
#endif
#if ENABLE_STATISTICS
	stats->thread_to_global = heap->thread_to_global;
	stats->global_to_thread = heap->global_to_thread;
	for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
		stats->span_use[iclass].current = (size_t)heap->span_use[iclass].current;
		stats->span_use[iclass].peak = (size_t)heap->span_use[iclass].high;
		stats->span_use[iclass].to_global = (size_t)heap->span_use[iclass].spans_to_global;
		stats->span_use[iclass].from_global = (size_t)heap->span_use[iclass].spans_from_global;
		stats->span_use[iclass].to_cache = (size_t)heap->span_use[iclass].spans_to_cache;
		stats->span_use[iclass].from_cache = (size_t)heap->span_use[iclass].spans_from_cache;
		stats->span_use[iclass].to_reserved = (size_t)heap->span_use[iclass].spans_to_reserved;
		stats->span_use[iclass].from_reserved = (size_t)heap->span_use[iclass].spans_from_reserved;
		stats->span_use[iclass].map_calls = (size_t)heap->span_use[iclass].spans_map_calls;
	}
	for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
		stats->size_use[iclass].alloc_current = (size_t)atomic_load32(&heap->size_class_use[iclass].alloc_current);
		stats->size_use[iclass].alloc_peak = (size_t)heap->size_class_use[iclass].alloc_peak;
		stats->size_use[iclass].alloc_total = (size_t)heap->size_class_use[iclass].alloc_total;
		stats->size_use[iclass].free_total = (size_t)atomic_load32(&heap->size_class_use[iclass].free_total);
		stats->size_use[iclass].spans_to_cache = (size_t)heap->size_class_use[iclass].spans_to_cache;
		stats->size_use[iclass].spans_from_cache = (size_t)heap->size_class_use[iclass].spans_from_cache;
		stats->size_use[iclass].spans_from_reserved = (size_t)heap->size_class_use[iclass].spans_from_reserved;
		stats->size_use[iclass].map_calls = (size_t)heap->size_class_use[iclass].spans_map_calls;
	}
#endif
}
//! Populate global allocator statistics (mapping totals and global cache size).
void
rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats) {
	memset(stats, 0, sizeof(*stats));
#if ENABLE_STATISTICS
	const size_t page_size = _memory_page_size;
	stats->mapped = (size_t)atomic_load32(&_mapped_pages) * page_size;
	stats->mapped_peak = (size_t)_mapped_pages_peak * page_size;
	stats->mapped_total = (size_t)atomic_load32(&_mapped_total) * page_size;
	stats->unmapped_total = (size_t)atomic_load32(&_unmapped_total) * page_size;
	stats->huge_alloc = (size_t)atomic_load32(&_huge_pages_current) * page_size;
	stats->huge_alloc_peak = (size_t)_huge_pages_peak * page_size;
#endif
#if ENABLE_GLOBAL_CACHE
	for (size_t span_class_idx = 0; span_class_idx < LARGE_CLASS_COUNT; ++span_class_idx)
		stats->cached += (size_t)atomic_load32(&_memory_span_cache[span_class_idx].size) * (span_class_idx + 1) * _memory_span_size;
#endif
}
void
rpmalloc_dump_statistics(void* file) {
#if ENABLE_STATISTICS
//If you hit this assert, you still have active threads or forgot to finalize some thread(s)
assert(atomic_load32(&_memory_active_heaps) == 0);
for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
heap_t* heap = atomic_load_ptr(&_memory_heaps[list_idx]);
while (heap) {
fprintf(file, "Heap %d stats:\n", heap->id);
fprintf(file, "Class CurAlloc PeakAlloc TotAlloc TotFree BlkSize BlkCount PeakAllocMiB ToCacheMiB FromCacheMiB FromReserveMiB MmapCalls\n");
for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
if (!heap->size_class_use[iclass].alloc_total) {
assert(!atomic_load32(&heap->size_class_use[iclass].free_total));
assert(!heap->size_class_use[iclass].spans_map_calls);
continue;
}
fprintf(file, "%3u: %10u %10u %10u %10u %8u %8u %13zu %11zu %12zu %14zu %9u\n", (uint32_t)iclass,
atomic_load32(&heap->size_class_use[iclass].alloc_current),
heap->size_class_use[iclass].alloc_peak,
heap->size_class_use[iclass].alloc_total,
atomic_load32(&heap->size_class_use[iclass].free_total),
_memory_size_class[iclass].block_size,
_memory_size_class[iclass].block_count,
((size_t)heap->size_class_use[iclass].alloc_peak * (size_t)_memory_size_class[iclass].block_size) / (size_t)(1024 * 1024),
((size_t)heap->size_class_use[iclass].spans_to_cache * _memory_span_size) / (size_t)(1024 * 1024),
((size_t)heap->size_class_use[iclass].spans_from_cache * _memory_span_size) / (size_t)(1024 * 1024),
((size_t)heap->size_class_use[iclass].spans_from_reserved * _memory_span_size) / (size_t)(1024 * 1024),
heap->size_class_use[iclass].spans_map_calls);
}
fprintf(file, "Spans Current Peak PeakMiB Cached ToCacheMiB FromCacheMiB ToReserveMiB FromReserveMiB ToGlobalMiB FromGlobalMiB MmapCalls\n");
for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
if (!heap->span_use[iclass].high && !heap->span_use[iclass].spans_map_calls)
continue;
fprintf(file, "%4u: %8u %8u %8zu %7u %11zu %12zu %12zu %14zu %11zu %13zu %10u\n", (uint32_t)(iclass + 1),
heap->span_use[iclass].current,
heap->span_use[iclass].high,
((size_t)heap->span_use[iclass].high * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
heap->span_cache[iclass] ? heap->span_cache[iclass]->list_size : 0,
((size_t)heap->span_use[iclass].spans_to_cache * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
((size_t)heap->span_use[iclass].spans_from_cache * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
((size_t)heap->span_use[iclass].spans_to_reserved * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
((size_t)heap->span_use[iclass].spans_from_reserved * (iclass + 1) * _memory_span_size) / (size_t)(1024 * 1024),
((size_t)heap->span_use[iclass].spans_to_global * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
((size_t)heap->span_use[iclass].spans_from_global * (size_t)_memory_span_size * (iclass + 1)) / (size_t)(1024 * 1024),
heap->span_use[iclass].spans_map_calls);
}
fprintf(file, "ThreadToGlobalMiB GlobalToThreadMiB\n");
fprintf(file, "%17zu %17zu\n", (size_t)heap->thread_to_global / (size_t)(1024 * 1024), (size_t)heap->global_to_thread / (size_t)(1024 * 1024));
heap = heap->next_heap;
}
}
size_t huge_current = (size_t)atomic_load32(&_huge_pages_current) * _memory_page_size;
size_t huge_peak = (size_t)_huge_pages_peak * _memory_page_size;
fprintf(file, "HugeCurrentMiB HugePeakMiB\n");
fprintf(file, "%14zu %11zu\n", huge_current / (size_t)(1024 * 1024), huge_peak / (size_t)(1024 * 1024));
size_t mapped = (size_t)atomic_load32(&_mapped_pages) * _memory_page_size;
size_t mapped_os = (size_t)atomic_load32(&_mapped_pages_os) * _memory_page_size;
size_t mapped_peak = (size_t)_mapped_pages_peak * _memory_page_size;
size_t mapped_total = (size_t)atomic_load32(&_mapped_total) * _memory_page_size;
size_t unmapped_total = (size_t)atomic_load32(&_unmapped_total) * _memory_page_size;
size_t reserved_total = (size_t)atomic_load32(&_reserved_spans) * _memory_span_size;
fprintf(file, "MappedMiB MappedOSMiB MappedPeakMiB MappedTotalMiB UnmappedTotalMiB ReservedTotalMiB\n");
fprintf(file, "%9zu %11zu %13zu %14zu %16zu %16zu\n",
mapped / (size_t)(1024 * 1024),
mapped_os / (size_t)(1024 * 1024),
mapped_peak / (size_t)(1024 * 1024),
mapped_total / (size_t)(1024 * 1024),
unmapped_total / (size_t)(1024 * 1024),
reserved_total / (size_t)(1024 * 1024));
fprintf(file, "\n");
#else
(void)sizeof(file);
#endif
}
#if ENABLE_PRELOAD || ENABLE_OVERRIDE
#include "malloc.c"
#endif
/* rpmalloc.h - Memory allocator - Public Domain - 2016 Mattias Jansson
*
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
* The latest source code is always available at
*
* https://github.com/mjansson/rpmalloc
*
* This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
*
*/
#pragma once
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
// Per-compiler attribute macros: symbol visibility/export, allocator hints for
// the optimizer (__malloc__ / __declspec(allocator)), alloc_size annotations
// (skipped on clang < 4 which lacks them), and calling convention.
#if defined(__clang__) || defined(__GNUC__)
# define RPMALLOC_EXPORT __attribute__((visibility("default")))
# define RPMALLOC_ALLOCATOR
# define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__))
# if defined(__clang_major__) && (__clang_major__ < 4)
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
# else
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size)))
# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) __attribute__((alloc_size(count, size)))
# endif
# define RPMALLOC_CDECL
#elif defined(_MSC_VER)
# define RPMALLOC_EXPORT
# define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict)
# define RPMALLOC_ATTRIB_MALLOC
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count,size)
# define RPMALLOC_CDECL __cdecl
#else
# define RPMALLOC_EXPORT
# define RPMALLOC_ALLOCATOR
# define RPMALLOC_ATTRIB_MALLOC
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count,size)
# define RPMALLOC_CDECL
#endif
//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes
#ifndef RPMALLOC_CONFIGURABLE
#define RPMALLOC_CONFIGURABLE 0
#endif
//! Flag to rpaligned_realloc to not preserve content in reallocation
#define RPMALLOC_NO_PRESERVE 1
//! Global (process-wide) allocator statistics, filled by rpmalloc_global_statistics.
typedef struct rpmalloc_global_statistics_t {
	//! Current amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
	size_t mapped;
	//! Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
	size_t mapped_peak;
	//! Current amount of memory in global caches for small and medium sizes (<32KiB)
	size_t cached;
	//! Current amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
	size_t huge_alloc;
	//! Peak amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
	size_t huge_alloc_peak;
	//! Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
	size_t mapped_total;
	//! Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
	size_t unmapped_total;
} rpmalloc_global_statistics_t;
//! Per-thread allocator statistics, filled by rpmalloc_thread_statistics for
// the calling thread's heap.
typedef struct rpmalloc_thread_statistics_t {
	//! Current number of bytes available in thread size class caches for small and medium sizes (<32KiB)
	size_t sizecache;
	//! Current number of bytes available in thread span caches for small and medium sizes (<32KiB)
	size_t spancache;
	//! Total number of bytes transitioned from thread cache to global cache (only if ENABLE_STATISTICS=1)
	size_t thread_to_global;
	//! Total number of bytes transitioned from global cache to thread cache (only if ENABLE_STATISTICS=1)
	size_t global_to_thread;
	//! Per span count statistics (only if ENABLE_STATISTICS=1)
	struct {
		//! Currently used number of spans
		size_t current;
		//! High water mark of spans used
		size_t peak;
		//! Number of spans transitioned to global cache
		size_t to_global;
		//! Number of spans transitioned from global cache
		size_t from_global;
		//! Number of spans transitioned to thread cache
		size_t to_cache;
		//! Number of spans transitioned from thread cache
		size_t from_cache;
		//! Number of spans transitioned to reserved state
		size_t to_reserved;
		//! Number of spans transitioned from reserved state
		size_t from_reserved;
		//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
		size_t map_calls;
	} span_use[32];
	//! Per size class statistics (only if ENABLE_STATISTICS=1)
	struct {
		//! Current number of allocations
		size_t alloc_current;
		//! Peak number of allocations
		size_t alloc_peak;
		//! Total number of allocations
		size_t alloc_total;
		//! Total number of frees
		size_t free_total;
		//! Number of spans transitioned to cache
		size_t spans_to_cache;
		//! Number of spans transitioned from cache
		size_t spans_from_cache;
		//! Number of spans transitioned from reserved state
		size_t spans_from_reserved;
		//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
		size_t map_calls;
	} size_use[128];
} rpmalloc_thread_statistics_t;
//! Allocator configuration passed to rpmalloc_initialize_config; all fields
// are optional (zero selects the built-in default behavior).
typedef struct rpmalloc_config_t {
	//! Map memory pages for the given number of bytes. The returned address MUST be
	//  aligned to the rpmalloc span size, which will always be a power of two.
	//  Optionally the function can store an alignment offset in the offset variable
	//  in case it performs alignment and the returned pointer is offset from the
	//  actual start of the memory region due to this alignment. The alignment offset
	//  will be passed to the memory unmap function. The alignment offset MUST NOT be
	//  larger than 65535 (storable in an uint16_t), if it is you must use natural
	//  alignment to shift it into 16 bits. If you set a memory_map function, you
	//  must also set a memory_unmap function or else the default implementation will
	//  be used for both.
	void* (*memory_map)(size_t size, size_t* offset);
	//! Unmap the memory pages starting at address and spanning the given number of bytes.
	//  If release is set to non-zero, the unmap is for an entire span range as returned by
	//  a previous call to memory_map and that the entire range should be released. The
	//  release argument holds the size of the entire span range. If release is set to 0,
	//  the unmap is a partial decommit of a subset of the mapped memory range.
	//  If you set a memory_unmap function, you must also set a memory_map function or
	//  else the default implementation will be used for both.
	void (*memory_unmap)(void* address, size_t size, size_t offset, size_t release);
	//! Size of memory pages. The page size MUST be a power of two. All memory mapping
	//  requests to memory_map will be made with size set to a multiple of the page size.
	//  Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system page size is used.
	size_t page_size;
	//! Size of a span of memory blocks. MUST be a power of two, and in [4096,262144]
	//  range (unless 0 - set to 0 to use the default span size). Used if RPMALLOC_CONFIGURABLE
	//  is defined to 1.
	size_t span_size;
	//! Number of spans to map at each request to map new virtual memory blocks. This can
	//  be used to minimize the system call overhead at the cost of virtual memory address
	//  space. The extra mapped pages will not be written until actually used, so physical
	//  committed memory should not be affected in the default implementation. Will be
	//  aligned to a multiple of spans that match memory page size in case of huge pages.
	size_t span_map_count;
	//! Enable use of large/huge pages. If this flag is set to non-zero and page size is
	//  zero, the allocator will try to enable huge pages and auto detect the configuration.
	//  If this is set to non-zero and page_size is also non-zero, the allocator will
	//  assume huge pages have been configured and enabled prior to initializing the
	//  allocator.
	//  For Windows, see https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support
	//  For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
	int enable_huge_pages;
} rpmalloc_config_t;
// ---- Public API ------------------------------------------------------------
// Process-wide setup/teardown, per-thread setup/teardown, statistics queries,
// and the malloc-style allocation entry points.
//! Initialize allocator with default configuration
RPMALLOC_EXPORT int
rpmalloc_initialize(void);
//! Initialize allocator with given configuration
RPMALLOC_EXPORT int
rpmalloc_initialize_config(const rpmalloc_config_t* config);
//! Get allocator configuration
RPMALLOC_EXPORT const rpmalloc_config_t*
rpmalloc_config(void);
//! Finalize allocator
RPMALLOC_EXPORT void
rpmalloc_finalize(void);
//! Initialize allocator for calling thread
RPMALLOC_EXPORT void
rpmalloc_thread_initialize(void);
//! Finalize allocator for calling thread
RPMALLOC_EXPORT void
rpmalloc_thread_finalize(void);
//! Perform deferred deallocations pending for the calling thread heap
RPMALLOC_EXPORT void
rpmalloc_thread_collect(void);
//! Query if allocator is initialized for calling thread
RPMALLOC_EXPORT int
rpmalloc_is_thread_initialized(void);
//! Get per-thread statistics
RPMALLOC_EXPORT void
rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats);
//! Get global statistics
RPMALLOC_EXPORT void
rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats);
//! Dump all statistics in human readable format to file (should be a FILE*)
RPMALLOC_EXPORT void
rpmalloc_dump_statistics(void* file);
//! Allocate a memory block of at least the given size
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1);
//! Free the given memory block
RPMALLOC_EXPORT void
rpfree(void* ptr);
//! Allocate a memory block of at least the given size and zero initialize it
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2);
//! Reallocate the given block to at least the given size
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rprealloc(void* ptr, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Reallocate the given block to at least the given size and alignment,
//  with optional control flags (see RPMALLOC_NO_PRESERVE).
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
//! Allocate a memory block of at least the given size and alignment.
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Allocate a memory block of at least the given size and alignment.
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Allocate a memory block of at least the given size and alignment.
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT int
rpposix_memalign(void **memptr, size_t alignment, size_t size);
//! Query the usable size of the given memory block (from given pointer to the end of block)
RPMALLOC_EXPORT size_t
rpmalloc_usable_size(void* ptr);
#ifdef __cplusplus
}
#endif
......@@ -2,7 +2,7 @@
* Authors:
* Anatolii Kurotych <akurotych@gmail.com>
* DeM Labs Inc. https://demlabs.net
* DeM Labs Open source community https://github.com/demlabsinc
* DeM Labs Open source community https://gitlab.demlabs.net/cellframe
* Copyright (c) 2017-2019
* All rights reserved.
......