Remove the opt.lg_prof_bt_max option.
Remove opt.lg_prof_bt_max, and instead hard-code the maximum backtrace depth to 128 (2^7). The original intention of this option was to enable faster backtracing by limiting backtrace depth. However, shallow backtraces make graphical pprof output very difficult to interpret. In practice, decreasing sampling frequency is a better mechanism for limiting profiling overhead.
This commit is contained in:
parent 0b526ff94d
commit 5389146191
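For reference, a sketch of the alternative the message recommends, written in the same style as the manual's existing malloc_conf programlisting that appears in the diff context below. The sample interval shown (2^19 bytes, roughly 512 KiB of allocation activity between samples on average) is purely illustrative and is not a value introduced by this commit:

    malloc_conf = "prof:true,lg_prof_sample:19";

The same option string can typically also be supplied at run time through the MALLOC_CONF environment variable; raising opt.lg_prof_sample reduces profiling overhead without truncating the sampled backtraces.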
@@ -930,8 +930,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 where <literal><prefix></literal> is controlled by the <link
 linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
 option. See the <link
-linkend="opt.lg_prof_bt_max"><mallctl>opt.lg_prof_bt_max</mallctl></link>
-option for backtrace depth control. See the <link
 linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
 option for on-the-fly activation/deactivation. See the <link
 linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
@@ -962,17 +960,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 <filename>jeprof</filename>.</para></listitem>
 </varlistentry>
 
-<varlistentry id="opt.lg_prof_bt_max">
-<term>
-<mallctl>opt.lg_prof_bt_max</mallctl>
-(<type>size_t</type>)
-<literal>r-</literal>
-[<option>--enable-prof</option>]
-</term>
-<listitem><para>Maximum backtrace depth (log base 2) when profiling
-memory allocation activity. The default is 128 (2^7).</para></listitem>
-</varlistentry>
-
 <varlistentry id="opt.prof_active">
 <term>
 <mallctl>opt.prof_active</mallctl>
@@ -1067,9 +1054,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 <citerefentry><refentrytitle>atexit</refentrytitle>
 <manvolnum>3</manvolnum></citerefentry> function to report memory leaks
 detected by allocation sampling. See the
-<link
-linkend="opt.lg_prof_bt_max"><mallctl>opt.lg_prof_bt_max</mallctl></link>
-option for backtrace depth control. See the
 <link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for
 information on analyzing heap profile output. This option is disabled
 by default.</para></listitem>
@@ -9,25 +9,19 @@ typedef struct prof_tdata_s prof_tdata_t;
 
 /* Option defaults. */
 #define PROF_PREFIX_DEFAULT "jeprof"
-#define LG_PROF_BT_MAX_DEFAULT 7
 #define LG_PROF_SAMPLE_DEFAULT 0
 #define LG_PROF_INTERVAL_DEFAULT -1
 
+/*
+* Hard limit on stack backtrace depth. The version of prof_backtrace() that
+* is based on __builtin_return_address() necessarily has a hard-coded number
+* of backtrace frame handlers, and should be kept in sync with this setting.
+*/
+#define PROF_BT_MAX 128
+
 /* Maximum number of backtraces to store in each per thread LRU cache. */
 #define PROF_TCMAX 1024
 
-/*
-* Hard limit on stack backtrace depth. Note that the version of
-* prof_backtrace() that is based on __builtin_return_address() necessarily has
-* a hard-coded number of backtrace frame handlers.
-*/
-#if (defined(JEMALLOC_PROF_LIBGCC) || defined(JEMALLOC_PROF_LIBUNWIND))
-# define LG_PROF_BT_MAX ((ZU(1) << (LG_SIZEOF_PTR+3)) - 1)
-#else
-# define LG_PROF_BT_MAX 7 /* >= LG_PROF_BT_MAX_DEFAULT */
-#endif
-#define PROF_BT_MAX (1U << LG_PROF_BT_MAX)
-
 /* Initial hash table size. */
 #define PROF_CKH_MINITEMS 64
 
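The new comment in the hunk above notes that the __builtin_return_address()-based prof_backtrace() needs a hard-coded number of frame handlers. A minimal, hypothetical C sketch of that constraint (not jemalloc code; MAX_FRAMES, RECORD_FRAME, and show_backtrace are invented names, and frame pointers must be available, e.g. build with -fno-omit-frame-pointer):

/*
 * Hypothetical illustration only: mirrors the BT_FRAME() pattern used by
 * the GCC-based prof_backtrace() shown later in this diff.
 */
#include <stdio.h>

#define MAX_FRAMES 4	/* stands in for the hard-coded PROF_BT_MAX */

/*
 * The argument to the builtins must be an integer constant, hence one
 * macro expansion per possible frame rather than a runtime loop.
 */
#define RECORD_FRAME(i)						\
	if ((i) < MAX_FRAMES) {					\
		if (__builtin_frame_address(i) == NULL)		\
			return;					\
		printf("frame %d: %p\n", (i),			\
		    __builtin_return_address(i));		\
	} else							\
		return;

static void
show_backtrace(void)
{
	RECORD_FRAME(0)
	RECORD_FRAME(1)
	RECORD_FRAME(2)
	RECORD_FRAME(3)	/* ...one expansion per frame, up to the limit */
}

int
main(void)
{
	show_backtrace();
	return (0);
}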
@@ -163,7 +157,6 @@ extern bool opt_prof;
 * to notice state changes.
 */
 extern bool opt_prof_active;
-extern size_t opt_lg_prof_bt_max; /* Maximum backtrace depth. */
 extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
 extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
 extern bool opt_prof_gdump; /* High-water memory dumping. */
@@ -186,9 +179,6 @@ extern uint64_t prof_interval;
 */
 extern bool prof_promote;
 
-/* (1U << opt_lg_prof_bt_max). */
-extern unsigned prof_bt_max;
-
 /* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
 #ifndef NO_TLS
 extern __thread prof_tdata_t *prof_tdata_tls
@@ -213,7 +203,7 @@ extern __thread prof_tdata_t *prof_tdata_tls
 extern pthread_key_t prof_tdata_tsd;
 
 void bt_init(prof_bt_t *bt, void **vec);
-void prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max);
+void prof_backtrace(prof_bt_t *bt, unsigned nignore);
 prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
 void prof_idump(void);
 bool prof_mdump(const char *filename);
@@ -249,7 +239,7 @@ bool prof_boot2(void);
 /* Don't bother with sampling logic, since sampling */\
 /* interval is 1. */\
 bt_init(&bt, prof_tdata->vec); \
-prof_backtrace(&bt, nignore, prof_bt_max); \
+prof_backtrace(&bt, nignore); \
 ret = prof_lookup(&bt); \
 } else { \
 if (prof_tdata->threshold == 0) { \
@@ -272,7 +262,7 @@ bool prof_boot2(void);
 if (size >= prof_tdata->threshold - \
 prof_tdata->accum) { \
 bt_init(&bt, prof_tdata->vec); \
-prof_backtrace(&bt, nignore, prof_bt_max); \
+prof_backtrace(&bt, nignore); \
 ret = prof_lookup(&bt); \
 } else \
 ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
@@ -74,7 +74,6 @@ CTL_PROTO(opt_lg_tcache_gc_sweep)
 CTL_PROTO(opt_prof)
 CTL_PROTO(opt_prof_prefix)
 CTL_PROTO(opt_prof_active)
-CTL_PROTO(opt_lg_prof_bt_max)
 CTL_PROTO(opt_lg_prof_sample)
 CTL_PROTO(opt_lg_prof_interval)
 CTL_PROTO(opt_prof_gdump)
@@ -216,7 +215,6 @@ static const ctl_node_t opt_node[] = {
 {NAME("prof"), CTL(opt_prof)},
 {NAME("prof_prefix"), CTL(opt_prof_prefix)},
 {NAME("prof_active"), CTL(opt_prof_active)},
-{NAME("lg_prof_bt_max"), CTL(opt_lg_prof_bt_max)},
 {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
 {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
 {NAME("prof_gdump"), CTL(opt_prof_gdump)},
@@ -1125,7 +1123,6 @@ CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep,
 CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
 CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
 CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
@@ -597,8 +597,6 @@ malloc_conf_init(void)
 if (config_prof) {
 CONF_HANDLE_BOOL(prof)
 CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
-CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0,
-LG_PROF_BT_MAX)
 CONF_HANDLE_BOOL(prof_active)
 CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
 (sizeof(uint64_t) << 3) - 1)
src/prof.c (22 changed lines)
@@ -16,7 +16,6 @@
 
 bool opt_prof = false;
 bool opt_prof_active = true;
-size_t opt_lg_prof_bt_max = LG_PROF_BT_MAX_DEFAULT;
 size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
 ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
 bool opt_prof_gdump = false;
@@ -27,8 +26,6 @@ char opt_prof_prefix[PATH_MAX + 1];
 uint64_t prof_interval;
 bool prof_promote;
 
-unsigned prof_bt_max;
-
 #ifndef NO_TLS
 __thread prof_tdata_t *prof_tdata_tls
 JEMALLOC_ATTR(tls_model("initial-exec"));
@@ -179,7 +176,7 @@ prof_leave(void)
 
 #ifdef JEMALLOC_PROF_LIBUNWIND
 void
-prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
 {
 unw_context_t uc;
 unw_cursor_t cursor;
@@ -189,7 +186,6 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 cassert(config_prof);
 assert(bt->len == 0);
 assert(bt->vec != NULL);
-assert(max <= (1U << opt_lg_prof_bt_max));
 
 unw_getcontext(&uc);
 unw_init_local(&cursor, &uc);
@@ -205,7 +201,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 * Iterate over stack frames until there are no more, or until no space
 * remains in bt.
 */
-for (i = 0; i < max; i++) {
+for (i = 0; i < PROF_BT_MAX; i++) {
 unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
 bt->len++;
 err = unw_step(&cursor);
@@ -243,9 +239,9 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg)
 }
 
 void
-prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
 {
-prof_unwind_data_t data = {bt, nignore, max};
+prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};
 
 cassert(config_prof);
 
@@ -253,10 +249,10 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 }
 #elif (defined(JEMALLOC_PROF_GCC))
 void
-prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
 {
 #define BT_FRAME(i) \
-if ((i) < nignore + max) { \
+if ((i) < nignore + PROF_BT_MAX) { \
 void *p; \
 if (__builtin_frame_address(i) == 0) \
 return; \
@@ -272,7 +268,6 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 
 cassert(config_prof);
 assert(nignore <= 3);
-assert(max <= (1U << opt_lg_prof_bt_max));
 
 BT_FRAME(0)
 BT_FRAME(1)
@@ -423,7 +418,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 }
 #else
 void
-prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
 {
 
 cassert(config_prof);
@@ -1168,7 +1163,7 @@ prof_tdata_init(void)
 }
 ql_new(&prof_tdata->lru_ql);
 
-prof_tdata->vec = imalloc(sizeof(void *) * prof_bt_max);
+prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
 if (prof_tdata->vec == NULL) {
 ckh_delete(&prof_tdata->bt2cnt);
 idalloc(prof_tdata);
@@ -1270,7 +1265,6 @@ prof_boot2(void)
 abort();
 }
 
-prof_bt_max = (1U << opt_lg_prof_bt_max);
 if (malloc_mutex_init(&prof_dump_seq_mtx))
 return (true);
 
@@ -511,7 +511,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 OPT_WRITE_SSIZE_T(lg_tcache_max)
 OPT_WRITE_BOOL(prof)
 OPT_WRITE_CHAR_P(prof_prefix)
-OPT_WRITE_SIZE_T(lg_prof_bt_max)
 OPT_WRITE_BOOL(prof_active)
 OPT_WRITE_SSIZE_T(lg_prof_sample)
 OPT_WRITE_BOOL(prof_accum)
@@ -616,11 +615,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 }
 if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
 == 0 && bv) {
-CTL_GET("opt.lg_prof_bt_max", &sv, size_t);
-write_cb(cbopaque, "Maximum profile backtrace depth: ");
-write_cb(cbopaque, u2s((1U << sv), 10, s));
-write_cb(cbopaque, "\n");
-
 CTL_GET("opt.lg_prof_sample", &sv, size_t);
 write_cb(cbopaque, "Average profile sample interval: ");
 write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));