/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif
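
/*
 * Usage sketch (hypothetical caller, not part of this header): mb_write() is
 * a write barrier, so a producer can publish data to another thread by
 * fencing between the payload store and the "ready" flag store:
 *
 *	shared_data = compute();	// Payload store.
 *	mb_write();			// Payload visible no later than flag.
 *	shared_ready = true;		// Publication store.
 *
 * The reader needs matching read-side ordering; this header only provides
 * the write side.
 */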
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
 * According to the Intel Architecture Software Developer's Manual, current
 * processors execute instructions in order from the perspective of other
 * processors in a multiprocessor system, but 1) Intel reserves the right to
 * change that, and 2) the compiler's optimizer could re-order instructions if
 * there weren't some form of barrier.  Therefore, even if running on an
 * architecture that does not need memory barriers (everything through at least
 * i686), an "optimizer barrier" is necessary.
 */
JEMALLOC_INLINE void
mb_write(void)
{

#  if 0
	/* This is a true memory barrier. */
	asm volatile ("pusha;"
	    "xor %%eax,%%eax;"
	    "cpuid;"
	    "popa;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
#  else
	/*
	 * This is hopefully enough to keep the compiler from reordering
	 * instructions around this one.
	 */
	asm volatile ("nop;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
#  endif
}
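/*
 * Note (an assumption, not in the original): on a C11 toolchain the same
 * compiler-only fence could be written portably as
 * atomic_signal_fence(memory_order_seq_cst) from <stdatomic.h>; the inline
 * asm above is kept for pre-C11 compilers.
 */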
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("sfence"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
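/*
 * sfence orders only stores; ordinary x86-64 stores are already ordered with
 * respect to each other, so this mainly fences non-temporal (streaming)
 * stores and the compiler.  A full barrier would be mfence.
 */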
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("eieio"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
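/*
 * eieio ("enforce in-order execution of I/O") orders stores to cacheable
 * memory with respect to each other; lwsync or sync would be heavier-weight
 * alternatives.
 */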
#elif defined(__sparc__) && defined(__arch64__)
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("membar #StoreStore"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
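/*
 * Under SPARC v9's relaxed memory order, membar #StoreStore ensures all
 * prior stores complete before any subsequent store, which is exactly the
 * write-barrier semantic mb_write() promises.
 */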
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{

	__sync_synchronize();
}
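/*
 * __sync_synchronize() is GCC's legacy full-barrier builtin: stronger than a
 * pure write barrier, but portable wherever the compiler supports it.
 */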
#else
/*
 * This is much slower than a simple memory barrier, but the semantics of
 * mutex unlock make this work: unlocking a mutex is a release operation, so
 * all writes performed before the unlock are made visible along with it.
 */
JEMALLOC_INLINE void
mb_write(void)
{
	malloc_mutex_t mtx;

	malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
	malloc_mutex_lock(TSDN_NULL, &mtx);
	malloc_mutex_unlock(TSDN_NULL, &mtx);
}
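/*
 * Alternative (an assumption, not in the original): on a C11 toolchain this
 * fallback could be replaced by atomic_thread_fence(memory_order_release)
 * from <stdatomic.h>, avoiding the mutex round trip entirely.
 */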
#endif
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/