@@ -16,7 +16,6 @@ int kde_malloc_is_used = 0;
 #define USE_MALLOC_LOCK
 #define INLINE __inline__
 /*#define INLINE*/
-#define USE_MEMCPY 0
 #define MMAP_CLEARS 1
 
 /*
@@ -193,8 +192,6 @@ int kde_malloc_is_used = 0;
 
     __STD_C                    derived from C compiler defines
     WIN32                      NOT defined
-    HAVE_MEMCPY                defined
-    USE_MEMCPY                 1 if HAVE_MEMCPY is defined
     HAVE_MMAP                  defined as 1
     MMAP_CLEARS                1
     HAVE_MREMAP                0 unless linux defined
@@ -563,34 +560,6 @@ extern "C" {
 #endif /* USE_DL_PREFIX */
 
 
-/*
-  HAVE_MEMCPY should be defined if you are not otherwise using
-  ANSI STD C, but still have memcpy and memset in your C library
-  and want to use them in calloc and realloc. Otherwise simple
-  macro versions are defined below.
-
-  USE_MEMCPY should be defined as 1 if you actually want to
-  have memset and memcpy called. People report that the macro
-  versions are faster than libc versions on some systems.
-
-  Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
-  (of <= 36 bytes) are manually unrolled in realloc and calloc.
-*/
-
-/* If it's available it's defined in config.h. */
-/* #define HAVE_MEMCPY */
-
-#ifndef USE_MEMCPY
-#ifdef HAVE_MEMCPY
-#define USE_MEMCPY 1
-#else
-#define USE_MEMCPY 0
-#endif
-#endif
-
-
-#if (__STD_C || defined(HAVE_MEMCPY))
-
 #ifdef WIN32
 /* On Win32 memset and memcpy are already declared in windows.h */
 #else
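
With the HAVE_MEMCPY/USE_MEMCPY knobs gone, the allocator simply assumes memcpy and memset exist and calls them directly. Before this patch the KDE build set USE_MEMCPY to 0 (see the first hunk) and went through the Duff's-device macros removed further down; after it, every call site does unconditionally what the USE_MEMCPY=1 configuration used to expand to. A sketch of that net effect, not part of the patch:

#include <string.h>

/* What the patched call sites now do unconditionally, identical
   to the old USE_MEMCPY=1 expansion of the removed macros. */
#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0, nbytes)
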
@@ -602,7 +571,6 @@ Void_t* memset();
 Void_t* memcpy();
 #endif
 #endif
-#endif
 
 /*
   MALLOC_FAILURE_ACTION is the action to take before "return 0" when
@@ -1747,64 +1715,6 @@ int public_mALLOPt(int p, int v) {
 #endif
 
 
 
-/* ------------- Optional versions of memcopy ---------------- */
-
-
-#if USE_MEMCPY
-
-/*
-  Note: memcpy is ONLY invoked with non-overlapping regions,
-  so the (usually slower) memmove is not needed.
-*/
-
-#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
-#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0, nbytes)
-
-#else /* !USE_MEMCPY */
-
-/* Use Duff's device for good zeroing/copying performance. */
-
-#define MALLOC_ZERO(charp, nbytes)                                   \
-do {                                                                 \
-  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                  \
-  unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);            \
-  long mcn;                                                          \
-  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }    \
-  switch (mctmp) {                                                   \
-    case 0: for(;;) { *mzp++ = 0;                                    \
-    case 7:           *mzp++ = 0;                                    \
-    case 6:           *mzp++ = 0;                                    \
-    case 5:           *mzp++ = 0;                                    \
-    case 4:           *mzp++ = 0;                                    \
-    case 3:           *mzp++ = 0;                                    \
-    case 2:           *mzp++ = 0;                                    \
-    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }       \
-  }                                                                  \
-} while(0)
-
-#define MALLOC_COPY(dest,src,nbytes)                                 \
-do {                                                                 \
-  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                   \
-  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                  \
-  unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);            \
-  long mcn;                                                          \
-  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }    \
-  switch (mctmp) {                                                   \
-    case 0: for(;;) { *mcdst++ = *mcsrc++;                           \
-    case 7:           *mcdst++ = *mcsrc++;                           \
-    case 6:           *mcdst++ = *mcsrc++;                           \
-    case 5:           *mcdst++ = *mcsrc++;                           \
-    case 4:           *mcdst++ = *mcsrc++;                           \
-    case 3:           *mcdst++ = *mcsrc++;                           \
-    case 2:           *mcdst++ = *mcsrc++;                           \
-    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
-  }                                                                  \
-} while(0)
-
-#endif
 
 /* ------------------ MMAP support ------------------ */
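
For reference, a standalone, compilable rendering of the Duff's-device copy deleted above, with a tiny test harness; duff_copy and the harness names are illustrative, not code from this patch. The switch jumps into the middle of an 8-way unrolled loop, so the nwords % 8 remainder needs no separate cleanup loop. Precondition: nwords >= 1; the allocator guaranteed at least 3 words via the asserts in realloc and calloc.

#include <assert.h>
#include <stddef.h>
#include <string.h>

static void duff_copy(size_t *dst, const size_t *src, size_t nwords) {
  size_t rem = nwords;              /* words handled by the first, partial pass */
  long full;                        /* full 8-word passes remaining after it    */
  if (rem < 8) full = 0; else { full = (long)((rem - 1) / 8); rem %= 8; }
  switch (rem) {
    case 0: for (;;) { *dst++ = *src++;
    case 7:            *dst++ = *src++;
    case 6:            *dst++ = *src++;
    case 5:            *dst++ = *src++;
    case 4:            *dst++ = *src++;
    case 3:            *dst++ = *src++;
    case 2:            *dst++ = *src++;
    case 1:            *dst++ = *src++; if (full <= 0) break; full--; }
  }
}

int main(void) {
  size_t src[13], dst[13];
  for (size_t i = 0; i < 13; i++) src[i] = i * i;
  duff_copy(dst, src, 13);          /* 5-word partial pass, then one full pass */
  assert(memcmp(dst, src, sizeof src) == 0);
  return 0;
}
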
@@ -4006,9 +3916,9 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
       ncopies = copysize / sizeof(INTERNAL_SIZE_T);
       assert(ncopies >= 3);
 
-      if (ncopies > 9)
-        MALLOC_COPY(d, s, copysize);
-
+      if (ncopies > 9) {
+        memcpy(d, s, copysize);
+      }
       else {
         *(d+0) = *(s+0);
         *(d+1) = *(s+1);
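
The > 9 threshold matches the unrolled else-branch that follows in context: copies of at most 9 words (36 bytes at 4 bytes per word, exactly the "<= 36 bytes" of the removed comment block) are done with straight-line assignments, on the theory that call overhead beats memcpy for such small blocks. A standalone sketch of that fast path's shape; word_t and small_copy are illustrative stand-ins for INTERNAL_SIZE_T and the inlined realloc code:

#include <string.h>

typedef unsigned long word_t;       /* stand-in for INTERNAL_SIZE_T */

static void small_copy(word_t *d, const word_t *s, size_t ncopies) {
  if (ncopies > 9) {
    memcpy(d, s, ncopies * sizeof(word_t));
  }
  else {                            /* 3 <= ncopies <= 9, per the assert */
    d[0] = s[0]; d[1] = s[1]; d[2] = s[2];
    if (ncopies > 4) {
      d[3] = s[3]; d[4] = s[4];
      if (ncopies > 6) {
        d[5] = s[5]; d[6] = s[6];
        if (ncopies > 8) {
          d[7] = s[7]; d[8] = s[8];
        }
      }
    }
  }
}
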
@@ -4106,7 +4016,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
       /* Must alloc, copy, free. */
       newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
       if (newmem != 0) {
-        MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
+        memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
         fREe(oldmem);
       }
     }
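
Same substitution on realloc's last-resort path: when the chunk cannot be grown in place, a fresh block is allocated, the old payload (oldsize minus the 2*SIZE_SZ of chunk overhead) is copied over, and the old block is freed. A generic rendering of the pattern in plain libc terms; realloc_by_copy is a hypothetical helper, not code from this file:

#include <stdlib.h>
#include <string.h>

static void *realloc_by_copy(void *old, size_t old_payload, size_t new_size) {
  void *p = malloc(new_size);
  if (p != NULL) {
    /* copy whichever is smaller; the allocator only reaches this
       path when growing, so the whole old payload is preserved */
    memcpy(p, old, old_payload < new_size ? old_payload : new_size);
    free(old);
  }
  return p;
}
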
@@ -4264,9 +4174,9 @@ Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
     nclears = clearsize / sizeof(INTERNAL_SIZE_T);
     assert(nclears >= 3);
 
-    if (nclears > 9)
-      MALLOC_ZERO(d, clearsize);
-
+    if (nclears > 9) {
+      memset(d, 0, clearsize);
+    }
     else {
       *(d+0) = 0;
       *(d+1) = 0;
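
This is the calloc-side twin of the realloc hunk: more than 9 words go straight to memset, smaller requests take an unrolled zeroing branch of the same shape as the copy above. Whichever branch runs, the observable behavior must stay that of standard calloc; a minimal self-check of that contract against any conforming allocator, purely illustrative and not part of the patch:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  size_t n = 7, sz = 13;
  unsigned char *p = calloc(n, sz);          /* must come back fully zeroed */
  static const unsigned char zero[7 * 13];   /* static, hence all-zero      */
  assert(p != NULL && memcmp(p, zero, n * sz) == 0);
  free(p);
  return 0;
}
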
@@ -4290,7 +4200,7 @@ Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
   {
     d = (INTERNAL_SIZE_T*)mem;
     clearsize = chunksize(p) - 2 * SIZE_SZ;
-    MALLOC_ZERO(d, clearsize);
+    memset(d, 0, clearsize);
   }
 #endif
 }
@@ -4396,7 +4306,7 @@ static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_
   remainder_size = chunksize(p);
 
   if (opts & 0x2) { /* optionally clear the elements */
-    MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
+    memset(mem, 0, remainder_size - SIZE_SZ - array_size);
  }
 
   /* If not provided, allocate the pointer array as final part of chunk */
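
Context for this last hunk: iALLOc is the engine behind dlmalloc's independent_calloc() and independent_comalloc(), and bit 0x2 of opts requests that the elements come back zeroed. A hedged usage sketch against the unprefixed dlmalloc API; this KDE build is compiled with its own prefixes, so the exported name here may differ:

#include <stdio.h>
#include <stdlib.h>

/* dlmalloc's declaration; passing chunks == 0 asks the allocator to
   carve the pointer array out of the same underlying chunk */
void **independent_calloc(size_t n_elements, size_t elem_size, void *chunks[]);

int main(void) {
  /* 8 zeroed 24-byte nodes from a single underlying allocation */
  void **nodes = independent_calloc(8, 24, 0);
  if (nodes == NULL) return 1;
  printf("first node at %p\n", nodes[0]);
  /* each element is independently freeable; per the dlmalloc docs
     the returned array itself must also be freed eventually */
  for (int i = 0; i < 8; i++) free(nodes[i]);
  free(nodes);
  return 0;
}
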