/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) 1998-2014 Zend Technologies Ltd. (http://www.zend.com) |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Andi Gutmans <andi@zend.com>                                |
   |          Zeev Suraski <zeev@zend.com>                                |
   |          Dmitry Stogov <dmitry@zend.com>                             |
   +----------------------------------------------------------------------+
*/

/* $Id$ */

/*
 * zend_alloc is designed to be a modern, CPU cache friendly memory manager
 * for PHP. Most ideas are taken from the jemalloc and tcmalloc implementations.
 *
 * All allocations are split into 3 categories:
 *
 * Huge  - the size is greater than CHUNK size (~2M by default), allocation is
 *         performed using mmap(). The result is aligned on a 2M boundary.
 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
 *         are always aligned on a page boundary.
 *
 * Small - less than 3/4 of page size. Small sizes are rounded up to the nearest
 *         greater predefined small size (there are 30 predefined sizes:
 *         8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN is allocated as a single page or a few adjacent pages.
 *         Allocation inside RUNs is implemented using a linked list of free
 *         elements. The result is aligned to 8 bytes.
 *
 * zend_alloc allocates memory from the OS by CHUNKs. These CHUNKs and huge memory
 * blocks are always aligned to a CHUNK boundary, so it's very easy to determine
 * the CHUNK owning a certain pointer. Regular CHUNKs reserve a single
 * page at the start for special purposes. It contains a bitset of free pages,
 * a few bitsets for available runs of predefined small sizes, a map of pages that
 * keeps information about the usage of each page in this CHUNK, etc.
 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in addition it
 * provides specialized and optimized routines to allocate blocks of predefined
 * sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.)
 * The library uses C preprocessor tricks that substitute calls to emalloc()
 * with more specialized routines when the requested size is known.
 */
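
/*
 * A quick illustration of the three categories above (an editorial sketch,
 * assuming the default 4096-byte page and 2 MB CHUNK):
 *
 *   emalloc(24)      -> Small: served from the 24-byte bin inside a RUN
 *   emalloc(10000)   -> Large: rounded up to 3 pages (12288 bytes) inside a CHUNK
 *   emalloc(3000000) -> Huge:  mmap()'ed on its own, aligned to a 2 MB boundary
 */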

#include "zend.h"
#include "zend_alloc.h"
#include "zend_globals.h"
#include "zend_operators.h"
#include "zend_multiply.h"

#ifdef HAVE_SIGNAL_H
# include <signal.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif

#ifdef ZEND_WIN32
# include <wincrypt.h>
# include <process.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <sys/types.h>
#include <sys/stat.h>
#if HAVE_LIMITS_H
#include <limits.h>
#endif
#include <fcntl.h>
#include <errno.h>

#ifndef _WIN32
# ifdef HAVE_MREMAP
#  ifndef _GNU_SOURCE
#   define _GNU_SOURCE
#  endif
#  ifndef __USE_GNU
#   define __USE_GNU
#  endif
# endif
# include <sys/mman.h>
# ifndef MAP_ANON
#  ifdef MAP_ANONYMOUS
#   define MAP_ANON MAP_ANONYMOUS
#  endif
# endif
# ifndef MREMAP_MAYMOVE
#  define MREMAP_MAYMOVE 0
# endif
# ifndef MAP_FAILED
#  define MAP_FAILED ((void*)-1)
# endif
# ifndef MAP_POPULATE
#  define MAP_POPULATE 0
# endif
#endif

#ifndef ZEND_MM_STAT
# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
#endif
#ifndef ZEND_MM_LIMIT
# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
#endif
#ifndef ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
#endif
#ifndef ZEND_MM_ERROR
# define ZEND_MM_ERROR 1   /* report system errors                           */
#endif

#ifndef ZEND_MM_CHECK
# define ZEND_MM_CHECK(condition, message)  do { \
		if (UNEXPECTED(!(condition))) { \
			zend_mm_panic(message); \
		} \
	} while (0)
#endif

typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */

#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
	(((size_t)(size)) & ((alignment) - 1))
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
	(((size_t)(size)) & ~((alignment) - 1))
#define ZEND_MM_ALIGNED_SIZE_EX(size, alignment) \
	(((size_t)(size) + ((alignment) - 1)) & ~((alignment) - 1))
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
	(((size_t)(size) + ((alignment) - 1)) / (alignment))

#define ZEND_MM_BITSET_LEN     (sizeof(zend_mm_bitset) * 8)         /* 32 or 64 */
#define ZEND_MM_PAGE_MAP_LEN   (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8  */

typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];      /* 64B */
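
/*
 * Editorial note: worked examples of the alignment helpers above (illustrative
 * only, assuming a 4096-byte page and a 2 MB chunk):
 *
 *   ZEND_MM_ALIGNED_SIZE_EX(5000, 4096)             == 8192 (round up to whole pages)
 *   ZEND_MM_SIZE_TO_NUM(5000, 4096)                 == 2    (pages needed)
 *   ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE)           (start of the owning CHUNK)
 *   ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE)         (byte offset inside it)
 */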

#define ZEND_MM_IS_FRUN                  0x00000000
#define ZEND_MM_IS_LRUN                  0x40000000
#define ZEND_MM_IS_SRUN                  0x80000000

#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
#define ZEND_MM_LRUN_PAGES_OFFSET        0

#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0

#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)

#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))

#define ZEND_MM_BINS 30

typedef struct  _zend_mm_page      zend_mm_page;
typedef struct  _zend_mm_bin       zend_mm_bin;
typedef struct  _zend_mm_free_slot zend_mm_free_slot;
typedef struct  _zend_mm_chunk     zend_mm_chunk;
typedef struct  _zend_mm_huge_list zend_mm_huge_list;

#ifdef _WIN64
# define PTR_FMT "0x%0.16I64x"
#elif SIZEOF_LONG == 8
# define PTR_FMT "0x%0.16lx"
#else
# define PTR_FMT "0x%0.8lx"
#endif

/*
 * Memory is retrieved from the OS in chunks of fixed size (2 MB).
 * Inside each chunk, memory is managed in pages of fixed size (4096 B).
 * So each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
 *
 * free_pages - current number of free pages in this chunk
 *
 * free_tail  - number of contiguous free pages at the end of the chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the corresponding
 *              page is allocated. The allocator for "large sizes" may easily find
 *              a free page (or a contiguous run of pages) by searching for zero
 *              bits.
 *
 * map        - contains service information for each page (32 bits for each
 *              page).
 *    usage:
 *              (2 bits)
 *              FRUN - free page,
 *              LRUN - first page of a "large" allocation
 *              SRUN - first page of a bin used for "small" allocation
 *
 *    lrun_pages:
 *              (10 bits) number of allocated pages
 *
 *    srun_bin_num:
 *              (5 bits) bin number (e.g. 0 for sizes 0-8, 1 for 9-16,
 *               2 for 17-24, 3 for 25-32 etc.) see zend_alloc_sizes.h
 */
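
/*
 * Editorial sketch (for orientation only) of how the free/realloc paths below
 * map a pointer back to its bookkeeping data, using the macros defined above:
 *
 *   zend_mm_chunk *chunk   = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
 *   size_t page_offset     = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
 *   int page_num           = page_offset / ZEND_MM_PAGE_SIZE;
 *   zend_mm_page_info info = chunk->map[page_num];   (FRUN/LRUN/SRUN plus details)
 *
 * A page_offset of 0 means the pointer is a chunk-aligned huge block and is
 * looked up in heap->huge_list instead.
 */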

struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
	int                use_custom_heap;
#endif
#if ZEND_MM_STAT
	size_t             size;                    /* current memory usage */
	size_t             peak;                    /* peak memory usage */
#endif
	zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	size_t             real_size;               /* current size of allocated pages */
#endif
#if ZEND_MM_STAT
	size_t             real_peak;               /* peak size of allocated pages */
#endif
#if ZEND_MM_LIMIT
	size_t             limit;                   /* memory limit */
	int                overflow;                /* memory overflow flag */
#endif

	zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */

	zend_mm_chunk     *main_chunk;
	zend_mm_chunk     *cached_chunks;           /* list of unused chunks */
	int                chunks_count;            /* number of allocated chunks */
	int                peak_chunks_count;       /* peak number of allocated chunks for current request */
	int                cached_chunks_count;     /* number of cached chunks */
	double             avg_chunks_count;        /* average number of chunks allocated per request */
#if ZEND_MM_CUSTOM
	void              *(*_malloc)(size_t);
	void               (*_free)(void*);
	void              *(*_realloc)(void*, size_t);
#endif
};

struct _zend_mm_chunk {
	zend_mm_heap      *heap;
	zend_mm_chunk     *next;
	zend_mm_chunk     *prev;
	int                free_pages;              /* number of free pages */
	int                free_tail;               /* number of free pages at the end of chunk */
	int                num;
	char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)];
	zend_mm_heap       heap_slot;               /* used only in main chunk */
	zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
	zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
};

struct _zend_mm_page {
	char               bytes[ZEND_MM_PAGE_SIZE];
};

/*
 * A bin is one or a few contiguous pages (up to 8) used for allocation of
 * a particular "small size".
 */
struct _zend_mm_bin {
	char               bytes[ZEND_MM_PAGE_SIZE * 8];
};

#if ZEND_DEBUG
typedef struct _zend_mm_debug_info {
	size_t             size;
	const char        *filename;
	const char        *orig_filename;
	uint               lineno;
	uint               orig_lineno;
} zend_mm_debug_info;
#endif

struct _zend_mm_free_slot {
	zend_mm_free_slot *next_free_slot;
};

struct _zend_mm_huge_list {
	void              *ptr;
	size_t             size;
	zend_mm_huge_list *next;
#if ZEND_DEBUG
	zend_mm_debug_info dbg;
#endif
};

#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
	((void*)(((zend_mm_page*)(chunk)) + (page_num)))

#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
static const unsigned int bin_data_size[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
};

#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
static const int bin_elements[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
};

#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
static const int bin_pages[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
};

#if ZEND_DEBUG
void zend_debug_alloc_output(char *format, ...)
{
	char output_buf[256];
	va_list args;

	va_start(args, format);
	vsprintf(output_buf, format, args);
	va_end(args);

#ifdef ZEND_WIN32
	OutputDebugString(output_buf);
#else
	fprintf(stderr, "%s", output_buf);
#endif
}
#endif

static ZEND_NORETURN void zend_mm_panic(const char *message)
{
	fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef PHP_WIN32
	fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
	kill(getpid(), SIGSEGV);
#endif
	exit(1);
}

static ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
	const char *format,
	size_t limit,
#if ZEND_DEBUG
	const char *filename,
	uint lineno,
#endif
	size_t size)
{
	TSRMLS_FETCH();

	heap->overflow = 1;
	zend_try {
		zend_error_noreturn(E_ERROR,
			format,
			limit,
#if ZEND_DEBUG
			filename,
			lineno,
#endif
			size);
	} zend_catch {
	} zend_end_try();
	heap->overflow = 0;
	zend_bailout();
	exit(1);
}

#ifdef _WIN32
void
stderr_last_error(char *msg)
{
	LPSTR buf = NULL;
	DWORD err = GetLastError();

	if (!FormatMessage(
			FORMAT_MESSAGE_ALLOCATE_BUFFER |
			FORMAT_MESSAGE_FROM_SYSTEM |
			FORMAT_MESSAGE_IGNORE_INSERTS,
			NULL,
			err,
			MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
			(LPSTR)&buf,
		0, NULL)) {
		fprintf(stderr, "\n%s: [0x%08x]\n", msg, err);
	}
	else {
		fprintf(stderr, "\n%s: [0x%08x] %s\n", msg, err, buf);
	}
}
#endif

/*****************/
/* OS Allocation */
/*****************/

static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
	return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	} else if (ptr != addr) {
		if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
			fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		}
		return NULL;
	}
	return ptr;
#endif
}

static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

	if (ptr == NULL) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualAlloc() failed");
#endif
		return NULL;
	}
	return ptr;
#else
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	}
	return ptr;
#endif
}

static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualFree() failed");
#endif
	}
#else
	if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
	}
#endif
}

/***********/
/* Bitmask */
/***********/

/* number of trailing set (1) bits */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if defined(__GNUC__)
	return __builtin_ctzl(~bitset);
#elif defined(_WIN32)
	unsigned long index;

#if defined(_WIN64)
	if (!BitScanForward64(&index, ~bitset)) {
#else
	if (!BitScanForward(&index, ~bitset)) {
#endif
		/* undefined behavior */
		return 32;
	}

	return (int)index;
#else
	int n;

	if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;

	n = 0;
#if SIZEOF_ZEND_LONG == 8
	if (sizeof(zend_mm_bitset) == 8) {
		if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
	}
#endif
	if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
	if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
	if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
	if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
	return n + (bitset & 1);
#endif
}

/* number of trailing zero bits (0x01 -> 0; 0x40 -> 6; 0x00 -> LEN) */
static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
{
#if defined(__GNUC__)
	return __builtin_ctzl(bitset);
#elif defined(_WIN32)
	unsigned long index;

#if defined(_WIN64)
	if (!BitScanForward64(&index, bitset)) {
#else
	if (!BitScanForward(&index, bitset)) {
#endif
		/* undefined behavior */
		return 32;
	}

	return (int)index;
#else
	int n;

	if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;

	n = 1;
#if SIZEOF_ZEND_LONG == 8
	if (sizeof(zend_mm_bitset) == 8) {
		if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
	}
#endif
	if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
	if ((bitset & 0x000000ff) == 0) {n +=  8; bitset = bitset >>  8;}
	if ((bitset & 0x0000000f) == 0) {n +=  4; bitset = bitset >>  4;}
	if ((bitset & 0x00000003) == 0) {n +=  2; bitset = bitset >>  2;}
	return n - (bitset & 1);
#endif
}

static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
{
	int i = 0;

	do {
		zend_mm_bitset tmp = bitset[i];
		if (tmp != (zend_mm_bitset)-1) {
			return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
		}
		i++;
	} while (i < size);
	return -1;
}

static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
{
	int i = 0;

	do {
		zend_mm_bitset tmp = bitset[i];
		if (tmp != 0) {
			return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
		}
		i++;
	} while (i < size);
	return -1;
}

static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
{
	int i = 0;

	do {
		zend_mm_bitset tmp = bitset[i];
		if (tmp != (zend_mm_bitset)-1) {
			int n = zend_mm_bitset_nts(tmp);
			bitset[i] |= Z_UL(1) << n;
			return i * ZEND_MM_BITSET_LEN + n;
		}
		i++;
	} while (i < size);
	return -1;
}

static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
{
	return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
}

static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
{
	bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}

static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
{
	bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
}

static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_set_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			bitset[pos++] |= tmp;
			while (pos != end) {
				/* set all bits */
				bitset[pos++] = (zend_mm_bitset)-1;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		}
	}
}

static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_reset_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = ~((Z_L(1) << bit) - 1);
			bitset[pos++] &= ~tmp;
			while (pos != end) {
				/* reset all bits */
				bitset[pos++] = 0;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		}
	}
}

static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		return !zend_mm_bitset_is_set(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			if ((bitset[pos++] & tmp) != 0) {
				return 0;
			}
			while (pos != end) {
				/* check all bits */
				if (bitset[pos++] != 0) {
					return 0;
				}
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		}
	}
}
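
/*
 * Editorial example of the range helpers above (assuming 64-bit bitset words):
 * zend_mm_bitset_set_range(map, 62, 4) sets bits 62..65, i.e. the two highest
 * bits of map[0] and the two lowest bits of map[1]; a subsequent
 * zend_mm_bitset_is_free_range(map, 62, 4) then returns 0 (not free).
 */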

/**********/
/* Chunks */
/**********/

static void *zend_mm_chunk_alloc(size_t size, size_t alignment)
{
	void *ptr = zend_mm_mmap(size);

	if (ptr == NULL) {
		return NULL;
	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
#ifdef MADV_HUGEPAGE
		madvise(ptr, size, MADV_HUGEPAGE);
#endif
		return ptr;
	} else {
		size_t offset;

		/* chunk has to be aligned */
		zend_mm_munmap(ptr, size);
		ptr = zend_mm_mmap(size + alignment - ZEND_MM_PAGE_SIZE);
#ifdef _WIN32
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		zend_mm_munmap(ptr, size + alignment - ZEND_MM_PAGE_SIZE);
		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		if (offset != 0) {
			zend_mm_munmap(ptr, size);
			return NULL;
		}
		return ptr;
#else
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		if (offset != 0) {
			offset = alignment - offset;
			zend_mm_munmap(ptr, offset);
			ptr = (char*)ptr + offset;
		} else {
			zend_mm_munmap((char*)ptr + size, alignment - ZEND_MM_PAGE_SIZE);
		}
# ifdef MADV_HUGEPAGE
		madvise(ptr, size, MADV_HUGEPAGE);
# endif
#endif
		return ptr;
	}
}

static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	chunk->heap = heap;
	chunk->next = heap->main_chunk;
	chunk->prev = heap->main_chunk->prev;
	chunk->prev->next = chunk;
	chunk->next->prev = chunk;
	/* mark first pages as allocated */
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	/* the younger chunks have bigger number */
	chunk->num = chunk->prev->num + 1;
	/* mark first pages as allocated */
	chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
}

/***********************/
/* Huge Runs (forward) */
/***********************/

static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);

#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
#endif

/**************/
/* Large Runs */
/**************/

#if ZEND_DEBUG
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_chunk *chunk = heap->main_chunk;
	int page_num, len;

	while (1) {
		if (UNEXPECTED(chunk->free_pages < pages_count)) {
			goto not_found;
#if 0
		} else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
			if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
				goto not_found;
			} else {
				page_num = chunk->free_tail;
				goto found;
			}
		} else if (0) {
			/* First-Fit Search */
			int free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			int i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					len = i - page_num;
					if (len >= pages_count) {
						goto found;
					} else if (i >= free_tail) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
				if (len >= pages_count) {
					goto found;
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
#endif
		} else {
			/* Best-Fit Search */
			int best = -1;
			int best_len = ZEND_MM_PAGES;
			int free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			int i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						if (best > 0) {
							page_num = best;
							goto found;
						} else {
							goto not_found;
						}
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					if (i >= free_tail) {
						len = ZEND_MM_PAGES - page_num;
						if (len >= pages_count && len < best_len) {
							chunk->free_tail = page_num + pages_count;
							goto found;
						} else {
							/* set accurate value */
							chunk->free_tail = page_num;
							if (best > 0) {
								page_num = best;
								goto found;
							} else {
								goto not_found;
							}
						}
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = i + zend_mm_bitset_ntz(tmp) - page_num;
				if (len >= pages_count) {
					if (len == pages_count) {
						goto found;
					} else if (len < best_len) {
						best_len = len;
						best = page_num;
					}
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
		}

not_found:
		if (chunk->next == heap->main_chunk) {
			if (heap->cached_chunks) {
				heap->cached_chunks_count--;
				chunk = heap->cached_chunks;
				heap->cached_chunks = chunk->next;
			} else {
#if ZEND_MM_LIMIT
				if (heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit) {
					if (heap->overflow == 0) {
#if ZEND_DEBUG
						zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#endif
				chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
				if (UNEXPECTED(chunk == NULL)) {
					/* insufficient memory */
#if !ZEND_MM_LIMIT
					zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
					zend_mm_safe_error(heap, "Out of memory (allocated %ld) at %s:%d (tried to allocate %lu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
					zend_mm_safe_error(heap, "Out of memory (allocated %ld) (tried to allocate %lu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
#endif
					return NULL;
				}
#if ZEND_MM_STAT
				do {
					size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
					size_t peak = MAX(heap->real_peak, size);
					heap->real_size = size;
					heap->real_peak = peak;
				} while (0);
#elif ZEND_MM_LIMIT
				heap->real_size += ZEND_MM_CHUNK_SIZE;
#endif
			}
			heap->chunks_count++;
			if (heap->chunks_count > heap->peak_chunks_count) {
				heap->peak_chunks_count = heap->chunks_count;
			}
			zend_mm_chunk_init(heap, chunk);
			page_num = ZEND_MM_FIRST_PAGE;
			len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
			goto found;
		} else {
			chunk = chunk->next;
		}
	}

found:
	/* mark run as allocated */
	chunk->free_pages -= pages_count;
	zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
	if (page_num == chunk->free_tail) {
		chunk->free_tail = page_num + pages_count;
	}
	return ZEND_MM_PAGE_ADDR(chunk, page_num);
}

static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	int pages_count = ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
#if ZEND_DEBUG
	void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	do {
		size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif
	return ptr;
}

static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
	chunk->free_pages += pages_count;
	zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = 0;
	if (chunk->free_tail == page_num + pages_count) {
		/* this setting may not be accurate */
		chunk->free_tail = page_num;
	}
	if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
		/* delete chunk */
		chunk->next->prev = chunk->prev;
		chunk->prev->next = chunk->next;
		heap->chunks_count--;
		if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
			/* delay deletion */
			heap->cached_chunks_count++;
			chunk->next = heap->cached_chunks;
			heap->cached_chunks = chunk;
		} else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
			heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
			if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
				zend_mm_munmap(chunk, ZEND_MM_CHUNK_SIZE);
			} else {
//TODO: select the best chunk to delete???
				chunk->next = heap->cached_chunks->next;
				zend_mm_munmap(heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
				heap->cached_chunks = chunk;
			}
		}
	}
}

static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
#if ZEND_MM_STAT
	heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
	zend_mm_free_pages(heap, chunk, page_num, pages_count);
}

/**************/
/* Small Runs */
/**************/

/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
static zend_always_inline int zend_mm_small_size_to_bit(int size)
{
#if defined(__GNUC__)
	return (__builtin_clz(size) ^ 0x1f) + 1;
#elif defined(_WIN32)
	unsigned long index;

	if (!BitScanReverse(&index, (unsigned long)size)) {
		/* undefined behavior */
		return 64;
	}

	return (((31 - (int)index) ^ 0x1f) + 1);
#else
	int n = 16;
	if (size <= 0x00ff) {n -= 8; size = size << 8;}
	if (size <= 0x0fff) {n -= 4; size = size << 4;}
	if (size <= 0x3fff) {n -= 2; size = size << 2;}
	if (size <= 0x7fff) {n -= 1;}
	return n;
#endif
}

#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
{
#if 0
	int n;
                            /*0, 1, 2, 3, 4, 5, 6, 7, 8, 9 10, 11, 12*/
	static const int f1[] = { 3, 3, 3, 3, 3, 3, 3, 4, 5, 6,  7,  8,  9};
	static const int f2[] = { 0, 0, 0, 0, 0, 0, 0, 4, 8, 12, 16, 20, 24};

	if (UNEXPECTED(size <= 2)) return 0;
	n = zend_mm_small_size_to_bit(size - 1);
	return ((size-1) >> f1[n]) + f2[n];
#else
	int t1, t2, t3;

	if (UNEXPECTED(size <= 8)) return 0;
	t1 = (int)(size - 1);
	t2 = zend_mm_small_size_to_bit(t1);
	t3 = t2 - 6;
	t3 = (t3 < 0) ? 0 : t3;
	t2 = t3 + 3;
	t1 = t1 >> t2;
	t3 = t3 << 2;
	return t1 + t3;
#endif
}
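
/*
 * Editorial sketch of the mapping above, assuming the default bin sizes from
 * zend_alloc_sizes.h (8, 16, 24, ... 3072):
 *
 *   size  1..8   -> bin 0  (8 bytes)
 *   size  9..16  -> bin 1  (16 bytes)
 *   size 17..24  -> bin 2  (24 bytes)
 *   size 65..80  -> bin 8  (80 bytes)
 *   size 3072    -> bin 29 (3072 bytes, the largest "small" size)
 */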

#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)

static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_bin *bin;
	zend_mm_free_slot *p, *end;

#if ZEND_DEBUG
	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
	if (UNEXPECTED(bin == NULL)) {
		/* insufficient memory */
		return NULL;
	}

	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
	page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
	chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
	if (bin_pages[bin_num] > 1) {
		int i = 1;

		do {
			chunk->map[page_num+i] = ZEND_MM_SRUN(bin_num);
			i++;
		} while (i < bin_pages[bin_num]);
	}

	/* create a linked list of elements from 1 to last */
	end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
	heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
	do {
		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
#if ZEND_DEBUG
		do {
			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
			dbg->size = 0;
		} while (0);
#endif
		p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
	} while (p != end);

	/* terminate list using NULL */
	p->next_free_slot = NULL;
#if ZEND_DEBUG
	do {
		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		dbg->size = 0;
	} while (0);
#endif

	/* return first element */
	return (char*)bin;
}

static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_STAT
	do {
		size_t size = heap->size + bin_data_size[bin_num];
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif

	if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
		zend_mm_free_slot *p = heap->free_slot[bin_num];
		heap->free_slot[bin_num] = p->next_free_slot;
		return (void*)p;
	} else {
		return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}

static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
{
	zend_mm_free_slot *p;

#if ZEND_MM_STAT
	heap->size -= bin_data_size[bin_num];
#endif

#if ZEND_DEBUG
	do {
		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		dbg->size = 0;
	} while (0);
#endif

	p = (zend_mm_free_slot*)ptr;
	p->next_free_slot = heap->free_slot[bin_num];
	heap->free_slot[bin_num] = p;
}

/********/
/* Heap */
/********/

#if ZEND_DEBUG
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_page_info info;

	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	page_num = page_offset / ZEND_MM_PAGE_SIZE;
	info = chunk->map[page_num];
	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
		int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
		return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	} else /* if (info & ZEND_MM_IS_LRUN) */ {
		int pages_count = ZEND_MM_LRUN_PAGES(info);

		return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	}
}
#endif

static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ptr;
#if ZEND_DEBUG
	size_t real_size = size;
	zend_mm_debug_info *dbg;

	/* special handling for zero-size allocation */
	size = MAX(size, 1);
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
	if (size <= ZEND_MM_MAX_SMALL_SIZE) {
		ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else {
#if ZEND_DEBUG
		size = real_size;
#endif
		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}

static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		if (ptr != NULL) {
			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
		zend_mm_page_info info = chunk->map[page_num];

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
		} else /* if (info & ZEND_MM_IS_LRUN) */ {
			int pages_count = ZEND_MM_LRUN_PAGES(info);

			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			zend_mm_free_large(heap, chunk, page_num, pages_count);
		}
	}
}

static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
		return dbg->size;
#else
		int page_num;
		zend_mm_page_info info;

		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		page_num = page_offset / ZEND_MM_PAGE_SIZE;
		info = chunk->map[page_num];
		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
		}
#endif
	}
}

static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset;
	size_t old_size;
	size_t new_size;
	void *ret;
#if ZEND_DEBUG
	size_t real_size;
	zend_mm_debug_info *dbg;
#endif

	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(page_offset == 0)) {
		if (UNEXPECTED(ptr == NULL)) {
			return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
		old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		real_size = size;
		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
		if (size > ZEND_MM_MAX_LARGE_SIZE) {
#if ZEND_DEBUG
			size = real_size;
#endif
			new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
			if (new_size == old_size) {
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
#ifndef _WIN32
			} else if (new_size < old_size) {
				/* unmap tail */
				zend_mm_munmap((char*)ptr + new_size, old_size - new_size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
				heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			} else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
				if (heap->real_size + (new_size - old_size) > heap->limit) {
					if (heap->overflow == 0) {
#if ZEND_DEBUG
						zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, size);
#endif
						return NULL;
					}
				}
#endif
				/* try to map tail right after this block */
				if (zend_mm_mmap_fixed((char*)ptr + old_size, new_size - old_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
					heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
					heap->size += new_size - old_size;
#endif
#if ZEND_DEBUG
					zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
					zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
					return ptr;
				}
#endif
			}
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
		zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
		size_t real_size = size;

		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (info & ZEND_MM_IS_SRUN) {
			int old_bin_num, bin_num;

			old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
			old_size = bin_data_size[old_bin_num];
			bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
			if (old_bin_num == bin_num) {
#if ZEND_DEBUG
				dbg = zend_mm_get_debug_info(heap, ptr);
				dbg->size = real_size;
				dbg->filename = __zend_filename;
				dbg->orig_filename = __zend_orig_filename;
				dbg->lineno = __zend_lineno;
				dbg->orig_lineno = __zend_orig_lineno;
#endif
				return ptr;
			}
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
				if (new_size == old_size) {
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else if (new_size < old_size) {
					/* free tail pages */
					int new_pages_count = new_size / ZEND_MM_PAGE_SIZE;
					int rest_pages_count = (old_size - new_size) / ZEND_MM_PAGE_SIZE;

#if ZEND_MM_STAT
					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
					chunk->free_pages += rest_pages_count;
					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else /* if (new_size > old_size) */ {
					int new_pages_count = new_size / ZEND_MM_PAGE_SIZE;
					int old_pages_count = old_size / ZEND_MM_PAGE_SIZE;

					/* try to allocate tail pages after this block */
					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
						do {
							size_t size = heap->size + (new_size - old_size);
							size_t peak = MAX(heap->peak, size);
							heap->size = size;
							heap->peak = peak;
						} while (0);
#endif
						chunk->free_pages -= new_pages_count - old_pages_count;
						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
						dbg = zend_mm_get_debug_info(heap, ptr);
						dbg->size = real_size;
						dbg->filename = __zend_filename;
						dbg->orig_filename = __zend_orig_filename;
						dbg->lineno = __zend_lineno;
						dbg->orig_lineno = __zend_orig_lineno;
#endif
						return ptr;
					}
				}
			}
		}
#if ZEND_DEBUG
		size = real_size;
#endif
	}

	/* Naive reallocation */
	old_size = zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memcpy(ret, ptr, MIN(old_size, size));
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	return ret;
}

/*********************/
/* Huge Runs (again) */
/*********************/
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
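/*
 * Editor's note (not part of the original source): huge allocations are
 * tracked in a singly linked list of zend_mm_huge_list records hanging off
 * heap->huge_list.  Each record stores the mapped pointer, the mapped size
 * and, in debug builds, the allocation site.  The add/delete/lookup/update
 * helpers below therefore walk the list in O(n), which stays cheap because
 * only allocations larger than a chunk end up here.
 */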
#if ZEND_DEBUG
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);

	list->ptr = ptr;
	list->size = size;
	list->next = heap->huge_list;
#if ZEND_DEBUG
	list->dbg.size = dbg_size;
	list->dbg.filename = __zend_filename;
	list->dbg.orig_filename = __zend_orig_filename;
	list->dbg.lineno = __zend_lineno;
	list->dbg.orig_lineno = __zend_orig_lineno;
#endif
	heap->huge_list = list;
}

static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_huge_list *prev = NULL;
	zend_mm_huge_list *list = heap->huge_list;

	while (list != NULL) {
		if (list->ptr == ptr) {
			size_t size;

			if (prev) {
				prev->next = list->next;
			} else {
				heap->huge_list = list->next;
			}
			size = list->size;
			zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
			return size;
		}
		prev = list;
		list = list->next;
	}
	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
	return 0;
}

static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_huge_list *list = heap->huge_list;

	while (list != NULL) {
		if (list->ptr == ptr) {
			return list->size;
		}
		list = list->next;
	}
	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
	return 0;
}

#if ZEND_DEBUG
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_huge_list *list = heap->huge_list;

	while (list != NULL) {
		if (list->ptr == ptr) {
			list->size = size;
#if ZEND_DEBUG
			list->dbg.size = dbg_size;
			list->dbg.filename = __zend_filename;
			list->dbg.orig_filename = __zend_orig_filename;
			list->dbg.lineno = __zend_lineno;
			list->dbg.orig_lineno = __zend_orig_lineno;
#endif
			return;
		}
		list = list->next;
	}
}

static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
	void *ptr;

#if ZEND_MM_LIMIT
	if (heap->real_size + new_size > heap->limit) {
		if (heap->overflow == 0) {
#if ZEND_DEBUG
			zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate %lu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate %lu bytes)", heap->limit, size);
#endif
			return NULL;
		}
	}
#endif
	ptr = zend_mm_chunk_alloc(new_size, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(ptr == NULL)) {
		/* insufficient memory */
#if !ZEND_MM_LIMIT
		zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
		zend_mm_safe_error(heap, "Out of memory (allocated %ld) at %s:%d (tried to allocate %lu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
		zend_mm_safe_error(heap, "Out of memory (allocated %ld) (tried to allocate %lu bytes)", heap->real_size, size);
#endif
		return NULL;
	}
#if ZEND_DEBUG
	zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	do {
		size_t size = heap->real_size + new_size;
		size_t peak = MAX(heap->real_peak, size);
		heap->real_size = size;
		heap->real_peak = peak;
	} while (0);
	do {
		size_t size = heap->size + new_size;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#elif ZEND_MM_LIMIT
	heap->real_size += new_size;
#endif
	return ptr;
}

static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t size;

	ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
	size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	zend_mm_munmap(ptr, size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size -= size;
#endif
#if ZEND_MM_STAT
	heap->size -= size;
#endif
}
2006-07-18 17:06:33 +08:00
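/*
 * Editor's note: zend_mm_alloc_huge() above rounds the request up to whole
 * pages, maps it with zend_mm_chunk_alloc() aligned to the CHUNK size, and
 * records it in heap->huge_list; zend_mm_free_huge() reverses the
 * bookkeeping and munmap()s the region.  Both paths update real_size (and,
 * with ZEND_MM_STAT, size/peak), so huge blocks are fully visible to the
 * memory_limit accounting.
 */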
/******************/
/* Initialization */
/******************/

zend_mm_heap *zend_mm_init(void)
{
	zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
	zend_mm_heap *heap;

	if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	heap = &chunk->heap_slot;
	chunk->heap = heap;
	chunk->next = chunk;
	chunk->prev = chunk;
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	chunk->num = 0;
	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	heap->main_chunk = chunk;
	heap->cached_chunks = NULL;
	heap->chunks_count = 1;
	heap->peak_chunks_count = 1;
	heap->cached_chunks_count = 0;
	heap->avg_chunks_count = 1.0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
	heap->real_peak = ZEND_MM_CHUNK_SIZE;
	heap->size = 0;
	heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
	heap->limit = (Z_L(-1) >> Z_L(1));
	heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = 0;
#endif
	heap->huge_list = NULL;
	return heap;
}
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
# if ZEND_DEBUG
/******************/
/* Leak detection */
/******************/
static zend_long zend_mm_find_leaks_small ( zend_mm_chunk * p , int i , int j , zend_leak_info * leak )
{
int empty = 1 ;
zend_long count = 0 ;
int bin_num = ZEND_MM_SRUN_BIN_NUM ( p - > map [ i ] ) ;
zend_mm_debug_info * dbg = ( zend_mm_debug_info * ) ( ( char * ) p + ZEND_MM_PAGE_SIZE * i + bin_data_size [ bin_num ] * ( j + 1 ) - ZEND_MM_ALIGNED_SIZE ( sizeof ( zend_mm_debug_info ) ) ) ;
while ( j < bin_elements [ bin_num ] ) {
if ( dbg - > size ! = 0 ) {
if ( dbg - > filename = = leak - > filename & & dbg - > lineno = = leak - > lineno ) {
count + + ;
dbg - > size = 0 ;
dbg - > filename = NULL ;
dbg - > lineno = 0 ;
} else {
empty = 0 ;
}
}
j + + ;
dbg = ( zend_mm_debug_info * ) ( ( char * ) dbg + bin_data_size [ bin_num ] ) ;
2014-07-18 16:27:31 +08:00
}
2014-08-26 20:21:58 +08:00
if ( empty ) {
zend_mm_bitset_reset_range ( p - > free_map , i , bin_pages [ bin_num ] ) ;
2006-07-18 17:06:33 +08:00
}
2014-08-26 20:21:58 +08:00
return count ;
2014-07-16 16:35:48 +08:00
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
static zend_long zend_mm_find_leaks ( zend_mm_heap * heap , zend_mm_chunk * p , int i , zend_leak_info * leak )
2014-07-16 16:35:48 +08:00
{
2014-08-26 20:21:58 +08:00
zend_long count = 0 ;
2007-03-23 15:59:26 +08:00
2014-08-26 20:21:58 +08:00
do {
while ( i < p - > free_tail ) {
if ( zend_mm_bitset_is_set ( p - > free_map , i ) ) {
if ( p - > map [ i ] & ZEND_MM_IS_SRUN ) {
int bin_num = ZEND_MM_SRUN_BIN_NUM ( p - > map [ i ] ) ;
count + = zend_mm_find_leaks_small ( p , i , 0 , leak ) ;
i + = bin_pages [ bin_num ] ;
} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
int pages_count = ZEND_MM_LRUN_PAGES ( p - > map [ i ] ) ;
zend_mm_debug_info * dbg = ( zend_mm_debug_info * ) ( ( char * ) p + ZEND_MM_PAGE_SIZE * ( i + pages_count ) - ZEND_MM_ALIGNED_SIZE ( sizeof ( zend_mm_debug_info ) ) ) ;
2007-03-23 15:59:26 +08:00
2014-08-26 20:21:58 +08:00
if ( dbg - > filename = = leak - > filename & & dbg - > lineno = = leak - > lineno ) {
count + + ;
}
zend_mm_bitset_reset_range ( p - > free_map , i , pages_count ) ;
i + = pages_count ;
}
} else {
i + + ;
2014-07-18 16:27:31 +08:00
}
2006-07-18 17:06:33 +08:00
}
2014-08-26 20:21:58 +08:00
p = p - > next ;
} while ( p ! = heap - > main_chunk ) ;
return count ;
}
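/*
 * Editor's note: zend_mm_find_leaks() and zend_mm_find_leaks_small() scan
 * the remaining used pages for further blocks allocated at the same
 * file:line as an already reported leak; those are only counted (and their
 * debug info cleared) so that the report below can print one message plus a
 * "repeated" count instead of one message per leaked block.
 */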
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
static void zend_mm_check_leaks ( zend_mm_heap * heap TSRMLS_DC )
{
zend_mm_huge_list * list ;
zend_mm_chunk * p ;
zend_leak_info leak ;
zend_long repeated = 0 ;
uint32_t total = 0 ;
int i , j ;
2007-03-20 14:46:48 +08:00
2014-08-26 20:21:58 +08:00
/* find leaked huge blocks and free them */
list = heap - > huge_list ;
while ( list ) {
zend_mm_huge_list * q = list ;
2014-07-16 16:35:48 +08:00
2014-08-26 20:21:58 +08:00
heap - > huge_list = list - > next ;
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
leak . addr = list - > ptr ;
leak . size = list - > dbg . size ;
leak . filename = list - > dbg . filename ;
leak . orig_filename = list - > dbg . orig_filename ;
leak . lineno = list - > dbg . lineno ;
leak . orig_lineno = list - > dbg . orig_lineno ;
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
zend_message_dispatcher ( ZMSG_LOG_SCRIPT_NAME , NULL TSRMLS_CC ) ;
zend_message_dispatcher ( ZMSG_MEMORY_LEAK_DETECTED , & leak TSRMLS_CC ) ;
			/* TODO: count repeated leaks of huge blocks here (a zend_mm_find_leaks_huge() helper does not exist yet) */
total + = 1 + repeated ;
if ( repeated ) {
zend_message_dispatcher ( ZMSG_MEMORY_LEAK_REPEATED , ( void * ) ( zend_uintptr_t ) repeated TSRMLS_CC ) ;
}
list = list - > next ;
zend_mm_munmap ( q - > ptr , q - > size ) ;
zend_mm_free_heap ( heap , q , NULL , 0 , NULL , 0 ) ;
}
/* for each chunk */
p = heap - > main_chunk ;
do {
i = ZEND_MM_FIRST_PAGE ;
while ( i < p - > free_tail ) {
if ( zend_mm_bitset_is_set ( p - > free_map , i ) ) {
if ( p - > map [ i ] & ZEND_MM_IS_SRUN ) {
int bin_num = ZEND_MM_SRUN_BIN_NUM ( p - > map [ i ] ) ;
zend_mm_debug_info * dbg = ( zend_mm_debug_info * ) ( ( char * ) p + ZEND_MM_PAGE_SIZE * i + bin_data_size [ bin_num ] - ZEND_MM_ALIGNED_SIZE ( sizeof ( zend_mm_debug_info ) ) ) ;
j = 0 ;
while ( j < bin_elements [ bin_num ] ) {
if ( dbg - > size ! = 0 ) {
leak . addr = ( zend_mm_debug_info * ) ( ( char * ) p + ZEND_MM_PAGE_SIZE * i + bin_data_size [ bin_num ] * j ) ;
leak . size = dbg - > size ;
leak . filename = dbg - > filename ;
leak . orig_filename = dbg - > orig_filename ;
leak . lineno = dbg - > lineno ;
leak . orig_lineno = dbg - > orig_lineno ;
zend_message_dispatcher ( ZMSG_LOG_SCRIPT_NAME , NULL TSRMLS_CC ) ;
zend_message_dispatcher ( ZMSG_MEMORY_LEAK_DETECTED , & leak TSRMLS_CC ) ;
dbg - > size = 0 ;
dbg - > filename = NULL ;
dbg - > lineno = 0 ;
repeated = zend_mm_find_leaks_small ( p , i , j + 1 , & leak ) +
zend_mm_find_leaks ( heap , p , i + bin_pages [ bin_num ] , & leak ) ;
total + = 1 + repeated ;
if ( repeated ) {
zend_message_dispatcher ( ZMSG_MEMORY_LEAK_REPEATED , ( void * ) ( zend_uintptr_t ) repeated TSRMLS_CC ) ;
}
}
dbg = ( zend_mm_debug_info * ) ( ( char * ) dbg + bin_data_size [ bin_num ] ) ;
j + + ;
}
i + = bin_pages [ bin_num ] ;
} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
int pages_count = ZEND_MM_LRUN_PAGES ( p - > map [ i ] ) ;
zend_mm_debug_info * dbg = ( zend_mm_debug_info * ) ( ( char * ) p + ZEND_MM_PAGE_SIZE * ( i + pages_count ) - ZEND_MM_ALIGNED_SIZE ( sizeof ( zend_mm_debug_info ) ) ) ;
leak . addr = ( void * ) ( ( char * ) p + ZEND_MM_PAGE_SIZE * i ) ;
leak . size = dbg - > size ;
leak . filename = dbg - > filename ;
leak . orig_filename = dbg - > orig_filename ;
leak . lineno = dbg - > lineno ;
leak . orig_lineno = dbg - > orig_lineno ;
zend_message_dispatcher ( ZMSG_LOG_SCRIPT_NAME , NULL TSRMLS_CC ) ;
zend_message_dispatcher ( ZMSG_MEMORY_LEAK_DETECTED , & leak TSRMLS_CC ) ;
zend_mm_bitset_reset_range ( p - > free_map , i , pages_count ) ;
repeated = zend_mm_find_leaks ( heap , p , i + pages_count , & leak ) ;
total + = 1 + repeated ;
if ( repeated ) {
zend_message_dispatcher ( ZMSG_MEMORY_LEAK_REPEATED , ( void * ) ( zend_uintptr_t ) repeated TSRMLS_CC ) ;
}
i + = pages_count ;
}
} else {
i + + ;
}
2014-07-18 16:27:31 +08:00
}
2014-08-26 20:21:58 +08:00
p = p - > next ;
} while ( p ! = heap - > main_chunk ) ;
if ( total ) {
zend_message_dispatcher ( ZMSG_MEMORY_LEAKS_GRAND_TOTAL , & total TSRMLS_CC ) ;
2014-07-16 16:35:48 +08:00
}
2014-08-26 20:21:58 +08:00
}
2006-12-05 00:20:02 +08:00
# endif
2014-07-16 16:35:48 +08:00
2014-08-26 20:21:58 +08:00
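/*
 * Editor's note: zend_mm_shutdown() serves two cases: at the end of a
 * request (full == 0) it recycles chunks into the cache and re-initializes
 * the first chunk for the next request, while at real interpreter shutdown
 * (full != 0) it unmaps everything.  With ZEND_DEBUG, leaks are reported
 * first unless silent is set.
 */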
void zend_mm_shutdown ( zend_mm_heap * heap , int full , int silent TSRMLS_DC )
{
zend_mm_chunk * p ;
zend_mm_huge_list * list ;
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
# if ZEND_MM_CUSTOM
if ( heap - > use_custom_heap ) {
return ;
}
2006-07-18 17:06:33 +08:00
# endif
2006-12-20 18:49:33 +08:00
2014-07-18 16:27:31 +08:00
# if ZEND_DEBUG
2014-08-26 20:21:58 +08:00
if ( ! silent ) {
zend_mm_check_leaks ( heap TSRMLS_CC ) ;
}
2014-07-16 16:35:48 +08:00
# endif
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
/* free huge blocks */
list = heap - > huge_list ;
while ( list ) {
zend_mm_huge_list * q = list ;
list = list - > next ;
zend_mm_munmap ( q - > ptr , q - > size ) ;
}
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
	/* move all chunks except the first one into the cache */
p = heap - > main_chunk - > next ;
while ( p ! = heap - > main_chunk ) {
zend_mm_chunk * q = p - > next ;
p - > next = heap - > cached_chunks ;
heap - > cached_chunks = p ;
p = q ;
heap - > chunks_count - - ;
heap - > cached_chunks_count + + ;
2014-07-16 16:35:48 +08:00
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
if ( full ) {
/* free all cached chunks */
while ( heap - > cached_chunks ) {
p = heap - > cached_chunks ;
heap - > cached_chunks = p - > next ;
zend_mm_munmap ( p , ZEND_MM_CHUNK_SIZE ) ;
}
/* free the first chunk */
zend_mm_munmap ( heap - > main_chunk , ZEND_MM_CHUNK_SIZE ) ;
} else {
zend_mm_heap old_heap ;
/* free some cached chunks to keep average count */
heap - > avg_chunks_count = ( heap - > avg_chunks_count + ( double ) heap - > peak_chunks_count ) / 2.0 ;
while ( ( double ) heap - > cached_chunks_count + 0.9 > heap - > avg_chunks_count & &
heap - > cached_chunks ) {
p = heap - > cached_chunks ;
heap - > cached_chunks = p - > next ;
zend_mm_munmap ( p , ZEND_MM_CHUNK_SIZE ) ;
heap - > cached_chunks_count - - ;
}
/* clear cached chunks */
p = heap - > cached_chunks ;
while ( p ! = NULL ) {
zend_mm_chunk * q = p - > next ;
memset ( p , 0 , sizeof ( zend_mm_chunk ) ) ;
p - > next = q ;
p = q ;
}
/* reinitialize the first chunk and heap */
old_heap = * heap ;
p = heap - > main_chunk ;
memset ( p , 0 , ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE ) ;
* heap = old_heap ;
memset ( heap - > free_slot , 0 , sizeof ( heap - > free_slot ) ) ;
heap - > main_chunk = p ;
p - > heap = & p - > heap_slot ;
p - > next = p ;
p - > prev = p ;
p - > free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE ;
p - > free_tail = ZEND_MM_FIRST_PAGE ;
		p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
p - > map [ 0 ] = ZEND_MM_LRUN ( ZEND_MM_FIRST_PAGE ) ;
heap - > chunks_count = 1 ;
heap - > peak_chunks_count = 1 ;
# if ZEND_MM_STAT || ZEND_MM_LIMIT
heap - > real_size = ZEND_MM_CHUNK_SIZE ;
# endif
# if ZEND_MM_STAT
heap - > real_peak = ZEND_MM_CHUNK_SIZE ;
# endif
}
}
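/*
 * Editor's note, with a small worked example of the cache sizing above
 * (numbers are illustrative only): if avg_chunks_count was 1.0 and this
 * request peaked at 5 chunks, the new average is (1.0 + 5) / 2 = 3.0, and
 * the eviction loop unmaps cached chunks while cached_chunks_count + 0.9
 * exceeds 3.0, i.e. until only 2 chunks remain cached for the next request.
 */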
/**************/
/* PUBLIC API */
/**************/
2014-07-16 16:35:48 +08:00
2006-07-18 17:06:33 +08:00
ZEND_API void * _zend_mm_alloc ( zend_mm_heap * heap , size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
{
2014-08-26 20:21:58 +08:00
return zend_mm_alloc_heap ( heap , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2006-07-18 17:06:33 +08:00
}
2014-08-26 20:21:58 +08:00
ZEND_API void _zend_mm_free ( zend_mm_heap * heap , void * ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2006-07-18 17:06:33 +08:00
{
2014-08-26 20:21:58 +08:00
zend_mm_free_heap ( heap , ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2006-07-18 17:06:33 +08:00
}
2014-08-26 20:21:58 +08:00
void * _zend_mm_realloc ( zend_mm_heap * heap , void * ptr , size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2006-07-18 17:06:33 +08:00
{
2014-08-26 20:21:58 +08:00
return zend_mm_realloc_heap ( heap , ptr , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2006-07-18 17:06:33 +08:00
}
2014-08-26 20:21:58 +08:00
ZEND_API size_t _zend_mm_block_size ( zend_mm_heap * heap , void * ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2006-07-18 17:06:33 +08:00
{
2014-08-26 20:21:58 +08:00
return zend_mm_size ( heap , ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2014-07-18 16:27:31 +08:00
}
2006-07-18 17:06:33 +08:00
/**********************/
/* Allocation Manager */
/**********************/
typedef struct _zend_alloc_globals {
zend_mm_heap * mm_heap ;
} zend_alloc_globals ;
# ifdef ZTS
2014-09-21 03:22:14 +08:00
TSRMG_D ( zend_alloc_globals , alloc_globals_id ) ;
2006-07-18 17:06:33 +08:00
# define AG(v) TSRMG(alloc_globals_id, zend_alloc_globals *, v)
# else
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals ;
# endif
2006-09-14 16:00:44 +08:00
ZEND_API int is_zend_mm ( TSRMLS_D )
{
2014-08-26 20:21:58 +08:00
# if ZEND_MM_CUSTOM
return ! AG ( mm_heap ) - > use_custom_heap ;
# else
return 1 ;
# endif
}
#if !ZEND_DEBUG && !defined(_WIN32)
#undef _emalloc

#if ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			return AG(mm_heap)->_malloc(size); \
		} \
	} while (0)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			AG(mm_heap)->_free(ptr); \
			return; \
		} \
	} while (0)
#else
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif

#define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
	ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
		TSRMLS_FETCH(); \
		ZEND_MM_CUSTOM_ALLOCATOR(_size); \
		return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
	}

ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)

ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	TSRMLS_FETCH();

	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}

ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{
	TSRMLS_FETCH();

	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_huge(AG(mm_heap), size);
}

#define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
		TSRMLS_FETCH(); \
		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
		{ \
			size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
			int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
			ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
			zend_mm_free_small(AG(mm_heap), ptr, _num); \
		} \
	}

ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)

ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{
	TSRMLS_FETCH();

	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	{
		size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
		int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;

		ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
		ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
		ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
		zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
	}
}

ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{
	TSRMLS_FETCH();

	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	// TODO: use size???
	zend_mm_free_huge(AG(mm_heap), ptr);
}
#endif
2006-09-14 16:00:44 +08:00
2014-08-26 20:21:58 +08:00
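/*
 * Editor's note: on non-debug, non-Windows builds the macros above expand
 * (through ZEND_MM_BINS_INFO) into one _emalloc_<size>()/_efree_<size>()
 * pair per small bin, plus the large/huge entry points, so callers that
 * know the size at compile time can skip the size dispatch entirely.  The
 * generic entry points below remain the fallback.  A minimal illustrative
 * sketch (assuming an 8-byte bin exists on this build):
 *
 *     void *p = _emalloc_8();   // fixed-size fast path
 *     _efree_8(p);
 *     void *q = _emalloc(n);    // generic path, any size
 *     _efree(q);
 */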
ZEND_API void * ZEND_FASTCALL _emalloc ( size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2006-07-18 17:06:33 +08:00
{
TSRMLS_FETCH ( ) ;
2014-08-26 20:21:58 +08:00
# if ZEND_MM_CUSTOM
if ( UNEXPECTED ( AG ( mm_heap ) - > use_custom_heap ) ) {
2007-11-06 20:06:05 +08:00
return AG ( mm_heap ) - > _malloc ( size ) ;
2006-07-18 17:06:33 +08:00
}
2014-08-26 20:21:58 +08:00
# endif
return zend_mm_alloc_heap ( AG ( mm_heap ) , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2006-07-18 17:06:33 +08:00
}
2014-08-26 20:21:58 +08:00
ZEND_API void ZEND_FASTCALL _efree ( void * ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2006-07-18 17:06:33 +08:00
{
TSRMLS_FETCH ( ) ;
2014-08-26 20:21:58 +08:00
# if ZEND_MM_CUSTOM
if ( UNEXPECTED ( AG ( mm_heap ) - > use_custom_heap ) ) {
2007-11-06 20:06:05 +08:00
AG ( mm_heap ) - > _free ( ptr ) ;
2006-07-18 17:06:33 +08:00
return ;
}
2014-08-26 20:21:58 +08:00
# endif
zend_mm_free_heap ( AG ( mm_heap ) , ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2006-07-18 17:06:33 +08:00
}
2014-08-26 20:21:58 +08:00
ZEND_API void * ZEND_FASTCALL _erealloc ( void * ptr , size_t size , int allow_failure ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
1999-04-08 02:10:10 +08:00
{
2001-07-28 18:51:54 +08:00
TSRMLS_FETCH ( ) ;
1999-04-08 02:10:10 +08:00
2014-08-26 20:21:58 +08:00
if ( UNEXPECTED ( AG ( mm_heap ) - > use_custom_heap ) ) {
2007-11-06 20:06:05 +08:00
return AG ( mm_heap ) - > _realloc ( ptr , size ) ;
1999-04-08 02:10:10 +08:00
}
2014-08-26 20:21:58 +08:00
return zend_mm_realloc_heap ( AG ( mm_heap ) , ptr , size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2006-07-18 17:06:33 +08:00
}
1999-04-08 02:10:10 +08:00
2014-08-26 20:21:58 +08:00
ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size ( void * ptr TSRMLS_DC ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2006-07-18 17:06:33 +08:00
{
2014-08-26 20:21:58 +08:00
if ( UNEXPECTED ( AG ( mm_heap ) - > use_custom_heap ) ) {
2006-07-18 17:06:33 +08:00
return 0 ;
1999-04-08 02:10:10 +08:00
}
2014-08-26 20:21:58 +08:00
return zend_mm_size ( AG ( mm_heap ) , ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
1999-04-08 02:10:10 +08:00
}
2014-09-18 17:31:25 +08:00
static zend_always_inline size_t safe_address ( size_t nmemb , size_t size , size_t offset )
2014-09-07 04:12:37 +08:00
{
2014-09-18 17:31:25 +08:00
int overflow ;
size_t ret = zend_safe_address ( nmemb , size , offset , & overflow ) ;
2014-09-07 04:12:37 +08:00
if ( UNEXPECTED ( overflow ) ) {
zend_error_noreturn ( E_ERROR , " Possible integer overflow in memory allocation (%zu * %zu + %zu) " , nmemb , size , offset ) ;
return 0 ;
}
2014-09-18 17:31:25 +08:00
return ret ;
2014-09-07 04:12:37 +08:00
}
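/*
 * Editor's note, illustrative numbers only: safe_address() guards the
 * common "nmemb * size + offset" pattern, so safe_address(100, 16, 8)
 * simply yields 1608, while a product that cannot be represented in a
 * size_t (e.g. nmemb == SIZE_MAX / 2 with size == 4) sets the overflow flag
 * from zend_safe_address() and aborts with E_ERROR instead of returning a
 * wrapped-around, too-small size.
 */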
2014-08-26 20:21:58 +08:00
ZEND_API void * ZEND_FASTCALL _safe_emalloc ( size_t nmemb , size_t size , size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2007-03-20 14:46:48 +08:00
{
2014-09-07 04:12:37 +08:00
return emalloc_rel ( safe_address ( nmemb , size , offset ) ) ;
2003-04-24 11:35:06 +08:00
}
2014-08-26 20:21:58 +08:00
ZEND_API void * ZEND_FASTCALL _safe_malloc ( size_t nmemb , size_t size , size_t offset )
2004-07-21 05:55:57 +08:00
{
2014-09-07 04:12:37 +08:00
return pemalloc ( safe_address ( nmemb , size , offset ) , 1 ) ;
2004-07-21 05:55:57 +08:00
}
2003-04-24 11:35:06 +08:00
2014-08-26 20:21:58 +08:00
ZEND_API void * ZEND_FASTCALL _safe_erealloc ( void * ptr , size_t nmemb , size_t size , size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2007-02-17 02:06:28 +08:00
{
2014-09-07 04:12:37 +08:00
return erealloc_rel ( ptr , safe_address ( nmemb , size , offset ) ) ;
2007-02-17 02:06:28 +08:00
}
2014-08-26 20:21:58 +08:00
ZEND_API void * ZEND_FASTCALL _safe_realloc ( void * ptr , size_t nmemb , size_t size , size_t offset )
2007-02-17 02:06:28 +08:00
{
2014-09-07 04:12:37 +08:00
return perealloc ( ptr , safe_address ( nmemb , size , offset ) , 1 ) ;
2007-02-17 02:06:28 +08:00
}
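/*
 * Editor's note: the _safe_* wrappers above differ only in the underlying
 * allocator: _safe_emalloc()/_safe_erealloc() go through the per-request
 * heap, while _safe_malloc()/_safe_realloc() use the persistent path
 * (pemalloc/perealloc with persistent == 1).  All of them funnel the
 * overflow check through safe_address().
 */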
2014-08-26 20:21:58 +08:00
ZEND_API void * ZEND_FASTCALL _ecalloc ( size_t nmemb , size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2002-04-28 14:24:15 +08:00
{
void * p ;
2012-01-29 19:17:07 +08:00
# ifdef ZEND_SIGNALS
2011-06-03 05:16:50 +08:00
TSRMLS_FETCH ( ) ;
# endif
HANDLE_BLOCK_INTERRUPTIONS ( ) ;
2006-07-18 17:06:33 +08:00
2006-10-01 01:12:06 +08:00
p = _safe_emalloc ( nmemb , size , 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2007-03-20 14:46:48 +08:00
if ( UNEXPECTED ( p = = NULL ) ) {
2011-06-03 05:16:50 +08:00
HANDLE_UNBLOCK_INTERRUPTIONS ( ) ;
2007-03-20 14:46:48 +08:00
return p ;
2002-04-28 14:24:15 +08:00
}
2006-10-01 01:12:06 +08:00
memset ( p , 0 , size * nmemb ) ;
2011-06-03 05:16:50 +08:00
HANDLE_UNBLOCK_INTERRUPTIONS ( ) ;
2002-04-28 14:24:15 +08:00
return p ;
}
2014-08-26 20:21:58 +08:00
ZEND_API char * ZEND_FASTCALL _estrdup ( const char * s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
1999-04-08 02:10:10 +08:00
{
2014-08-26 01:24:55 +08:00
size_t length ;
1999-04-08 02:10:10 +08:00
char * p ;
2012-01-29 19:17:07 +08:00
# ifdef ZEND_SIGNALS
2011-06-03 05:16:50 +08:00
TSRMLS_FETCH ( ) ;
# endif
HANDLE_BLOCK_INTERRUPTIONS ( ) ;
1999-04-08 02:10:10 +08:00
length = strlen ( s ) + 1 ;
1999-08-28 18:18:54 +08:00
p = ( char * ) _emalloc ( length ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2007-03-20 14:46:48 +08:00
if ( UNEXPECTED ( p = = NULL ) ) {
2011-06-03 05:16:50 +08:00
HANDLE_UNBLOCK_INTERRUPTIONS ( ) ;
2007-03-20 14:46:48 +08:00
return p ;
1999-04-08 02:10:10 +08:00
}
2001-04-28 23:59:39 +08:00
memcpy ( p , s , length ) ;
2011-06-03 05:16:50 +08:00
HANDLE_UNBLOCK_INTERRUPTIONS ( ) ;
1999-04-08 02:10:10 +08:00
return p ;
}
2014-08-26 20:21:58 +08:00
ZEND_API char * ZEND_FASTCALL _estrndup ( const char * s , size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
1999-04-08 02:10:10 +08:00
{
char * p ;
2012-01-29 19:17:07 +08:00
# ifdef ZEND_SIGNALS
2011-06-03 05:16:50 +08:00
TSRMLS_FETCH ( ) ;
# endif
HANDLE_BLOCK_INTERRUPTIONS ( ) ;
1999-04-08 02:10:10 +08:00
1999-08-28 18:18:54 +08:00
p = ( char * ) _emalloc ( length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC ) ;
2007-03-20 14:46:48 +08:00
if ( UNEXPECTED ( p = = NULL ) ) {
2011-06-03 05:16:50 +08:00
HANDLE_UNBLOCK_INTERRUPTIONS ( ) ;
2007-03-20 14:46:48 +08:00
return p ;
1999-04-08 02:10:10 +08:00
}
2001-04-28 23:59:39 +08:00
memcpy ( p , s , length ) ;
p [ length ] = 0 ;
2011-06-03 05:16:50 +08:00
HANDLE_UNBLOCK_INTERRUPTIONS ( ) ;
1999-04-08 02:10:10 +08:00
return p ;
}
2014-08-26 20:21:58 +08:00
ZEND_API char * ZEND_FASTCALL zend_strndup ( const char * s , size_t length )
1999-04-08 02:10:10 +08:00
{
char * p ;
2012-01-29 19:17:07 +08:00
# ifdef ZEND_SIGNALS
2011-06-03 05:16:50 +08:00
TSRMLS_FETCH ( ) ;
# endif
HANDLE_BLOCK_INTERRUPTIONS ( ) ;
1999-04-08 02:10:10 +08:00
p = ( char * ) malloc ( length + 1 ) ;
2007-03-20 14:46:48 +08:00
if ( UNEXPECTED ( p = = NULL ) ) {
2011-06-03 05:16:50 +08:00
HANDLE_UNBLOCK_INTERRUPTIONS ( ) ;
2007-03-20 14:46:48 +08:00
return p ;
1999-04-08 02:10:10 +08:00
}
if ( length ) {
2001-04-28 23:59:39 +08:00
memcpy ( p , s , length ) ;
1999-04-08 02:10:10 +08:00
}
2001-04-28 23:59:39 +08:00
p [ length ] = 0 ;
2011-06-03 05:16:50 +08:00
HANDLE_UNBLOCK_INTERRUPTIONS ( ) ;
1999-04-08 02:10:10 +08:00
return p ;
}
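/*
 * Editor's note: _estrdup()/_estrndup() copy into request memory (released
 * with efree), and _estrndup() always NUL-terminates at the given length.
 * zend_strndup() is the persistent counterpart built on plain malloc(), so
 * its result must be released with free() and may outlive the request.
 */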
2013-11-18 08:36:17 +08:00
ZEND_API int zend_set_memory_limit ( size_t memory_limit TSRMLS_DC )
1999-04-10 22:44:35 +08:00
{
2014-08-26 20:21:58 +08:00
# if ZEND_MM_LIMIT
AG ( mm_heap ) - > limit = ( memory_limit > = ZEND_MM_CHUNK_SIZE ) ? memory_limit : ZEND_MM_CHUNK_SIZE ;
# endif
1999-04-10 22:44:35 +08:00
return SUCCESS ;
}
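/*
 * Editor's note: with ZEND_MM_LIMIT enabled, a requested limit smaller than
 * one chunk is silently raised to ZEND_MM_CHUNK_SIZE, since the heap always
 * owns at least one chunk.
 */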
2006-07-25 21:40:05 +08:00
ZEND_API size_t zend_memory_usage ( int real_usage TSRMLS_DC )
1999-04-08 02:10:10 +08:00
{
2014-08-26 20:21:58 +08:00
# if ZEND_MM_STAT
2006-07-25 21:40:05 +08:00
if ( real_usage ) {
return AG ( mm_heap ) - > real_size ;
} else {
2009-05-31 00:42:13 +08:00
size_t usage = AG ( mm_heap ) - > size ;
return usage ;
2006-07-25 21:40:05 +08:00
}
2014-08-26 20:21:58 +08:00
# endif
return 0 ;
1999-04-08 02:10:10 +08:00
}
2006-07-25 21:40:05 +08:00
ZEND_API size_t zend_memory_peak_usage ( int real_usage TSRMLS_DC )
1999-04-08 02:10:10 +08:00
{
2014-08-26 20:21:58 +08:00
# if ZEND_MM_STAT
2006-07-25 21:40:05 +08:00
if ( real_usage ) {
return AG ( mm_heap ) - > real_peak ;
} else {
return AG ( mm_heap ) - > peak ;
}
2014-08-26 20:21:58 +08:00
# endif
return 0 ;
2006-10-12 14:46:51 +08:00
}
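/*
 * Editor's note, minimal usage sketch of the two statistics calls above
 * (only meaningful when ZEND_MM_STAT is enabled; otherwise both return 0):
 *
 *     size_t used      = zend_memory_usage(0 TSRMLS_CC);      // bytes handed out by emalloc
 *     size_t mapped    = zend_memory_usage(1 TSRMLS_CC);      // bytes taken from the OS
 *     size_t peak_used = zend_memory_peak_usage(0 TSRMLS_CC); // high-water mark
 */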
1999-04-08 02:10:10 +08:00
2006-07-18 17:06:33 +08:00
ZEND_API void shutdown_memory_manager ( int silent , int full_shutdown TSRMLS_DC )
{
2008-08-16 03:47:33 +08:00
zend_mm_shutdown ( AG ( mm_heap ) , full_shutdown , silent TSRMLS_CC ) ;
2006-07-18 17:06:33 +08:00
}
2002-06-26 19:07:35 +08:00
2006-07-18 17:06:33 +08:00
static void alloc_globals_ctor ( zend_alloc_globals * alloc_globals TSRMLS_DC )
{
2014-08-26 20:21:58 +08:00
# if ZEND_MM_CUSTOM
2010-09-08 15:52:49 +08:00
char * tmp = getenv ( " USE_ZEND_ALLOC " ) ;
2007-03-20 14:46:48 +08:00
2010-09-08 15:52:49 +08:00
if ( tmp & & ! zend_atoi ( tmp , 0 ) ) {
2014-08-26 20:21:58 +08:00
alloc_globals - > mm_heap = malloc ( sizeof ( zend_mm_heap ) ) ;
memset ( alloc_globals - > mm_heap , 0 , sizeof ( zend_mm_heap ) ) ;
alloc_globals - > mm_heap - > use_custom_heap = 1 ;
2010-09-08 15:52:49 +08:00
alloc_globals - > mm_heap - > _malloc = malloc ;
alloc_globals - > mm_heap - > _free = free ;
alloc_globals - > mm_heap - > _realloc = realloc ;
2014-08-26 20:21:58 +08:00
return ;
1999-04-08 02:10:10 +08:00
}
2014-08-26 20:21:58 +08:00
# endif
alloc_globals - > mm_heap = zend_mm_init ( ) ;
2006-07-18 17:06:33 +08:00
}
2001-08-03 15:06:05 +08:00
2006-07-18 17:06:33 +08:00
# ifdef ZTS
static void alloc_globals_dtor ( zend_alloc_globals * alloc_globals TSRMLS_DC )
{
shutdown_memory_manager ( 1 , 1 TSRMLS_CC ) ;
}
2002-06-24 15:22:25 +08:00
# endif
2002-06-25 02:49:13 +08:00
2006-07-18 17:06:33 +08:00
ZEND_API void start_memory_manager ( TSRMLS_D )
{
# ifdef ZTS
2014-09-21 03:22:14 +08:00
TSRMG_ALLOCATE ( alloc_globals_id , sizeof ( zend_alloc_globals ) , ( ts_allocate_ctor ) alloc_globals_ctor , ( ts_allocate_dtor ) alloc_globals_dtor ) ;
2006-07-18 17:06:33 +08:00
# else
alloc_globals_ctor ( & alloc_globals ) ;
1999-12-28 00:42:59 +08:00
# endif
1999-04-08 02:10:10 +08:00
}
2006-07-18 17:06:33 +08:00
ZEND_API zend_mm_heap * zend_mm_set_heap ( zend_mm_heap * new_heap TSRMLS_DC )
1999-05-12 05:38:39 +08:00
{
2006-07-18 17:06:33 +08:00
zend_mm_heap * old_heap ;
1999-05-12 05:38:39 +08:00
2006-07-18 17:06:33 +08:00
old_heap = AG ( mm_heap ) ;
2014-08-26 20:21:58 +08:00
AG ( mm_heap ) = ( zend_mm_heap * ) new_heap ;
return ( zend_mm_heap * ) old_heap ;
2007-03-20 14:46:48 +08:00
}
1999-05-12 05:38:39 +08:00
2007-11-06 15:22:13 +08:00
ZEND_API void zend_mm_set_custom_handlers ( zend_mm_heap * heap ,
2007-11-06 20:06:05 +08:00
void * ( * _malloc ) ( size_t ) ,
void ( * _free ) ( void * ) ,
void * ( * _realloc ) ( void * , size_t ) )
2007-11-06 15:22:13 +08:00
{
2014-08-26 20:21:58 +08:00
# if ZEND_MM_CUSTOM
zend_mm_heap * _heap = ( zend_mm_heap * ) heap ;
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
_heap - > use_custom_heap = 1 ;
_heap - > _malloc = _malloc ;
_heap - > _free = _free ;
_heap - > _realloc = _realloc ;
1999-04-08 02:10:10 +08:00
# endif
2014-08-26 20:21:58 +08:00
}
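/*
 * Editor's note, minimal sketch (the replacement functions are the caller's
 * own choice): an embedder can reroute a heap created by zend_mm_init()
 * through arbitrary malloc/free/realloc-compatible functions, e.g.
 *
 *     zend_mm_heap *heap = zend_mm_init();
 *     zend_mm_set_custom_handlers(heap, malloc, free, realloc);
 *
 * which is essentially the same wiring alloc_globals_ctor() performs when
 * the USE_ZEND_ALLOC environment variable disables the Zend allocator.
 */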
1999-04-08 02:10:10 +08:00
/*
* Local variables :
* tab - width : 4
* c - basic - offset : 4
2003-02-01 09:49:15 +08:00
* indent - tabs - mode : t
1999-04-08 02:10:10 +08:00
* End :
*/