1999-04-08 02:10:10 +08:00
/*
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
| Zend Engine |
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
2019-01-30 17:23:29 +08:00
| Copyright ( c ) Zend Technologies Ltd . ( http : //www.zend.com) |
1999-04-08 02:10:10 +08:00
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
2001-12-11 23:16:21 +08:00
| This source file is subject to version 2.00 of the Zend license , |
2006-07-18 17:06:33 +08:00
| that is bundled with this package in the file LICENSE , and is |
2003-06-11 04:04:29 +08:00
| available through the world - wide - web at the following url : |
2001-12-11 23:16:21 +08:00
| http : //www.zend.com/license/2_00.txt. |
1999-07-16 22:58:16 +08:00
| If you did not receive a copy of the Zend license and are unable to |
| obtain it through the world - wide - web , please send a note to |
| license @ zend . com so we can mail you a copy immediately . |
1999-04-08 02:10:10 +08:00
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
2018-11-01 23:20:07 +08:00
| Authors : Andi Gutmans < andi @ php . net > |
| Zeev Suraski < zeev @ php . net > |
| Dmitry Stogov < dmitry @ php . net > |
1999-04-08 02:10:10 +08:00
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
*/
2014-08-26 20:21:58 +08:00
/*
* zend_alloc is designed to be a modern CPU cache friendly memory manager
* for PHP . Most ideas are taken from jemalloc and tcmalloc implementations .
*
* All allocations are split into 3 categories :
*
* Huge - the size is greater than CHUNK size ( ~ 2 M by default ) , allocation is
* performed using mmap ( ) . The result is aligned on 2 M boundary .
*
* Large - a number of 4096 K pages inside a CHUNK . Large blocks
2014-11-20 03:59:31 +08:00
* are always aligned on page boundary .
2014-08-26 20:21:58 +08:00
*
* Small - less than 3 / 4 of page size . Small sizes are rounded up to nearest
* greater predefined small size ( there are 30 predefined sizes :
* 8 , 16 , 24 , 32 , . . . 3072 ) . Small blocks are allocated from
* RUNs . Each RUN is allocated as a single or few following pages .
* Allocation inside RUNs implemented using linked list of free
* elements . The result is aligned to 8 bytes .
*
* zend_alloc allocates memory from OS by CHUNKs , these CHUNKs and huge memory
* blocks are always aligned to CHUNK boundary . So it ' s very easy to determine
* the CHUNK owning the certain pointer . Regular CHUNKs reserve a single
* page at start for special purpose . It contains bitset of free pages ,
* few bitset for available runs of predefined small sizes , map of pages that
* keeps information about usage of each page in this CHUNK , etc .
*
* zend_alloc provides familiar emalloc / efree / erealloc API , but in addition it
* provides specialized and optimized routines to allocate blocks of predefined
* sizes ( e . g . emalloc_2 ( ) , emalloc_4 ( ) , . . . , emalloc_large ( ) , etc )
* The library uses C preprocessor tricks that substitute calls to emalloc ( )
* with more specialized routines when the requested size is known .
*/
1999-04-08 02:10:10 +08:00
# include "zend.h"
# include "zend_alloc.h"
# include "zend_globals.h"
2006-07-18 17:06:33 +08:00
# include "zend_operators.h"
2014-09-18 17:31:25 +08:00
# include "zend_multiply.h"
2016-05-18 04:17:22 +08:00
# include "zend_bitset.h"
2019-04-07 21:55:34 +08:00
# include <signal.h>
2006-07-18 17:06:33 +08:00
1999-09-07 00:14:08 +08:00
# ifdef HAVE_UNISTD_H
# include <unistd.h>
1999-07-09 19:19:38 +08:00
# endif
1999-04-08 02:10:10 +08:00
2006-12-18 19:39:19 +08:00
# ifdef ZEND_WIN32
# include <wincrypt.h>
# include <process.h>
2018-09-17 15:48:33 +08:00
# include "win32 / winutil.h"
2006-12-18 19:39:19 +08:00
# endif
2014-08-26 20:21:58 +08:00
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <limits.h>
# include <fcntl.h>
# include <errno.h>
# ifndef _WIN32
# include <sys / mman.h>
# ifndef MAP_ANON
# ifdef MAP_ANONYMOUS
# define MAP_ANON MAP_ANONYMOUS
# endif
# endif
# ifndef MAP_FAILED
# define MAP_FAILED ((void*)-1)
# endif
# ifndef MAP_POPULATE
# define MAP_POPULATE 0
2014-12-02 21:17:26 +08:00
# endif
# if defined(_SC_PAGESIZE) || (_SC_PAGE_SIZE)
# define REAL_PAGE_SIZE _real_page_size
static size_t _real_page_size = ZEND_MM_PAGE_SIZE ;
# endif
2018-11-26 18:47:55 +08:00
# ifdef MAP_ALIGNED_SUPER
# define MAP_HUGETLB MAP_ALIGNED_SUPER
# endif
2014-08-26 20:21:58 +08:00
# endif
2014-12-02 21:17:26 +08:00
# ifndef REAL_PAGE_SIZE
# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
2014-07-18 16:27:31 +08:00
# endif
2019-02-15 00:25:13 +08:00
/* NetBSD has an mremap() function with a signature that is incompatible with Linux (WTF?),
* so pretend it doesn ' t exist . */
# ifndef __linux__
# undef HAVE_MREMAP
# endif
2019-07-27 20:33:48 +08:00
# ifndef __APPLE__
# define ZEND_MM_FD -1
# else
/* Mac allows to track anonymous page via vmmap per TAG id.
* user land applications are allowed to take from 240 to 255.
*/
# define ZEND_MM_FD (250<<24)
# endif
2014-08-26 20:21:58 +08:00
# ifndef ZEND_MM_STAT
# define ZEND_MM_STAT 1 /* track current and peak memory usage */
# endif
# ifndef ZEND_MM_LIMIT
# define ZEND_MM_LIMIT 1 /* support for user-defined memory limit */
# endif
# ifndef ZEND_MM_CUSTOM
# define ZEND_MM_CUSTOM 1 /* support for custom memory allocator */
/* USE_ZEND_ALLOC=0 may switch to system malloc() */
# endif
2014-10-14 13:41:16 +08:00
# ifndef ZEND_MM_STORAGE
# define ZEND_MM_STORAGE 1 /* support for custom memory storage */
# endif
2014-08-26 20:21:58 +08:00
# ifndef ZEND_MM_ERROR
# define ZEND_MM_ERROR 1 /* report system errors */
2014-07-18 16:27:31 +08:00
# endif
2014-08-26 20:21:58 +08:00
# ifndef ZEND_MM_CHECK
# define ZEND_MM_CHECK(condition, message) do { \
if ( UNEXPECTED ( ! ( condition ) ) ) { \
zend_mm_panic ( message ) ; \
} \
} while ( 0 )
2014-07-18 16:27:31 +08:00
# endif
2006-12-15 21:25:26 +08:00
2014-08-26 20:21:58 +08:00
typedef uint32_t zend_mm_page_info ; /* 4-byte integer */
2014-08-27 02:43:33 +08:00
typedef zend_ulong zend_mm_bitset ; /* 4-byte or 8-byte integer */
2014-08-26 20:21:58 +08:00
# define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
( ( ( size_t ) ( size ) ) & ( ( alignment ) - 1 ) )
# define ZEND_MM_ALIGNED_BASE(size, alignment) \
( ( ( size_t ) ( size ) ) & ~ ( ( alignment ) - 1 ) )
# define ZEND_MM_SIZE_TO_NUM(size, alignment) \
( ( ( size_t ) ( size ) + ( ( alignment ) - 1 ) ) / ( alignment ) )
# define ZEND_MM_BITSET_LEN (sizeof(zend_mm_bitset) * 8) /* 32 or 64 */
# define ZEND_MM_PAGE_MAP_LEN (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
typedef zend_mm_bitset zend_mm_page_map [ ZEND_MM_PAGE_MAP_LEN ] ; /* 64B */
# define ZEND_MM_IS_FRUN 0x00000000
# define ZEND_MM_IS_LRUN 0x40000000
# define ZEND_MM_IS_SRUN 0x80000000
# define ZEND_MM_LRUN_PAGES_MASK 0x000003ff
# define ZEND_MM_LRUN_PAGES_OFFSET 0
# define ZEND_MM_SRUN_BIN_NUM_MASK 0x0000001f
# define ZEND_MM_SRUN_BIN_NUM_OFFSET 0
2015-08-04 23:21:05 +08:00
# define ZEND_MM_SRUN_FREE_COUNTER_MASK 0x01ff0000
# define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
# define ZEND_MM_NRUN_OFFSET_MASK 0x01ff0000
# define ZEND_MM_NRUN_OFFSET_OFFSET 16
2014-08-26 20:21:58 +08:00
# define ZEND_MM_LRUN_PAGES(info) (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
# define ZEND_MM_SRUN_BIN_NUM(info) (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
2015-08-04 23:21:05 +08:00
# define ZEND_MM_SRUN_FREE_COUNTER(info) (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
# define ZEND_MM_NRUN_OFFSET(info) (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
2014-08-26 20:21:58 +08:00
# define ZEND_MM_FRUN() ZEND_MM_IS_FRUN
# define ZEND_MM_LRUN(count) (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
# define ZEND_MM_SRUN(bin_num) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
2015-08-04 23:21:05 +08:00
# define ZEND_MM_SRUN_EX(bin_num, count) (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
2015-09-08 22:22:26 +08:00
# define ZEND_MM_NRUN(bin_num, offset) (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
2014-08-26 20:21:58 +08:00
# define ZEND_MM_BINS 30
typedef struct _zend_mm_page zend_mm_page ;
typedef struct _zend_mm_bin zend_mm_bin ;
typedef struct _zend_mm_free_slot zend_mm_free_slot ;
typedef struct _zend_mm_chunk zend_mm_chunk ;
typedef struct _zend_mm_huge_list zend_mm_huge_list ;
2016-03-22 23:04:53 +08:00
int zend_mm_use_huge_pages = 0 ;
2016-03-18 03:43:42 +08:00
2014-08-26 20:21:58 +08:00
/*
2018-08-10 10:19:55 +08:00
* Memory is retrieved from OS by chunks of fixed size 2 MB .
2014-08-26 20:21:58 +08:00
* Inside chunk it ' s managed by pages of fixed size 4096 B .
* So each chunk consists from 512 pages .
2019-02-19 00:35:35 +08:00
* The first page of each chunk is reserved for chunk header .
2014-08-26 20:21:58 +08:00
* It contains service information about all pages .
*
* free_pages - current number of free pages in this chunk
*
* free_tail - number of continuous free pages at the end of chunk
*
* free_map - bitset ( a bit for each page ) . The bit is set if the corresponding
* page is allocated . Allocator for " large sizes " may easily find a
* free page ( or a continuous number of pages ) searching for zero
* bits .
*
* map - contains service information for each page . ( 32 - bits for each
* page ) .
* usage :
* ( 2 bits )
* FRUN - free page ,
* LRUN - first page of " large " allocation
* SRUN - first page of a bin used for " small " allocation
*
* lrun_pages :
* ( 10 bits ) number of allocated pages
*
* srun_bin_num :
* ( 5 bits ) bin number ( e . g . 0 for sizes 0 - 2 , 1 for 3 - 4 ,
* 2 for 5 - 8 , 3 for 9 - 16 etc ) see zend_alloc_sizes . h
*/
struct _zend_mm_heap {
# if ZEND_MM_CUSTOM
int use_custom_heap ;
# endif
2014-10-14 13:41:16 +08:00
# if ZEND_MM_STORAGE
zend_mm_storage * storage ;
# endif
2014-08-26 20:21:58 +08:00
# if ZEND_MM_STAT
size_t size ; /* current memory usage */
size_t peak ; /* peak memory usage */
# endif
zend_mm_free_slot * free_slot [ ZEND_MM_BINS ] ; /* free lists for small sizes */
# if ZEND_MM_STAT || ZEND_MM_LIMIT
size_t real_size ; /* current size of allocated pages */
# endif
# if ZEND_MM_STAT
size_t real_peak ; /* peak size of allocated pages */
# endif
# if ZEND_MM_LIMIT
size_t limit ; /* memory limit */
int overflow ; /* memory overflow flag */
# endif
zend_mm_huge_list * huge_list ; /* list of huge allocated blocks */
zend_mm_chunk * main_chunk ;
zend_mm_chunk * cached_chunks ; /* list of unused chunks */
2018-08-10 10:19:55 +08:00
int chunks_count ; /* number of allocated chunks */
2014-08-26 20:21:58 +08:00
int peak_chunks_count ; /* peak number of allocated chunks for current request */
int cached_chunks_count ; /* number of cached chunks */
double avg_chunks_count ; /* average number of chunks allocated per request */
2017-10-13 18:56:06 +08:00
int last_chunks_delete_boundary ; /* number of chunks after last deletion */
int last_chunks_delete_count ; /* number of deletion over the last boundary */
2014-08-26 20:21:58 +08:00
# if ZEND_MM_CUSTOM
2015-08-11 21:33:47 +08:00
union {
struct {
void * ( * _malloc ) ( size_t ) ;
void ( * _free ) ( void * ) ;
void * ( * _realloc ) ( void * , size_t ) ;
} std ;
struct {
void * ( * _malloc ) ( size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC ) ;
void ( * _free ) ( void * ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC ) ;
void * ( * _realloc ) ( void * , size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC ) ;
} debug ;
} custom_heap ;
2019-06-27 16:30:45 +08:00
HashTable * tracked_allocs ;
2014-08-26 20:21:58 +08:00
# endif
} ;
struct _zend_mm_chunk {
zend_mm_heap * heap ;
zend_mm_chunk * next ;
zend_mm_chunk * prev ;
2016-06-21 21:55:17 +08:00
uint32_t free_pages ; /* number of free pages */
uint32_t free_tail ; /* number of free pages at the end of chunk */
uint32_t num ;
2017-09-28 22:45:51 +08:00
char reserve [ 64 - ( sizeof ( void * ) * 3 + sizeof ( uint32_t ) * 3 ) ] ;
2014-08-26 20:21:58 +08:00
zend_mm_heap heap_slot ; /* used only in main chunk */
zend_mm_page_map free_map ; /* 512 bits or 64 bytes */
zend_mm_page_info map [ ZEND_MM_PAGES ] ; /* 2 KB = 512 * 4 */
} ;
struct _zend_mm_page {
char bytes [ ZEND_MM_PAGE_SIZE ] ;
} ;
/*
2014-11-20 03:59:31 +08:00
* bin - is one or few continuous pages ( up to 8 ) used for allocation of
2014-08-26 20:21:58 +08:00
* a particular " small size " .
*/
struct _zend_mm_bin {
char bytes [ ZEND_MM_PAGE_SIZE * 8 ] ;
} ;
struct _zend_mm_free_slot {
zend_mm_free_slot * next_free_slot ;
} ;
struct _zend_mm_huge_list {
void * ptr ;
size_t size ;
zend_mm_huge_list * next ;
# if ZEND_DEBUG
zend_mm_debug_info dbg ;
# endif
} ;
# define ZEND_MM_PAGE_ADDR(chunk, page_num) \
( ( void * ) ( ( ( zend_mm_page * ) ( chunk ) ) + ( page_num ) ) )
# define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
2016-06-21 21:55:17 +08:00
static const uint32_t bin_data_size [ ] = {
2014-08-26 20:21:58 +08:00
ZEND_MM_BINS_INFO ( _BIN_DATA_SIZE , x , y )
} ;
# define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
2016-06-21 21:55:17 +08:00
static const uint32_t bin_elements [ ] = {
2014-08-26 20:21:58 +08:00
ZEND_MM_BINS_INFO ( _BIN_DATA_ELEMENTS , x , y )
} ;
# define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
2016-06-21 21:55:17 +08:00
static const uint32_t bin_pages [ ] = {
2014-08-26 20:21:58 +08:00
ZEND_MM_BINS_INFO ( _BIN_DATA_PAGES , x , y )
} ;
2006-07-18 17:06:33 +08:00
# if ZEND_DEBUG
2015-08-19 19:40:56 +08:00
/* Print a formatted allocator debug message to the debugger output
 * (Windows) or to stderr (everywhere else).
 *
 * Uses vsnprintf() rather than vsprintf(): the message may embed
 * caller-supplied strings (e.g. file names), and the fixed 256-byte
 * buffer must never be overflowed. Overlong messages are truncated. */
ZEND_COLD void zend_debug_alloc_output(char *format, ...)
{
	char output_buf[256];
	va_list args;

	va_start(args, format);
	vsnprintf(output_buf, sizeof(output_buf), format, args);
	va_end(args);

#ifdef ZEND_WIN32
	OutputDebugString(output_buf);
#else
	fprintf(stderr, "%s", output_buf);
#endif
}
2002-06-24 21:41:26 +08:00
# endif
1999-11-27 08:04:36 +08:00
2015-08-19 19:40:56 +08:00
/* Report an unrecoverable allocator inconsistency and terminate the
 * process. Used by ZEND_MM_CHECK() when heap metadata is corrupt. */
static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
{
	fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef ZEND_WIN32
	fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
	/* In debug builds raise SIGSEGV against ourselves so a debugger or
	 * core dump captures the state at the point of corruption. */
	kill(getpid(), SIGSEGV);
#endif
	exit(1);
}
2015-08-19 19:40:56 +08:00
/* Raise a fatal out-of-memory / memory-limit error and bail out of the
 * current request.
 *
 * heap->overflow is set while the error is being reported
 * (NOTE(review): presumably so that allocations performed while
 * formatting the message bypass the limit check and don't recurse back
 * here — confirm at the limit-check sites), and cleared again before
 * bailing out. In debug builds the caller also supplies the allocation
 * site (filename/lineno) for the message. */
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
	const char *format,
	size_t limit,
#if ZEND_DEBUG
	const char *filename,
	uint32_t lineno,
#endif
	size_t size)
{

	heap->overflow = 1;
	zend_try {
		zend_error_noreturn(E_ERROR,
			format,
			limit,
#if ZEND_DEBUG
			filename,
			lineno,
#endif
			size);
	} zend_catch {
	} zend_end_try();
	heap->overflow = 0;
	zend_bailout();
	/* zend_bailout() does not return; exit() is a safety net. */
	exit(1);
}
2014-09-16 18:27:25 +08:00
# ifdef _WIN32
void
stderr_last_error ( char * msg )
{
DWORD err = GetLastError ( ) ;
2018-09-17 15:48:33 +08:00
char * buf = php_win32_error_to_msg ( err ) ;
2014-09-16 18:27:25 +08:00
2018-09-17 15:48:33 +08:00
if ( ! buf [ 0 ] ) {
2014-11-12 03:28:28 +08:00
fprintf ( stderr , " \n %s: [0x%08lx] \n " , msg , err ) ;
2014-09-16 18:27:25 +08:00
}
else {
2014-11-12 03:28:28 +08:00
fprintf ( stderr , " \n %s: [0x%08lx] %s \n " , msg , err , buf ) ;
2014-09-16 18:27:25 +08:00
}
2018-09-17 15:48:33 +08:00
php_win32_error_msg_free ( buf ) ;
2014-09-16 18:27:25 +08:00
}
# endif
2014-08-26 20:21:58 +08:00
/*****************/
/* OS Allocation */
/*****************/
2014-07-18 16:27:31 +08:00
2019-02-15 00:25:13 +08:00
# ifndef HAVE_MREMAP
2014-08-26 20:21:58 +08:00
/* Try to map "size" bytes exactly at address "addr" without disturbing
 * any existing mapping there. Returns the mapping on success, or NULL
 * if the OS cannot place it at that address. */
static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
	return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
	int flags = MAP_PRIVATE | MAP_ANON;
#if defined(MAP_EXCL)
	/* BSD: request the exact address but fail instead of replacing an
	 * existing mapping. */
	flags |= MAP_FIXED | MAP_EXCL;
#endif
	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, flags /*| MAP_POPULATE | MAP_HUGETLB*/, ZEND_MM_FD, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR && !defined(MAP_EXCL)
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	} else if (ptr != addr) {
		/* The kernel placed the mapping elsewhere: undo it and report
		 * failure to the caller. */
		if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
			fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		}
		return NULL;
	}
	return ptr;
#endif
}
2019-02-15 00:25:13 +08:00
# endif
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
/* Map "size" bytes of anonymous read/write memory from the OS at any
 * address. Returns NULL on failure. When huge pages are enabled and a
 * whole chunk is requested, a MAP_HUGETLB mapping is tried first and
 * silently falls back to a regular mapping. */
static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

	if (ptr == NULL) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualAlloc() failed");
#endif
		return NULL;
	}
	return ptr;
#else
	void *ptr;

#ifdef MAP_HUGETLB
	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
		/* opportunistic: failure just means no huge pages available */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
		if (ptr != MAP_FAILED) {
			return ptr;
		}
	}
#endif
	/* ZEND_MM_FD tags the mapping on macOS for vmmap, -1 elsewhere */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, ZEND_MM_FD, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	}
	return ptr;
#endif
}
2014-08-26 20:21:58 +08:00
/* Return a mapping previously created by zend_mm_mmap*() to the OS.
 * Failures are reported (when ZEND_MM_ERROR) but not propagated. */
static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
	/* size must be 0 with MEM_RELEASE; the whole allocation is freed */
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualFree() failed");
#endif
	}
#else
	if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
	}
#endif
}
2014-08-26 20:21:58 +08:00
/***********/
/* Bitmask */
/***********/
/* number of trailing set (1) bits, i.e. the index of the lowest 0 bit
 * (implemented as count-trailing-zeros of the complement). Used to find
 * the first free page in a free_map word. */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
	return __builtin_ctzl(~bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
	return __builtin_ctzll(~bitset);
#elif defined(_WIN32)
	unsigned long index;

#if defined(_WIN64)
	if (!BitScanForward64(&index, ~bitset)) {
#else
	if (!BitScanForward(&index, ~bitset)) {
#endif
		/* undefined behavior */
		return 32;
	}
	return (int)index;
#else
	/* Portable fallback: binary search for the lowest clear bit. */
	int n;

	if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;

	n = 0;
#if SIZEOF_ZEND_LONG == 8
	if (sizeof(zend_mm_bitset) == 8) {
		if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
	}
#endif
	if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
	if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
	if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
	if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
	return n + (bitset & 1);
#endif
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Return non-zero if bit "bit" is set in the bitset array. */
static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
{
	return ZEND_BIT_TEST(bitset, bit);
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Set bit "bit" in the bitset array. */
static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
{
	int word = bit / ZEND_MM_BITSET_LEN;
	zend_mm_bitset mask = Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN - 1));

	bitset[word] |= mask;
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Clear bit "bit" in the bitset array. */
static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
{
	int word = bit / ZEND_MM_BITSET_LEN;
	zend_mm_bitset mask = Z_UL(1) << (bit & (ZEND_MM_BITSET_LEN - 1));

	bitset[word] &= ~mask;
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Set "len" consecutive bits starting at "start". The range may span
 * several bitset words; partial first/last words are masked, full
 * middle words are written whole. */
static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_set_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;          /* first word */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* last word */
		int bit = start & (ZEND_MM_BITSET_LEN - 1);    /* bit offset in first word */
		zend_mm_bitset tmp;

		if (pos != end) {
			/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			bitset[pos++] |= tmp;
			while (pos != end) {
				/* set all bits */
				bitset[pos++] = (zend_mm_bitset)-1;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		} else {
			/* range fits in a single word */
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		}
	}
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Clear "len" consecutive bits starting at "start" — the inverse of
 * zend_mm_bitset_set_range(). */
static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_reset_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;          /* first word */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* last word */
		int bit = start & (ZEND_MM_BITSET_LEN - 1);    /* bit offset in first word */
		zend_mm_bitset tmp;

		if (pos != end) {
			/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = ~((Z_UL(1) << bit) - 1);
			bitset[pos++] &= ~tmp;
			while (pos != end) {
				/* reset all bits */
				bitset[pos++] = 0;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		} else {
			/* range fits in a single word */
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		}
	}
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Return 1 if all "len" bits starting at "start" are clear, 0 otherwise. */
static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		return !zend_mm_bitset_is_set(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;          /* first word */
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN; /* last word */
		int bit = start & (ZEND_MM_BITSET_LEN - 1);    /* bit offset in first word */
		zend_mm_bitset tmp;

		if (pos != end) {
			/* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			if ((bitset[pos++] & tmp) != 0) {
				return 0;
			}
			while (pos != end) {
				/* check all bits */
				if (bitset[pos++] != 0) {
					return 0;
				}
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		} else {
			/* range fits in a single word */
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		}
	}
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/**********/
/* Chunks */
/**********/
2006-07-18 17:06:33 +08:00
2014-10-14 13:41:16 +08:00
/* Map "size" bytes aligned to "alignment" (a power of two). First try a
 * plain mapping and hope it is already aligned; otherwise over-allocate
 * by (alignment - page size) and trim the misaligned head/tail.
 * Returns NULL on failure. */
static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
{
	void *ptr = zend_mm_mmap(size);

	if (ptr == NULL) {
		return NULL;
	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
		/* lucky: the mapping happens to be aligned already */
#ifdef MADV_HUGEPAGE
		if (zend_mm_use_huge_pages) {
			madvise(ptr, size, MADV_HUGEPAGE);
		}
#endif
		return ptr;
	} else {
		size_t offset;

		/* chunk has to be aligned */
		zend_mm_munmap(ptr, size);
		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
#ifdef _WIN32
		/* Windows cannot trim a reservation, so release the whole
		 * over-sized mapping and re-reserve at the aligned address
		 * inside it (racy: may fail if another thread grabs it). */
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		if (offset != 0) {
			zend_mm_munmap(ptr, size);
			return NULL;
		}
		return ptr;
#else
		/* unmap the misaligned head, then any excess tail */
		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
		if (offset != 0) {
			offset = alignment - offset;
			zend_mm_munmap(ptr, offset);
			ptr = (char*)ptr + offset;
			alignment -= offset;
		}
		if (alignment > REAL_PAGE_SIZE) {
			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
		}
#ifdef MADV_HUGEPAGE
		if (zend_mm_use_huge_pages) {
			madvise(ptr, size, MADV_HUGEPAGE);
		}
#endif
#endif
		return ptr;
	}
}
2014-07-18 16:27:31 +08:00
2014-10-14 13:41:16 +08:00
/* Allocate an aligned chunk, delegating to the custom storage layer
 * when one is installed, otherwise mapping directly from the OS. */
static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
{
#if ZEND_MM_STORAGE
	zend_mm_storage *storage = heap->storage;

	if (UNEXPECTED(storage != NULL)) {
		void *ptr = storage->handlers.chunk_alloc(storage, size, alignment);

		/* custom handlers must honor the alignment contract */
		ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (zend_uintptr_t)ptr);
		return ptr;
	}
#endif
	return zend_mm_chunk_alloc_int(size, alignment);
}
/* Release a chunk through the custom storage layer when one is
 * installed, otherwise unmap it directly. */
static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
{
#if ZEND_MM_STORAGE
	zend_mm_storage *storage = heap->storage;

	if (UNEXPECTED(storage != NULL)) {
		storage->handlers.chunk_free(storage, addr, size);
		return;
	}
#endif
	zend_mm_munmap(addr, size);
}
2015-05-14 19:56:13 +08:00
/* Shrink the mapping at "addr" in place from old_size to new_size.
 * Returns 1 on success, 0 when the storage layer or platform cannot
 * truncate (the caller must then keep or fully free the chunk). */
static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		/* the truncate handler is optional */
		if (heap->storage->handlers.chunk_truncate) {
			return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
		} else {
			return 0;
		}
	}
#endif
#ifndef _WIN32
	/* simply unmap the tail of the region */
	zend_mm_munmap((char*)addr + new_size, old_size - new_size);
	return 1;
#else
	return 0;
#endif
}
/* Grow the mapping at "addr" in place from old_size to new_size.
 * Returns 1 on success, 0 if the region cannot be extended (caller
 * falls back to allocating a new region). */
static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
{
#if ZEND_MM_STORAGE
	if (UNEXPECTED(heap->storage)) {
		/* the extend handler is optional */
		if (heap->storage->handlers.chunk_extend) {
			return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
		} else {
			return 0;
		}
	}
#endif
#ifdef HAVE_MREMAP
	/* We don't use MREMAP_MAYMOVE due to alignment requirements. */
	void *ptr = mremap(addr, old_size, new_size, 0);
	if (ptr == MAP_FAILED) {
		return 0;
	}
	/* Sanity check: The mapping shouldn't have moved. */
	ZEND_ASSERT(ptr == addr);
	return 1;
#elif !defined(_WIN32)
	/* no mremap(): try to map the pages immediately after the region */
	return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
#else
	return 0;
#endif
}
2014-08-26 20:21:58 +08:00
/* Link a freshly mapped chunk into the heap's circular chunk list and
 * initialize its bookkeeping; the first page(s) are reserved for the
 * chunk header. */
static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	zend_mm_chunk *head = heap->main_chunk;
	zend_mm_chunk *tail = head->prev;

	/* insert just before the main chunk, i.e. at the end of the ring */
	chunk->heap = heap;
	chunk->next = head;
	chunk->prev = tail;
	tail->next = chunk;
	head->prev = chunk;

	/* everything except the reserved header page(s) is free */
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	/* the younger chunks have bigger number */
	chunk->num = tail->num + 1;
	/* mark the header page(s) as allocated */
	chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
}
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
/***********************/
/* Huge Runs (forward) */
/***********************/
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
static size_t zend_mm_get_huge_block_size ( zend_mm_heap * heap , void * ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC ) ;
static void * zend_mm_alloc_huge ( zend_mm_heap * heap , size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC ) ;
static void zend_mm_free_huge ( zend_mm_heap * heap , void * ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC ) ;
2006-07-18 17:06:33 +08:00
2006-12-15 21:25:26 +08:00
# if ZEND_DEBUG
2014-08-26 20:21:58 +08:00
static void zend_mm_change_huge_block_size ( zend_mm_heap * heap , void * ptr , size_t size , size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC ) ;
# else
static void zend_mm_change_huge_block_size ( zend_mm_heap * heap , void * ptr , size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC ) ;
2014-07-16 16:35:48 +08:00
# endif
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/**************/
/* Large Runs */
/**************/
2006-12-15 21:25:26 +08:00
2014-07-18 16:27:31 +08:00
# if ZEND_DEBUG
2016-06-21 21:55:17 +08:00
static void * zend_mm_alloc_pages ( zend_mm_heap * heap , uint32_t pages_count , size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2014-08-26 20:21:58 +08:00
# else
2016-06-21 21:55:17 +08:00
static void * zend_mm_alloc_pages ( zend_mm_heap * heap , uint32_t pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2014-07-18 16:27:31 +08:00
# endif
2014-08-26 20:21:58 +08:00
{
zend_mm_chunk * chunk = heap - > main_chunk ;
2016-06-21 21:55:17 +08:00
uint32_t page_num , len ;
2016-10-26 20:17:09 +08:00
int steps = 0 ;
2007-03-20 14:46:48 +08:00
2014-08-26 20:21:58 +08:00
while ( 1 ) {
if ( UNEXPECTED ( chunk - > free_pages < pages_count ) ) {
goto not_found ;
#if 0
} else if ( UNEXPECTED ( chunk - > free_pages + chunk - > free_tail = = ZEND_MM_PAGES ) ) {
if ( UNEXPECTED ( ZEND_MM_PAGES - chunk - > free_tail < pages_count ) ) {
goto not_found ;
} else {
page_num = chunk - > free_tail ;
goto found ;
}
} else if ( 0 ) {
/* First-Fit Search */
int free_tail = chunk - > free_tail ;
zend_mm_bitset * bitset = chunk - > free_map ;
zend_mm_bitset tmp = * ( bitset + + ) ;
int i = 0 ;
2007-03-20 14:46:48 +08:00
2014-08-26 20:21:58 +08:00
while ( 1 ) {
/* skip allocated blocks */
while ( tmp = = ( zend_mm_bitset ) - 1 ) {
i + = ZEND_MM_BITSET_LEN ;
if ( i = = ZEND_MM_PAGES ) {
goto not_found ;
}
tmp = * ( bitset + + ) ;
}
/* find first 0 bit */
page_num = i + zend_mm_bitset_nts ( tmp ) ;
/* reset bits from 0 to "bit" */
tmp & = tmp + 1 ;
/* skip free blocks */
while ( tmp = = 0 ) {
i + = ZEND_MM_BITSET_LEN ;
len = i - page_num ;
if ( len > = pages_count ) {
goto found ;
} else if ( i > = free_tail ) {
goto not_found ;
}
tmp = * ( bitset + + ) ;
}
/* find first 1 bit */
2016-05-18 04:17:22 +08:00
len = ( i + zend_ulong_ntz ( tmp ) ) - page_num ;
2014-08-26 20:21:58 +08:00
if ( len > = pages_count ) {
goto found ;
}
/* set bits from 0 to "bit" */
tmp | = tmp - 1 ;
}
2006-12-15 21:25:26 +08:00
# endif
2014-08-26 20:21:58 +08:00
} else {
/* Best-Fit Search */
int best = - 1 ;
2016-06-21 21:55:17 +08:00
uint32_t best_len = ZEND_MM_PAGES ;
uint32_t free_tail = chunk - > free_tail ;
2014-08-26 20:21:58 +08:00
zend_mm_bitset * bitset = chunk - > free_map ;
zend_mm_bitset tmp = * ( bitset + + ) ;
2016-06-21 21:55:17 +08:00
uint32_t i = 0 ;
2006-12-15 21:25:26 +08:00
2014-08-26 20:21:58 +08:00
while ( 1 ) {
/* skip allocated blocks */
while ( tmp = = ( zend_mm_bitset ) - 1 ) {
i + = ZEND_MM_BITSET_LEN ;
if ( i = = ZEND_MM_PAGES ) {
if ( best > 0 ) {
page_num = best ;
goto found ;
} else {
goto not_found ;
}
}
tmp = * ( bitset + + ) ;
}
/* find first 0 bit */
page_num = i + zend_mm_bitset_nts ( tmp ) ;
/* reset bits from 0 to "bit" */
tmp & = tmp + 1 ;
/* skip free blocks */
while ( tmp = = 0 ) {
i + = ZEND_MM_BITSET_LEN ;
2015-09-02 18:05:12 +08:00
if ( i > = free_tail | | i = = ZEND_MM_PAGES ) {
2014-08-26 20:21:58 +08:00
len = ZEND_MM_PAGES - page_num ;
if ( len > = pages_count & & len < best_len ) {
chunk - > free_tail = page_num + pages_count ;
goto found ;
} else {
/* set accurate value */
chunk - > free_tail = page_num ;
if ( best > 0 ) {
page_num = best ;
goto found ;
} else {
goto not_found ;
}
}
}
tmp = * ( bitset + + ) ;
}
/* find first 1 bit */
2016-05-18 04:17:22 +08:00
len = i + zend_ulong_ntz ( tmp ) - page_num ;
2014-08-26 20:21:58 +08:00
if ( len > = pages_count ) {
if ( len = = pages_count ) {
goto found ;
} else if ( len < best_len ) {
best_len = len ;
best = page_num ;
}
}
/* set bits from 0 to "bit" */
tmp | = tmp - 1 ;
}
}
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
not_found :
if ( chunk - > next = = heap - > main_chunk ) {
2015-08-04 23:21:05 +08:00
get_chunk :
2014-08-26 20:21:58 +08:00
if ( heap - > cached_chunks ) {
heap - > cached_chunks_count - - ;
chunk = heap - > cached_chunks ;
heap - > cached_chunks = chunk - > next ;
} else {
# if ZEND_MM_LIMIT
2019-08-26 16:23:23 +08:00
if ( UNEXPECTED ( ZEND_MM_CHUNK_SIZE > heap - > limit - heap - > real_size ) ) {
2015-08-04 23:21:05 +08:00
if ( zend_mm_gc ( heap ) ) {
goto get_chunk ;
} else if ( heap - > overflow = = 0 ) {
2014-07-18 16:27:31 +08:00
# if ZEND_DEBUG
2014-10-16 18:14:37 +08:00
zend_mm_safe_error ( heap , " Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes) " , heap - > limit , __zend_filename , __zend_lineno , size ) ;
2014-07-18 16:27:31 +08:00
# else
2014-10-16 18:14:37 +08:00
zend_mm_safe_error ( heap , " Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes) " , heap - > limit , ZEND_MM_PAGE_SIZE * pages_count ) ;
2014-07-16 16:35:48 +08:00
# endif
2014-08-26 20:21:58 +08:00
return NULL ;
}
}
2014-07-16 16:35:48 +08:00
# endif
2014-10-14 13:41:16 +08:00
chunk = ( zend_mm_chunk * ) zend_mm_chunk_alloc ( heap , ZEND_MM_CHUNK_SIZE , ZEND_MM_CHUNK_SIZE ) ;
2014-08-26 20:21:58 +08:00
if ( UNEXPECTED ( chunk = = NULL ) ) {
/* insufficient memory */
2015-08-04 23:21:05 +08:00
if ( zend_mm_gc ( heap ) & &
( chunk = ( zend_mm_chunk * ) zend_mm_chunk_alloc ( heap , ZEND_MM_CHUNK_SIZE , ZEND_MM_CHUNK_SIZE ) ) ! = NULL ) {
/* pass */
} else {
2014-09-16 17:53:26 +08:00
# if !ZEND_MM_LIMIT
2015-08-04 23:21:05 +08:00
zend_mm_safe_error ( heap , " Out of memory " ) ;
2014-09-16 17:53:26 +08:00
# elif ZEND_DEBUG
2015-08-04 23:21:05 +08:00
zend_mm_safe_error ( heap , " Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes) " , heap - > real_size , __zend_filename , __zend_lineno , size ) ;
2014-09-16 17:53:26 +08:00
# else
2015-08-04 23:21:05 +08:00
zend_mm_safe_error ( heap , " Out of memory (allocated %zu) (tried to allocate %zu bytes) " , heap - > real_size , ZEND_MM_PAGE_SIZE * pages_count ) ;
2014-09-16 17:53:26 +08:00
# endif
2015-08-04 23:21:05 +08:00
return NULL ;
}
2014-08-26 20:21:58 +08:00
}
# if ZEND_MM_STAT
do {
size_t size = heap - > real_size + ZEND_MM_CHUNK_SIZE ;
size_t peak = MAX ( heap - > real_peak , size ) ;
heap - > real_size = size ;
heap - > real_peak = peak ;
} while ( 0 ) ;
# elif ZEND_MM_LIMIT
heap - > real_size + = ZEND_MM_CHUNK_SIZE ;
2014-07-18 16:27:31 +08:00
2006-12-15 21:25:26 +08:00
# endif
2014-08-26 20:21:58 +08:00
}
heap - > chunks_count + + ;
if ( heap - > chunks_count > heap - > peak_chunks_count ) {
heap - > peak_chunks_count = heap - > chunks_count ;
}
zend_mm_chunk_init ( heap , chunk ) ;
page_num = ZEND_MM_FIRST_PAGE ;
len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE ;
goto found ;
} else {
chunk = chunk - > next ;
2016-10-26 20:17:09 +08:00
steps + + ;
2014-08-26 20:21:58 +08:00
}
2007-03-20 14:46:48 +08:00
}
2014-08-26 20:21:58 +08:00
found :
2016-10-26 20:17:09 +08:00
if ( steps > 2 & & pages_count < 8 ) {
/* move chunk into the head of the linked-list */
chunk - > prev - > next = chunk - > next ;
chunk - > next - > prev = chunk - > prev ;
chunk - > next = heap - > main_chunk - > next ;
chunk - > prev = heap - > main_chunk ;
chunk - > prev - > next = chunk ;
chunk - > next - > prev = chunk ;
}
2014-08-26 20:21:58 +08:00
/* mark run as allocated */
chunk - > free_pages - = pages_count ;
zend_mm_bitset_set_range ( chunk - > free_map , page_num , pages_count ) ;
chunk - > map [ page_num ] = ZEND_MM_LRUN ( pages_count ) ;
if ( page_num = = chunk - > free_tail ) {
chunk - > free_tail = page_num + pages_count ;
2014-07-18 16:27:31 +08:00
}
2014-08-26 20:21:58 +08:00
return ZEND_MM_PAGE_ADDR ( chunk , page_num ) ;
2007-03-20 14:46:48 +08:00
}
2018-05-08 03:29:16 +08:00
/* Allocate a "large" block (a run of whole pages) and update the heap's
 * size/peak statistics. Returns NULL if the underlying page allocation
 * failed. */
static zend_always_inline void *zend_mm_alloc_large_ex(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
#if ZEND_DEBUG
	void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	do {
		size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif
	return ptr;
}
2018-05-08 03:29:16 +08:00
/* Out-of-line entry point for large allocations. The two branches have the
 * same body, but the ZEND_FILE_LINE_* macros expand to different parameter
 * lists in debug vs release builds, so both definitions are kept. */
#if ZEND_DEBUG
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
#else
static zend_never_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_large_ex(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
#endif
2015-08-04 23:21:05 +08:00
/* Unlink a fully free chunk from the heap and either cache it for reuse or
 * return it to the OS. Deletion is delayed (chunk cached) while the live
 * chunk count stays near its running average, or when the heap keeps
 * bouncing around the same chunk-count boundary (hysteresis to avoid
 * map/unmap thrashing). */
static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	chunk->next->prev = chunk->prev;
	chunk->prev->next = chunk->next;
	heap->chunks_count--;
	if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1
	 || (heap->chunks_count == heap->last_chunks_delete_boundary
	  && heap->last_chunks_delete_count >= 4)) {
		/* delay deletion */
		heap->cached_chunks_count++;
		chunk->next = heap->cached_chunks;
		heap->cached_chunks = chunk;
	} else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
		heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
		if (!heap->cached_chunks) {
			/* track repeated deletions at the same chunk count to detect
			 * thrashing (see condition above) */
			if (heap->chunks_count != heap->last_chunks_delete_boundary) {
				heap->last_chunks_delete_boundary = heap->chunks_count;
				heap->last_chunks_delete_count = 0;
			} else {
				heap->last_chunks_delete_count++;
			}
		}
		if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
			zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
		} else {
			/* TODO: select the best chunk to delete??? */
			/* prefer keeping the older (lower-numbered) chunk cached */
			chunk->next = heap->cached_chunks->next;
			zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
			heap->cached_chunks = chunk;
		}
	}
}
2016-06-21 21:55:17 +08:00
/* Return a run of pages to its chunk: clear the bitset range and page-map
 * entry, pull free_tail back when the run was at the tail, and optionally
 * delete the chunk if it became completely empty. */
static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, uint32_t page_num, uint32_t pages_count, int free_chunk)
{
	chunk->free_pages += pages_count;
	zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = 0;
	if (chunk->free_tail == page_num + pages_count) {
		/* this setting may be not accurate */
		chunk->free_tail = page_num;
	}
	if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
		zend_mm_delete_chunk(heap, chunk);
	}
}
2018-05-08 03:29:16 +08:00
/* Out-of-line wrapper: free a page run and allow chunk deletion. */
static zend_never_inline void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
	zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
}
2014-08-26 20:21:58 +08:00
/* Free a "large" allocation: adjust the heap's size statistics and release
 * the underlying pages. */
static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
#if ZEND_MM_STAT
	heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
	zend_mm_free_pages(heap, chunk, page_num, pages_count);
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/**************/
/* Small Runs */
/**************/
2014-07-18 16:27:31 +08:00
2014-09-08 15:08:05 +08:00
/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
2014-08-26 20:21:58 +08:00
static zend_always_inline int zend_mm_small_size_to_bit ( int size )
{
2015-12-03 18:28:41 +08:00
# if (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
2014-08-26 20:21:58 +08:00
return ( __builtin_clz ( size ) ^ 0x1f ) + 1 ;
2014-09-20 07:36:51 +08:00
# elif defined(_WIN32)
unsigned long index ;
if ( ! BitScanReverse ( & index , ( unsigned long ) size ) ) {
/* undefined behavior */
2014-09-20 08:14:35 +08:00
return 64 ;
2014-09-20 07:36:51 +08:00
}
return ( ( ( 31 - ( int ) index ) ^ 0x1f ) + 1 ) ;
2014-08-26 20:21:58 +08:00
# else
int n = 16 ;
if ( size < = 0x00ff ) { n - = 8 ; size = size < < 8 ; }
if ( size < = 0x0fff ) { n - = 4 ; size = size < < 4 ; }
if ( size < = 0x3fff ) { n - = 2 ; size = size < < 2 ; }
if ( size < = 0x7fff ) { n - = 1 ; }
return n ;
2014-07-18 16:27:31 +08:00
# endif
2010-10-04 23:50:47 +08:00
}
2014-08-26 20:21:58 +08:00
/* Guarded definitions: MAX/MIN may already be provided by platform headers.
 * Note: classic double-evaluation macros — do not pass expressions with
 * side effects. */
#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Map a small allocation size to its bin index. Sizes up to 64 bytes map
 * linearly in 8-byte steps (size 0 is treated as size 1); larger sizes are
 * bucketed logarithmically, four bins per power of two. */
static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
{
#if 0
	int n;
	/*0, 1, 2, 3, 4, 5, 6, 7, 8, 9 10, 11, 12*/
	static const int f1[] = { 3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9};
	static const int f2[] = { 0, 0, 0, 0, 0, 0, 0, 4, 8, 12, 16, 20, 24};

	if (UNEXPECTED(size <= 2)) return 0;
	n = zend_mm_small_size_to_bit(size - 1);
	return ((size - 1) >> f1[n]) + f2[n];
#else
	unsigned int t1, t2;

	if (size <= 64) {
		/* we need to support size == 0 ... */
		return (size - !!size) >> 3;
	} else {
		t1 = size - 1;
		t2 = zend_mm_small_size_to_bit(t1) - 3;
		t1 = t1 >> t2;
		t2 = t2 - 3;
		t2 = t2 << 2;
		return (int)(t1 + t2);
	}
#endif
}
2014-08-26 20:21:58 +08:00
# define ZEND_MM_SMALL_SIZE_TO_BIN(size) zend_mm_small_size_to_bin(size)
2006-07-18 17:06:33 +08:00
2016-06-21 21:55:17 +08:00
/* Slow path for small allocations: the bin's free list is empty, so carve a
 * new run of pages for the bin, mark its pages in the chunk map (SRUN head
 * followed by NRUN continuation pages), thread all elements after the first
 * onto the bin's free list, and return the first element. Returns NULL when
 * the page allocation fails. */
static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, uint32_t bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_bin *bin;
	zend_mm_free_slot *p, *end;

#if ZEND_DEBUG
	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
	if (UNEXPECTED(bin == NULL)) {
		/* insufficient memory */
		return NULL;
	}

	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
	page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
	chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
	if (bin_pages[bin_num] > 1) {
		uint32_t i = 1;

		do {
			chunk->map[page_num + i] = ZEND_MM_NRUN(bin_num, i);
			i++;
		} while (i < bin_pages[bin_num]);
	}

	/* create a linked list of elements from 1 to last */
	end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
	heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
	do {
		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
#if ZEND_DEBUG
		do {
			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
			dbg->size = 0;
		} while (0);
#endif
		p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
	} while (p != end);

	/* terminate list using NULL */
	p->next_free_slot = NULL;
#if ZEND_DEBUG
	do {
		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		dbg->size = 0;
	} while (0);
#endif

	/* return first element */
	return (char*)bin;
}
2014-07-16 16:35:48 +08:00
2019-03-01 19:34:17 +08:00
/* Fast path for small allocations: update statistics, then pop the head of
 * the bin's free list; fall back to the slow path when the list is empty. */
static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_STAT
	do {
		size_t size = heap->size + bin_data_size[bin_num];
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif

	if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
		zend_mm_free_slot *p = heap->free_slot[bin_num];
		heap->free_slot[bin_num] = p->next_free_slot;
		return (void*)p;
	} else {
		return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}
2014-08-26 20:21:58 +08:00
/* Free a small allocation by pushing it onto its bin's free list. In debug
 * builds the slot's trailing debug-info size is zeroed so stale metadata is
 * never misread later. */
static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
{
	zend_mm_free_slot *p;

#if ZEND_MM_STAT
	heap->size -= bin_data_size[bin_num];
#endif
#if ZEND_DEBUG
	do {
		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		dbg->size = 0;
	} while (0);
#endif

	p = (zend_mm_free_slot*)ptr;
	p->next_free_slot = heap->free_slot[bin_num];
	heap->free_slot[bin_num] = p;
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/********/
/* Heap */
/********/
2014-07-18 16:27:31 +08:00
#if ZEND_DEBUG
/* Locate the zend_mm_debug_info record stored at the tail of an allocation.
 * For small runs the record sits at the end of the bin slot; for large runs
 * at the end of the page run. Huge blocks never reach here (their offset
 * within a chunk is 0, which is rejected by the first check). */
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_page_info info;

	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
	info = chunk->map[page_num];
	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
		int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
		return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	} else /* if (info & ZEND_MM_IS_LRUN) */ {
		int pages_count = ZEND_MM_LRUN_PAGES(info);

		return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	}
}
#endif
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Central allocation dispatcher: route the request to the small, large, or
 * huge allocator based on size. In debug builds the size is padded for the
 * trailing debug-info record (with overflow check) and the record is filled
 * in after allocation. */
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ptr;
#if ZEND_DEBUG
	size_t real_size = size;
	zend_mm_debug_info *dbg;

	/* special handling for zero-size allocation */
	size = MAX(size, 1);
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
	if (UNEXPECTED(size < real_size)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		return NULL;
	}
#endif
	if (EXPECTED(size <= ZEND_MM_MAX_SMALL_SIZE)) {
		ptr = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else if (EXPECTED(size <= ZEND_MM_MAX_LARGE_SIZE)) {
		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else {
#if ZEND_DEBUG
		/* huge blocks carry no trailing debug record — undo the padding */
		size = real_size;
#endif
		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/* Central free dispatcher. A zero offset within a chunk means the pointer is
 * chunk-aligned, i.e. a huge allocation (NULL is ignored). Otherwise the
 * chunk's page map tells whether the pointer belongs to a small run or a
 * large run, with heap-ownership and alignment sanity checks. */
static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		if (ptr != NULL) {
			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
		} else /* if (info & ZEND_MM_IS_LRUN) */ {
			int pages_count = ZEND_MM_LRUN_PAGES(info);

			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			zend_mm_free_large(heap, chunk, page_num, pages_count);
		}
	}
}
2014-08-26 20:21:58 +08:00
/* Return the usable size of an allocation: the huge-block size for
 * chunk-aligned pointers, the bin slot size for small runs, or the page-run
 * size for large runs. */
static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
		return dbg->size;
#else
		int page_num;
		zend_mm_page_info info;

		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		info = chunk->map[page_num];
		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
		}
#endif
	}
}
2006-07-18 17:06:33 +08:00
2017-11-03 23:35:03 +08:00
/* Generic realloc fallback: allocate a new block, copy copy_size bytes, and
 * free the old block. With statistics enabled, the peak is recomputed from
 * the pre-realloc peak so the transient double-residency does not inflate
 * it. */
static zend_never_inline void *zend_mm_realloc_slow(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *ret;

#if ZEND_MM_STAT
	do {
		size_t orig_peak = heap->peak;
#endif
		ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		memcpy(ret, ptr, copy_size);
		zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_MM_STAT
		heap->peak = MAX(orig_peak, heap->size);
	} while (0);
#endif
	return ret;
}
/* Reallocate a huge (chunk-aligned) block. If the new size still falls in
 * the huge range, try to keep the block in place: identical rounded size is
 * a no-op, shrinking unmaps the tail, and growing attempts an in-place
 * mapping extension (after checking the memory limit and running GC if
 * needed). Only when in-place adjustment is impossible does it fall back to
 * the allocate+copy+free slow path. */
static zend_never_inline void *zend_mm_realloc_huge(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t old_size;
	size_t new_size;
#if ZEND_DEBUG
	size_t real_size;
#endif

	old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
	real_size = size;
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
	if (size > ZEND_MM_MAX_LARGE_SIZE) {
#if ZEND_DEBUG
		size = real_size;
#endif
#ifdef ZEND_WIN32
		/* On Windows we don't have ability to extend huge blocks in-place.
		 * We allocate them with 2MB size granularity, to avoid many
		 * reallocations when they are extended by small pieces
		 */
		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
		new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
		if (new_size == old_size) {
#if ZEND_DEBUG
			zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
			zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
			return ptr;
		} else if (new_size < old_size) {
			/* unmap tail */
			if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
				heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			}
		} else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
			if (UNEXPECTED(new_size - old_size > heap->limit - heap->real_size)) {
				if (zend_mm_gc(heap) && new_size - old_size <= heap->limit - heap->real_size) {
					/* pass */
				} else if (heap->overflow == 0) {
#if ZEND_DEBUG
					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
					zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
					return NULL;
				}
			}
#endif
			/* try to map tail right after this block */
			if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
				heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
				heap->real_peak = MAX(heap->real_peak, heap->real_size);
				heap->size += new_size - old_size;
				heap->peak = MAX(heap->peak, heap->size);
#endif
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			}
		}
	}

	return zend_mm_realloc_slow(heap, ptr, size, MIN(old_size, copy_size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
static zend_always_inline void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, zend_bool use_copy_size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Central realloc dispatcher: routes to huge-block handling, tries
	 * in-place small/large resizing, and falls back to alloc+copy+free
	 * (zend_mm_realloc_slow) when in-place adjustment is impossible. */
	size_t page_offset;
	size_t old_size;
	size_t new_size;
	void *ret;
#if ZEND_DEBUG
	zend_mm_debug_info *dbg;
#endif

	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(page_offset == 0)) {
		/* Chunk-aligned pointer: either NULL (plain alloc) or a huge block. */
		if (EXPECTED(ptr == NULL)) {
			return _zend_mm_alloc(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		} else {
			return zend_mm_realloc_huge(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
		size_t real_size = size;

		/* In debug builds every allocation carries a trailing debug record. */
		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (info & ZEND_MM_IS_SRUN) {
			int old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);

			do {
				old_size = bin_data_size[old_bin_num];

				/* Check if requested size fits into current bin */
				if (size <= old_size) {
					/* Check if truncation is necessary */
					if (old_bin_num > 0 && size < bin_data_size[old_bin_num - 1]) {
						/* truncation */
						ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
						copy_size = use_copy_size ? MIN(size, copy_size) : size;
						memcpy(ret, ptr, copy_size);
						zend_mm_free_small(heap, ptr, old_bin_num);
					} else {
						/* reallocation in-place */
						ret = ptr;
					}
				} else if (size <= ZEND_MM_MAX_SMALL_SIZE) {
					/* small extension */
#if ZEND_MM_STAT
					do {
						size_t orig_peak = heap->peak;
#endif
						ret = zend_mm_alloc_small(heap, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
						copy_size = use_copy_size ? MIN(old_size, copy_size) : old_size;
						memcpy(ret, ptr, copy_size);
						zend_mm_free_small(heap, ptr, old_bin_num);
#if ZEND_MM_STAT
						/* Don't let the transient alloc+free inflate the peak. */
						heap->peak = MAX(orig_peak, heap->size);
					} while (0);
#endif
				} else {
					/* slow reallocation */
					break;
				}

#if ZEND_DEBUG
				dbg = zend_mm_get_debug_info(heap, ret);
				dbg->size = real_size;
				dbg->filename = __zend_filename;
				dbg->orig_filename = __zend_orig_filename;
				dbg->lineno = __zend_lineno;
				dbg->orig_lineno = __zend_orig_lineno;
#endif
				return ret;
			} while (0);
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
				if (new_size == old_size) {
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else if (new_size < old_size) {
					/* free tail pages */
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);

#if ZEND_MM_STAT
					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
					chunk->free_pages += rest_pages_count;
					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else /* if (new_size > old_size) */ {
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);

					/* try to allocate tail pages after this block */
					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
						do {
							size_t size = heap->size + (new_size - old_size);
							size_t peak = MAX(heap->peak, size);

							heap->size = size;
							heap->peak = peak;
						} while (0);
#endif
						chunk->free_pages -= new_pages_count - old_pages_count;
						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
						dbg = zend_mm_get_debug_info(heap, ptr);
						dbg->size = real_size;
						dbg->filename = __zend_filename;
						dbg->orig_filename = __zend_orig_filename;
						dbg->lineno = __zend_lineno;
						dbg->orig_lineno = __zend_orig_lineno;
#endif
						return ptr;
					}
				}
			}
		}
#if ZEND_DEBUG
		size = real_size;
#endif
	}

	copy_size = MIN(old_size, copy_size);
	return zend_mm_realloc_slow(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/*********************/
/* Huge Runs (again) */
/*********************/
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
#if ZEND_DEBUG
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	/* Register a huge allocation by prepending a descriptor node to
	 * heap->huge_list (the node itself comes from the regular heap). */
	zend_mm_huge_list *node = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);

	node->ptr = ptr;
	node->size = size;
	node->next = heap->huge_list;
#if ZEND_DEBUG
	node->dbg.size = dbg_size;
	node->dbg.filename = __zend_filename;
	node->dbg.orig_filename = __zend_orig_filename;
	node->dbg.lineno = __zend_lineno;
	node->dbg.orig_lineno = __zend_orig_lineno;
#endif
	heap->huge_list = node;
}
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Unlink the descriptor for "ptr" from heap->huge_list, free the
	 * descriptor node, and return the recorded block size.
	 * An unknown pointer means the heap is corrupted. */
	zend_mm_huge_list **link = &heap->huge_list;

	while (*link != NULL) {
		zend_mm_huge_list *node = *link;

		if (node->ptr == ptr) {
			size_t size = node->size;

			*link = node->next;
			zend_mm_free_heap(heap, node ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
			return size;
		}
		link = &node->next;
	}
	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
	return 0;
}
2007-02-17 02:06:28 +08:00
2014-08-26 20:21:58 +08:00
static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Look up the recorded size of a huge block; unknown pointers
	 * indicate heap corruption. */
	zend_mm_huge_list *node;

	for (node = heap->huge_list; node != NULL; node = node->next) {
		if (node->ptr == ptr) {
			return node->size;
		}
	}
	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
	return 0;
}
2006-07-18 17:06:33 +08:00
2014-07-16 16:35:48 +08:00
# if ZEND_DEBUG
2014-08-26 20:21:58 +08:00
static void zend_mm_change_huge_block_size ( zend_mm_heap * heap , void * ptr , size_t size , size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
2014-07-16 16:35:48 +08:00
# else
2014-08-26 20:21:58 +08:00
static void zend_mm_change_huge_block_size ( zend_mm_heap * heap , void * ptr , size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC )
# endif
{
zend_mm_huge_list * list = heap - > huge_list ;
while ( list ! = NULL ) {
if ( list - > ptr = = ptr ) {
list - > size = size ;
# if ZEND_DEBUG
list - > dbg . size = dbg_size ;
list - > dbg . filename = __zend_filename ;
list - > dbg . orig_filename = __zend_orig_filename ;
list - > dbg . lineno = __zend_lineno ;
list - > dbg . orig_lineno = __zend_orig_lineno ;
2006-07-18 17:06:33 +08:00
# endif
2014-08-26 20:21:58 +08:00
return ;
2007-03-20 14:46:48 +08:00
}
2014-08-26 20:21:58 +08:00
list = list - > next ;
}
}
2014-07-18 16:27:31 +08:00
2014-08-26 20:21:58 +08:00
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Allocate a "huge" block (larger than a chunk) directly from the OS,
	 * enforcing the memory limit and trying GC before giving up. */
#ifdef ZEND_WIN32
	/* On Windows we don't have ability to extend huge blocks in-place.
	 * We allocate them with 2MB size granularity, to avoid many
	 * reallocations when they are extended by small pieces
	 */
	size_t alignment = MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE);
#else
	size_t alignment = REAL_PAGE_SIZE;
#endif
	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, alignment);
	void *ptr;

	if (UNEXPECTED(new_size < size)) {
		/* Rounding up wrapped around SIZE_MAX. */
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", size, alignment);
	}

#if ZEND_MM_LIMIT
	if (UNEXPECTED(new_size > heap->limit - heap->real_size)) {
		if (zend_mm_gc(heap) && new_size <= heap->limit - heap->real_size) {
			/* pass */
		} else if (heap->overflow == 0) {
#if ZEND_DEBUG
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
			return NULL;
		}
	}
#endif
	ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(ptr == NULL)) {
		/* insufficient memory */
		if (zend_mm_gc(heap) &&
		    (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
			/* pass */
		} else {
#if !ZEND_MM_LIMIT
			zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
#endif
			return NULL;
		}
	}
#if ZEND_DEBUG
	zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	do {
		size_t size = heap->real_size + new_size;
		size_t peak = MAX(heap->real_peak, size);

		heap->real_size = size;
		heap->real_peak = peak;
	} while (0);
	do {
		size_t size = heap->size + new_size;
		size_t peak = MAX(heap->peak, size);

		heap->size = size;
		heap->peak = peak;
	} while (0);
#elif ZEND_MM_LIMIT
	heap->real_size += new_size;
#endif
	return ptr;
}
2011-06-03 05:16:50 +08:00
2014-08-26 20:21:58 +08:00
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Release a huge block: the pointer must be chunk-aligned, and its
	 * descriptor must exist in heap->huge_list (checked by del). */
	size_t size;

	ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
	size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	zend_mm_chunk_free(heap, ptr, size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size -= size;
#endif
#if ZEND_MM_STAT
	heap->size -= size;
#endif
}
2006-07-18 17:06:33 +08:00
2014-08-26 20:21:58 +08:00
/******************/
/* Initialization */
/******************/
2006-07-18 17:06:33 +08:00
2014-10-14 13:41:16 +08:00
static zend_mm_heap * zend_mm_init ( void )
2014-08-26 20:21:58 +08:00
{
2014-10-14 13:41:16 +08:00
zend_mm_chunk * chunk = ( zend_mm_chunk * ) zend_mm_chunk_alloc_int ( ZEND_MM_CHUNK_SIZE , ZEND_MM_CHUNK_SIZE ) ;
2014-08-26 20:21:58 +08:00
zend_mm_heap * heap ;
if ( UNEXPECTED ( chunk = = NULL ) ) {
# if ZEND_MM_ERROR
2014-09-16 18:27:25 +08:00
# ifdef _WIN32
stderr_last_error ( " Can't initialize heap " ) ;
# else
2014-08-26 20:21:58 +08:00
fprintf ( stderr , " \n Can't initialize heap: [%d] %s \n " , errno , strerror ( errno ) ) ;
2014-09-16 18:27:25 +08:00
# endif
2014-07-18 16:27:31 +08:00
# endif
2014-08-26 20:21:58 +08:00
return NULL ;
2014-07-18 16:27:31 +08:00
}
2014-08-26 20:21:58 +08:00
heap = & chunk - > heap_slot ;
chunk - > heap = heap ;
chunk - > next = chunk ;
chunk - > prev = chunk ;
chunk - > free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE ;
chunk - > free_tail = ZEND_MM_FIRST_PAGE ;
chunk - > num = 0 ;
2014-08-27 02:43:33 +08:00
chunk - > free_map [ 0 ] = ( Z_L ( 1 ) < < ZEND_MM_FIRST_PAGE ) - 1 ;
2014-08-26 20:21:58 +08:00
chunk - > map [ 0 ] = ZEND_MM_LRUN ( ZEND_MM_FIRST_PAGE ) ;
heap - > main_chunk = chunk ;
heap - > cached_chunks = NULL ;
heap - > chunks_count = 1 ;
heap - > peak_chunks_count = 1 ;
heap - > cached_chunks_count = 0 ;
heap - > avg_chunks_count = 1.0 ;
2017-10-13 18:56:06 +08:00
heap - > last_chunks_delete_boundary = 0 ;
heap - > last_chunks_delete_count = 0 ;
2014-08-26 20:21:58 +08:00
# if ZEND_MM_STAT || ZEND_MM_LIMIT
heap - > real_size = ZEND_MM_CHUNK_SIZE ;
# endif
# if ZEND_MM_STAT
heap - > real_peak = ZEND_MM_CHUNK_SIZE ;
heap - > size = 0 ;
heap - > peak = 0 ;
2014-07-18 16:27:31 +08:00
# endif
2014-08-26 20:21:58 +08:00
# if ZEND_MM_LIMIT
2016-04-29 17:38:48 +08:00
heap - > limit = ( ( size_t ) Z_L ( - 1 ) > > ( size_t ) Z_L ( 1 ) ) ;
2014-08-26 20:21:58 +08:00
heap - > overflow = 0 ;
# endif
# if ZEND_MM_CUSTOM
2015-08-11 21:33:47 +08:00
heap - > use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE ;
2014-10-14 13:41:16 +08:00
# endif
# if ZEND_MM_STORAGE
heap - > storage = NULL ;
2014-08-26 20:21:58 +08:00
# endif
heap - > huge_list = NULL ;
return heap ;
}
2014-07-18 16:27:31 +08:00
2015-08-04 23:21:05 +08:00
ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
{
	/* Garbage-collect fully-free small runs and empty chunks.
	 * Pass 1: walk the per-bin free lists, counting free elements per run
	 *         in the map's free counter.
	 * Pass 2: drop free-list entries belonging to completely free runs.
	 * Pass 3: walk all chunks, release completely free runs, reset
	 *         counters, and delete chunks that became entirely empty.
	 * Returns the number of bytes of pages released. */
	zend_mm_free_slot *p, **q;
	zend_mm_chunk *chunk;
	size_t page_offset;
	int page_num;
	zend_mm_page_info info;
	uint32_t i, free_counter;
	int has_free_pages;
	size_t collected = 0;

#if ZEND_MM_CUSTOM
	if (heap->use_custom_heap) {
		return 0;
	}
#endif

	for (i = 0; i < ZEND_MM_BINS; i++) {
		has_free_pages = 0;
		p = heap->free_slot[i];
		while (p != NULL) {
			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
			ZEND_ASSERT(page_offset != 0);
			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
			info = chunk->map[page_num];
			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
			if (info & ZEND_MM_IS_LRUN) {
				/* SRUN|LRUN marks a follow-up page: hop back to run start. */
				page_num -= ZEND_MM_NRUN_OFFSET(info);
				info = chunk->map[page_num];
				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
			}
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
			free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
			if (free_counter == bin_elements[i]) {
				has_free_pages = 1;
			}
			chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
			p = p->next_free_slot;
		}

		if (!has_free_pages) {
			continue;
		}

		q = &heap->free_slot[i];
		p = *q;
		while (p != NULL) {
			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
			ZEND_ASSERT(page_offset != 0);
			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
			info = chunk->map[page_num];
			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
			if (info & ZEND_MM_IS_LRUN) {
				page_num -= ZEND_MM_NRUN_OFFSET(info);
				info = chunk->map[page_num];
				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
			}
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
			if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
				/* remove from cache */
				p = p->next_free_slot;
				*q = p;
			} else {
				q = &p->next_free_slot;
				p = *q;
			}
		}
	}

	chunk = heap->main_chunk;
	do {
		i = ZEND_MM_FIRST_PAGE;
		while (i < chunk->free_tail) {
			if (zend_mm_bitset_is_set(chunk->free_map, i)) {
				info = chunk->map[i];
				if (info & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
					int pages_count = bin_pages[bin_num];

					if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
						/* all elements are free */
						zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
						collected += pages_count;
					} else {
						/* reset counter */
						chunk->map[i] = ZEND_MM_SRUN(bin_num);
					}
					i += bin_pages[bin_num];
				} else /* if (info & ZEND_MM_IS_LRUN) */ {
					i += ZEND_MM_LRUN_PAGES(info);
				}
			} else {
				i++;
			}
		}
		if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
			zend_mm_chunk *next_chunk = chunk->next;

			zend_mm_delete_chunk(heap, chunk);
			chunk = next_chunk;
		} else {
			chunk = chunk->next;
		}
	} while (chunk != heap->main_chunk);

	return collected * ZEND_MM_PAGE_SIZE;
}
2014-08-26 20:21:58 +08:00
# if ZEND_DEBUG
/******************/
/* Leak detection */
/******************/
2016-06-21 21:55:17 +08:00
static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, uint32_t i, uint32_t j, zend_leak_info *leak)
{
	/* Scan elements [j..end) of the small run at page "i" for allocations
	 * from the same source location as "leak"; mark matches as reported
	 * (size=0) and count them. If the run ends up with no other live
	 * elements, clear its pages from the free map so subsequent scans
	 * skip it. */
	int empty = 1;
	zend_long count = 0;
	int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
	zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

	while (j < bin_elements[bin_num]) {
		if (dbg->size != 0) {
			if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
				count++;
				dbg->size = 0;
				dbg->filename = NULL;
				dbg->lineno = 0;
			} else {
				empty = 0;
			}
		}
		j++;
		dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
	}
	if (empty) {
		zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
	}
	return count;
}
2006-07-18 17:06:33 +08:00
2016-06-21 21:55:17 +08:00
static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, uint32_t i, zend_leak_info *leak)
{
	/* Count further leaked blocks with the same source location as "leak",
	 * scanning from page "i" of chunk "p" through all remaining chunks in
	 * the circular chunk list. Large runs from other locations have their
	 * pages cleared so they are not revisited.
	 *
	 * Fix: when moving to the next chunk, restart the page index at
	 * ZEND_MM_FIRST_PAGE. Previously the stale index from the prior chunk
	 * was carried over, so leaks in the low pages of subsequent chunks
	 * were silently skipped (matches the upstream php-src fix). */
	zend_long count = 0;

	do {
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					count += zend_mm_find_leaks_small(p, i, 0, leak);
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
						count++;
					}
					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
		i = ZEND_MM_FIRST_PAGE;
	} while (p != heap->main_chunk);
	return count;
}
2014-07-18 16:27:31 +08:00
2015-09-22 20:46:41 +08:00
static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
{
	/* Free and count every huge block after "list" that was allocated at
	 * the same source location; the matching nodes are unlinked as we go. */
	zend_long count = 0;
	zend_mm_huge_list *prev = list;
	zend_mm_huge_list *p = list->next;

	while (p) {
		if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
			prev->next = p->next;
			zend_mm_chunk_free(heap, p->ptr, p->size);
			zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
			count++;
		} else {
			prev = p;
		}
		p = prev->next;
	}
	return count;
}
2014-12-14 06:06:14 +08:00
static void zend_mm_check_leaks(zend_mm_heap *heap)
{
	/* Report all still-live allocations (memory leaks) at shutdown:
	 * first huge blocks, then every small/large allocation found by
	 * walking the chunk page maps. Repeated leaks from the same source
	 * location are grouped into a single "repeated" message. */
	zend_mm_huge_list *list;
	zend_mm_chunk *p;
	zend_leak_info leak;
	zend_long repeated = 0;
	uint32_t total = 0;
	uint32_t i, j;

	/* find leaked huge blocks and free them */
	list = heap->huge_list;
	while (list) {
		zend_mm_huge_list *q = list;

		leak.addr = list->ptr;
		leak.size = list->dbg.size;
		leak.filename = list->dbg.filename;
		leak.orig_filename = list->dbg.orig_filename;
		leak.lineno = list->dbg.lineno;
		leak.orig_lineno = list->dbg.orig_lineno;

		zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
		zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
		repeated = zend_mm_find_leaks_huge(heap, list);
		total += 1 + repeated;
		if (repeated) {
			zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
		}

		heap->huge_list = list = list->next;
		zend_mm_chunk_free(heap, q->ptr, q->size);
		zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
	}

	/* for each chunk */
	p = heap->main_chunk;
	do {
		i = ZEND_MM_FIRST_PAGE;
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					j = 0;
					while (j < bin_elements[bin_num]) {
						if (dbg->size != 0) {
							leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
							leak.size = dbg->size;
							leak.filename = dbg->filename;
							leak.orig_filename = dbg->orig_filename;
							leak.lineno = dbg->lineno;
							leak.orig_lineno = dbg->orig_lineno;

							zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
							zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

							dbg->size = 0;
							dbg->filename = NULL;
							dbg->lineno = 0;

							/* Group further leaks from the same location. */
							repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
							           zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
							total += 1 + repeated;
							if (repeated) {
								zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
							}
						}
						dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
						j++;
					}
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
					leak.size = dbg->size;
					leak.filename = dbg->filename;
					leak.orig_filename = dbg->orig_filename;
					leak.lineno = dbg->lineno;
					leak.orig_lineno = dbg->orig_lineno;

					zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
					zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

					zend_mm_bitset_reset_range(p->free_map, i, pages_count);

					repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
					total += 1 + repeated;
					if (repeated) {
						zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
					}
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
	} while (p != heap->main_chunk);
	if (total) {
		zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
	}
}
2006-12-05 00:20:02 +08:00
# endif
2014-07-16 16:35:48 +08:00
2019-06-27 16:30:45 +08:00
#if ZEND_MM_CUSTOM
/* Forward declarations for the "tracked" debug allocator (defined later). */
static void *tracked_malloc(size_t size);
/* Fix: declare with (void) — in C, empty parentheses in a declaration mean
 * "unspecified arguments", not "no arguments". */
static void tracked_free_all(void);
#endif
2014-12-14 06:06:14 +08:00
void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
{
	/* Tear down a heap. With "full" everything is returned to the OS;
	 * otherwise the heap is reset for reuse and some chunks are kept in
	 * the cache (sized by a running average of peak usage). "silent"
	 * suppresses leak reporting in debug builds. */
	zend_mm_chunk *p;
	zend_mm_huge_list *list;

#if ZEND_MM_CUSTOM
	if (heap->use_custom_heap) {
		if (heap->custom_heap.std._malloc == tracked_malloc) {
			if (silent) {
				tracked_free_all();
			}
			zend_hash_clean(heap->tracked_allocs);
			if (full) {
				zend_hash_destroy(heap->tracked_allocs);
				free(heap->tracked_allocs);
				/* Make sure the heap free below does not use tracked_free(). */
				heap->custom_heap.std._free = free;
			}
		}

		if (full) {
			if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
				heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
			} else {
				heap->custom_heap.std._free(heap);
			}
		}
		return;
	}
#endif

#if ZEND_DEBUG
	if (!silent) {
		zend_mm_check_leaks(heap);
	}
#endif

	/* free huge blocks */
	list = heap->huge_list;
	heap->huge_list = NULL;
	while (list) {
		zend_mm_huge_list *q = list;

		list = list->next;
		zend_mm_chunk_free(heap, q->ptr, q->size);
	}

	/* move all chunks except of the first one into the cache */
	p = heap->main_chunk->next;
	while (p != heap->main_chunk) {
		zend_mm_chunk *q = p->next;

		p->next = heap->cached_chunks;
		heap->cached_chunks = p;
		p = q;
		heap->chunks_count--;
		heap->cached_chunks_count++;
	}

	if (full) {
		/* free all cached chunks */
		while (heap->cached_chunks) {
			p = heap->cached_chunks;
			heap->cached_chunks = p->next;
			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
		}
		/* free the first chunk */
		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
	} else {
		/* free some cached chunks to keep average count */
		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
		       heap->cached_chunks) {
			p = heap->cached_chunks;
			heap->cached_chunks = p->next;
			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
			heap->cached_chunks_count--;
		}
		/* clear cached chunks */
		p = heap->cached_chunks;
		while (p != NULL) {
			zend_mm_chunk *q = p->next;

			memset(p, 0, sizeof(zend_mm_chunk));
			p->next = q;
			p = q;
		}

		/* reinitialize the first chunk and heap */
		p = heap->main_chunk;
		p->heap = &p->heap_slot;
		p->next = p;
		p->prev = p;
		p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
		p->free_tail = ZEND_MM_FIRST_PAGE;
		p->num = 0;

#if ZEND_MM_STAT
		heap->size = heap->peak = 0;
#endif
		memset(heap->free_slot, 0, sizeof(heap->free_slot));
#if ZEND_MM_STAT || ZEND_MM_LIMIT
		heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
		heap->real_peak = ZEND_MM_CHUNK_SIZE;
#endif
		heap->chunks_count = 1;
		heap->peak_chunks_count = 1;
		heap->last_chunks_delete_boundary = 0;
		heap->last_chunks_delete_count = 0;

		memset(p->free_map, 0, sizeof(p->free_map) + sizeof(p->map));
		/* Use Z_L(1) for consistency with zend_mm_init() — plain 1L is only
		 * 32 bits on LLP64 targets (Win64). */
		p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
		p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	}
}
/**************/
/* PUBLIC API */
/**************/
2014-07-16 16:35:48 +08:00
2014-10-06 20:32:15 +08:00
/* Public API: allocate "size" bytes from "heap". */
ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2014-10-06 20:32:15 +08:00
/* Public API: release a block previously returned by _zend_mm_alloc(). */
ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2014-10-06 20:32:15 +08:00
/* Public API: realloc without an explicit copy bound (copies old contents). */
void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_realloc_heap(heap, ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Public API: realloc that copies at most "copy_size" bytes of old data. */
void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_realloc_heap(heap, ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2014-10-06 20:32:15 +08:00
/* Public API: usable size of an allocated block. */
ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2006-07-18 17:06:33 +08:00
/**********************/
/* Allocation Manager */
/**********************/

/* Fix: a pasted VCS commit message was sitting as bare text inside the
 * #ifdef ZTS section, which cannot compile; it has been removed. */

/* Allocator globals: the active heap, per-thread under ZTS. */
typedef struct _zend_alloc_globals {
	zend_mm_heap *mm_heap;
} zend_alloc_globals;

#ifdef ZTS
static int alloc_globals_id;
static size_t alloc_globals_offset;
# define AG(v) ZEND_TSRMG_FAST(alloc_globals_offset, zend_alloc_globals *, v)
#else
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals;
#endif
2014-12-14 06:06:14 +08:00
/* Return non-zero when the built-in Zend allocator is active
 * (i.e. no custom heap has been installed). */
ZEND_API int is_zend_mm(void)
{
#if ZEND_MM_CUSTOM
	return !AG(mm_heap)->use_custom_heap;
#else
	return 1;
#endif
}
2018-12-12 18:02:28 +08:00
ZEND_API int is_zend_ptr ( const void * ptr )
{
# if ZEND_MM_CUSTOM
if ( AG ( mm_heap ) - > use_custom_heap ) {
return 0 ;
}
# endif
if ( AG ( mm_heap ) - > main_chunk ) {
zend_mm_chunk * chunk = AG ( mm_heap ) - > main_chunk ;
do {
if ( ptr > = ( void * ) chunk
& & ptr < ( void * ) ( ( char * ) chunk + ZEND_MM_CHUNK_SIZE ) ) {
return 1 ;
}
chunk = chunk - > next ;
} while ( chunk ! = AG ( mm_heap ) - > main_chunk ) ;
}
if ( AG ( mm_heap ) - > huge_list ) {
zend_mm_huge_list * block = AG ( mm_heap ) - > huge_list ;
do {
if ( ptr > = ( void * ) block
& & ptr < ( void * ) ( ( char * ) block + block - > size ) ) {
return 1 ;
}
block = block - > next ;
} while ( block ! = AG ( mm_heap ) - > huge_list ) ;
}
return 0 ;
}
2014-08-26 20:21:58 +08:00
#if ZEND_MM_CUSTOM
/* Dispatch an allocation to the installed custom heap; the debug variant
 * receives file/line context, the standard variant does not. */
static ZEND_COLD void* ZEND_FASTCALL _malloc_custom(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
		return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		return AG(mm_heap)->custom_heap.std._malloc(size);
	}
}
/* Dispatch a free to the installed custom heap (debug or standard variant). */
static ZEND_COLD void ZEND_FASTCALL _efree_custom(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
		AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		AG(mm_heap)->custom_heap.std._free(ptr);
	}
}
/* Dispatch a reallocation to the installed custom heap (debug or standard). */
static ZEND_COLD void* ZEND_FASTCALL _realloc_custom(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
		return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
	}
}
#endif
2019-05-29 06:43:27 +08:00
2019-05-29 08:40:09 +08:00
#if !ZEND_DEBUG && defined(HAVE_BUILTIN_CONSTANT_P)
#undef _emalloc

#if ZEND_MM_CUSTOM
/* If a custom heap is installed, divert the allocation there and return
 * from the *enclosing* function (hence the embedded return). */
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			return _malloc_custom(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
		} \
	} while (0)
/* Same idea for deallocation: divert and return early. */
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			_efree_custom(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
			return; \
		} \
	} while (0)
#else
/* No custom-heap support compiled in: the hooks expand to nothing. */
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif
/* Stamp out one fixed-size allocator (_emalloc_NN) per small-size bin. */
#define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
	ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
		ZEND_MM_CUSTOM_ALLOCATOR(_size); \
		return zend_mm_alloc_small(AG(mm_heap), _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
	}

ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
/* Allocate a "large" block (multiple pages, but smaller than a chunk). */
ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_large_ex(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Allocate a "huge" block (larger than a chunk), mapped directly. */
ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{
	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_huge(AG(mm_heap), size);
}
2014-11-27 17:52:31 +08:00
# if ZEND_DEBUG
2014-08-26 20:21:58 +08:00
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
ZEND_API void ZEND_FASTCALL _efree_ # # _size ( void * ptr ) { \
ZEND_MM_CUSTOM_DEALLOCATOR ( ptr ) ; \
{ \
size_t page_offset = ZEND_MM_ALIGNED_OFFSET ( ptr , ZEND_MM_CHUNK_SIZE ) ; \
zend_mm_chunk * chunk = ( zend_mm_chunk * ) ZEND_MM_ALIGNED_BASE ( ptr , ZEND_MM_CHUNK_SIZE ) ; \
int page_num = page_offset / ZEND_MM_PAGE_SIZE ; \
ZEND_MM_CHECK ( chunk - > heap = = AG ( mm_heap ) , " zend_mm_heap corrupted " ) ; \
ZEND_ASSERT ( chunk - > map [ page_num ] & ZEND_MM_IS_SRUN ) ; \
ZEND_ASSERT ( ZEND_MM_SRUN_BIN_NUM ( chunk - > map [ page_num ] ) = = _num ) ; \
zend_mm_free_small ( AG ( mm_heap ) , ptr , _num ) ; \
} \
}
2014-11-27 17:52:31 +08:00
# else
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
ZEND_API void ZEND_FASTCALL _efree_ # # _size ( void * ptr ) { \
ZEND_MM_CUSTOM_DEALLOCATOR ( ptr ) ; \
{ \
zend_mm_chunk * chunk = ( zend_mm_chunk * ) ZEND_MM_ALIGNED_BASE ( ptr , ZEND_MM_CHUNK_SIZE ) ; \
ZEND_MM_CHECK ( chunk - > heap = = AG ( mm_heap ) , " zend_mm_heap corrupted " ) ; \
zend_mm_free_small ( AG ( mm_heap ) , ptr , _num ) ; \
} \
}
# endif
2014-08-26 20:21:58 +08:00
ZEND_MM_BINS_INFO ( _ZEND_BIN_FREE , x , y )
/* Free a "large" (multi-page) block; the size is supplied by the caller so
 * the page count can be re-derived and (in debug builds) cross-checked
 * against the chunk's page map. */
ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{
	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	{
		size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
		uint32_t pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;

		/* A large block must start on a page boundary of our own chunk. */
		ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
		ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
		ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
		zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
	}
}
/* Free a "huge" block. The size argument is accepted for API symmetry but
 * not needed — the huge list records each block's size. */
ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{
	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	zend_mm_free_huge(AG(mm_heap), ptr);
}
#endif
2006-09-14 16:00:44 +08:00
2014-08-26 20:21:58 +08:00
/* General-purpose request-local allocation entry point. */
ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return _malloc_custom(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
#endif
	return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2014-08-26 20:21:58 +08:00
/* General-purpose request-local free entry point. */
ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		_efree_custom(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		return;
	}
#endif
	zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2015-03-05 22:22:10 +08:00
/* Reallocate; preserved-data size defaults to the new size. */
ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return _realloc_custom(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
#endif
	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 0, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
/* Reallocate with an explicit copy size (only copy_size bytes of the old
 * block need to be preserved).
 * NOTE(review): the custom-heap path forwards only `size`, so copy_size is
 * ignored there — custom handlers have no copy-size hook; preserved as-is. */
ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return _realloc_custom(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
#endif
	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, 1, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
1999-04-08 02:10:10 +08:00
2014-12-14 21:07:59 +08:00
/* Usable size of a block on the active heap; 0 when a custom heap is
 * installed (custom handlers expose no size query). */
ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return 0;
	}
#endif
	return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2014-08-26 20:21:58 +08:00
/* Overflow-checked allocation of nmemb * size + offset bytes
 * (zend_safe_address_guarded aborts on overflow). */
ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return _emalloc(zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2014-08-26 20:21:58 +08:00
/* Overflow-checked persistent (non-request) allocation. */
ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
{
	return pemalloc(zend_safe_address_guarded(nmemb, size, offset), 1);
}
2003-04-24 11:35:06 +08:00
2014-08-26 20:21:58 +08:00
/* Overflow-checked request-local reallocation. */
ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return _erealloc(ptr, zend_safe_address_guarded(nmemb, size, offset) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2014-08-26 20:21:58 +08:00
/* Overflow-checked persistent (non-request) reallocation. */
ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
{
	return perealloc(ptr, zend_safe_address_guarded(nmemb, size, offset), 1);
}
2014-08-26 20:21:58 +08:00
/* calloc equivalent: overflow-checked nmemb * size allocation, zero-filled. */
ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t total = zend_safe_address_guarded(nmemb, size, 0);
	void *p = _emalloc(total ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);

	memset(p, 0, total);
	return p;
}
2014-08-26 20:21:58 +08:00
/* Duplicate a NUL-terminated string into request-local memory.
 * Errors out (no return) if length + 1 would wrap around. */
ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	size_t len = strlen(s);
	char *copy;

	if (UNEXPECTED(len + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", len);
	}
	copy = (char*)_emalloc(len + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memcpy(copy, s, len + 1); /* +1 copies the terminator too */
	return copy;
}
2014-08-26 20:21:58 +08:00
/* Duplicate exactly `length` bytes of s into request-local memory and
 * NUL-terminate the copy. Errors out if length + 1 would wrap around. */
ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	char *copy;

	if (UNEXPECTED(length + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
	}
	copy = (char*)_emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memcpy(copy, s, length);
	copy[length] = 0;
	return copy;
}
2014-08-26 20:21:58 +08:00
/* Duplicate `length` bytes of s into *persistent* (system malloc) memory,
 * NUL-terminated. Returns NULL on allocation failure; errors out (no
 * return) if length + 1 would wrap around. Caller frees with free(). */
ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
{
	char *copy;

	if (UNEXPECTED(length + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (1 * %zu + 1)", length);
	}
	copy = (char*)malloc(length + 1);
	if (UNEXPECTED(copy == NULL)) {
		return copy;
	}
	if (EXPECTED(length)) {
		memcpy(copy, s, length);
	}
	copy[length] = 0;
	return copy;
}
2014-12-14 06:06:14 +08:00
/* Set the per-request memory limit, clamped up to at least one chunk. */
ZEND_API int zend_set_memory_limit(size_t memory_limit)
{
#if ZEND_MM_LIMIT
	AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
#endif
	return SUCCESS;
}
2014-12-14 06:06:14 +08:00
/* Current memory usage: `real_usage` selects OS-level (mapped) bytes,
 * otherwise logical bytes handed out to callers. 0 if stats are disabled. */
ZEND_API size_t zend_memory_usage(int real_usage)
{
#if ZEND_MM_STAT
	return real_usage ? AG(mm_heap)->real_size : AG(mm_heap)->size;
#endif
	return 0;
}
2014-12-14 06:06:14 +08:00
/* Peak memory usage (real or logical, as zend_memory_usage). 0 when
 * statistics are compiled out. */
ZEND_API size_t zend_memory_peak_usage(int real_usage)
{
#if ZEND_MM_STAT
	return real_usage ? AG(mm_heap)->real_peak : AG(mm_heap)->peak;
#endif
	return 0;
}
1999-04-08 02:10:10 +08:00
2014-12-14 06:06:14 +08:00
ZEND_API void shutdown_memory_manager ( int silent , int full_shutdown )
2006-07-18 17:06:33 +08:00
{
2014-12-14 06:06:14 +08:00
zend_mm_shutdown ( AG ( mm_heap ) , full_shutdown , silent ) ;
2006-07-18 17:06:33 +08:00
}
2002-06-26 19:07:35 +08:00
2019-06-27 16:30:45 +08:00
#if ZEND_MM_CUSTOM
/* System malloc wrapper that records each allocation in a hash table
 * (keyed by the pointer shifted down by the alignment) for later auto-free. */
static void *tracked_malloc(size_t size)
{
	void *ptr = __zend_malloc(size);
	zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;

	/* The shift must be lossless, i.e. the pointer must be MM-aligned. */
	ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
	zend_hash_index_add_empty_element(AG(mm_heap)->tracked_allocs, h);
	return ptr;
}
static void tracked_free ( void * ptr ) {
zend_ulong h = ( ( uintptr_t ) ptr ) > > ZEND_MM_ALIGNMENT_LOG2 ;
zend_hash_index_del ( AG ( mm_heap ) - > tracked_allocs , h ) ;
free ( ptr ) ;
}
/* Untrack the old pointer, realloc, then track the (possibly moved) result. */
static void *tracked_realloc(void *ptr, size_t new_size) {
	zend_ulong h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;

	zend_hash_index_del(AG(mm_heap)->tracked_allocs, h);
	ptr = __zend_realloc(ptr, new_size);
	h = ((uintptr_t) ptr) >> ZEND_MM_ALIGNMENT_LOG2;
	ZEND_ASSERT((void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2) == ptr);
	zend_hash_index_add_empty_element(AG(mm_heap)->tracked_allocs, h);
	return ptr;
}
/* Free every allocation still recorded in the tracking table (leak cleanup
 * at shutdown). Keys are pointers shifted by the alignment; reverse the
 * shift to recover each pointer.
 * Fix: declared as (void) — an empty () parameter list is an old-style
 * unprototyped declaration in C, not a zero-argument prototype. */
static void tracked_free_all(void) {
	HashTable *tracked_allocs = AG(mm_heap)->tracked_allocs;
	zend_ulong h;

	ZEND_HASH_FOREACH_NUM_KEY(tracked_allocs, h) {
		void *ptr = (void *) (uintptr_t) (h << ZEND_MM_ALIGNMENT_LOG2);
		free(ptr);
	} ZEND_HASH_FOREACH_END();
}
#endif
2014-12-14 06:06:14 +08:00
/* Initialize the allocator globals for one thread/process.
 * Honors two environment switches:
 *   USE_ZEND_ALLOC=0          -> bypass the Zend MM, use the system allocator
 *   USE_TRACKED_ALLOC=1       -> additionally track allocations for auto-free
 *   USE_ZEND_ALLOC_HUGE_PAGES -> request huge pages for chunk mappings */
static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
{
	char *tmp;

#if ZEND_MM_CUSTOM
	tmp = getenv("USE_ZEND_ALLOC");
	if (tmp && !zend_atoi(tmp, 0)) {
		zend_bool tracked = (tmp = getenv("USE_TRACKED_ALLOC")) && zend_atoi(tmp, 0);
		/* NOTE(review): startup malloc result is not checked — an OOM here
		 * would crash on the memset; preserved as-is. */
		zend_mm_heap *mm_heap = alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));

		memset(mm_heap, 0, sizeof(zend_mm_heap));
		mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
		if (!tracked) {
			/* Plain system allocator. */
			mm_heap->custom_heap.std._malloc = __zend_malloc;
			mm_heap->custom_heap.std._free = free;
			mm_heap->custom_heap.std._realloc = __zend_realloc;
		} else {
			/* System allocator plus allocation tracking for auto-free. */
			mm_heap->custom_heap.std._malloc = tracked_malloc;
			mm_heap->custom_heap.std._free = tracked_free;
			mm_heap->custom_heap.std._realloc = tracked_realloc;
			mm_heap->tracked_allocs = malloc(sizeof(HashTable));
			zend_hash_init(mm_heap->tracked_allocs, 1024, NULL, NULL, 1);
		}
		return;
	}
#endif

	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
	if (tmp && zend_atoi(tmp, 0)) {
		zend_mm_use_huge_pages = 1;
	}
	alloc_globals->mm_heap = zend_mm_init();
}
2001-08-03 15:06:05 +08:00
2006-07-18 17:06:33 +08:00
#ifdef ZTS
/* Per-thread teardown: full, silent heap shutdown. */
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
{
	zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
}
#endif
2002-06-25 02:49:13 +08:00
2014-12-14 06:06:14 +08:00
ZEND_API void start_memory_manager ( void )
2006-07-18 17:06:33 +08:00
{
# ifdef ZTS
2019-03-14 08:01:01 +08:00
ts_allocate_fast_id ( & alloc_globals_id , & alloc_globals_offset , sizeof ( zend_alloc_globals ) , ( ts_allocate_ctor ) alloc_globals_ctor , ( ts_allocate_dtor ) alloc_globals_dtor ) ;
2006-07-18 17:06:33 +08:00
# else
alloc_globals_ctor ( & alloc_globals ) ;
1999-12-28 00:42:59 +08:00
# endif
2014-12-02 21:17:26 +08:00
# ifndef _WIN32
# if defined(_SC_PAGESIZE)
REAL_PAGE_SIZE = sysconf ( _SC_PAGESIZE ) ;
# elif defined(_SC_PAGE_SIZE)
REAL_PAGE_SIZE = sysconf ( _SC_PAGE_SIZE ) ;
# endif
# endif
1999-04-08 02:10:10 +08:00
}
2014-12-14 06:06:14 +08:00
/* Swap in a different heap; returns the previously active one so the
 * caller can restore it later. */
ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
{
	zend_mm_heap *old_heap = AG(mm_heap);

	AG(mm_heap) = new_heap;
	return old_heap;
}
1999-05-12 05:38:39 +08:00
2015-07-09 17:19:30 +08:00
/* Return the currently active heap. */
ZEND_API zend_mm_heap *zend_mm_get_heap(void)
{
	return AG(mm_heap);
}
/* Report whether a custom heap is installed (the custom-heap mode value).
 * NOTE(review): the `new_heap` parameter is unused — the *active* heap is
 * queried via AG(); preserved as-is since the signature is public. */
ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
{
#if ZEND_MM_CUSTOM
	return AG(mm_heap)->use_custom_heap;
#else
	return 0;
#endif
}
2007-11-06 15:22:13 +08:00
/* Install custom malloc/free/realloc handlers on a heap. Passing all three
 * as NULL uninstalls any custom heap and restores the built-in allocator. */
ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
                                          void* (*_malloc)(size_t),
                                          void  (*_free)(void*),
                                          void* (*_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
	zend_mm_heap *_heap = (zend_mm_heap*)heap;

	if (!_malloc && !_free && !_realloc) {
		_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
	} else {
		_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
		_heap->custom_heap.std._malloc = _malloc;
		_heap->custom_heap.std._free = _free;
		_heap->custom_heap.std._realloc = _realloc;
	}
#endif
}
1999-04-08 02:10:10 +08:00
2014-10-14 18:20:25 +08:00
/* Retrieve the installed custom handlers (the `std` set), or NULLs when no
 * custom heap is active or custom-heap support is compiled out. */
ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
                                          void* (**_malloc)(size_t),
                                          void  (**_free)(void*),
                                          void* (**_realloc)(void*, size_t))
{
#if ZEND_MM_CUSTOM
	zend_mm_heap *_heap = (zend_mm_heap*)heap;

	if (heap->use_custom_heap) {
		*_malloc = _heap->custom_heap.std._malloc;
		*_free = _heap->custom_heap.std._free;
		*_realloc = _heap->custom_heap.std._realloc;
	} else {
		*_malloc = NULL;
		*_free = NULL;
		*_realloc = NULL;
	}
#else
	*_malloc = NULL;
	*_free = NULL;
	*_realloc = NULL;
#endif
}
2015-08-11 21:33:47 +08:00
#if ZEND_DEBUG
/* Install debug-flavored custom handlers (they receive file/line context). */
ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
                                                void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                                void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                                void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
{
#if ZEND_MM_CUSTOM
	zend_mm_heap *_heap = (zend_mm_heap*)heap;

	_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
	_heap->custom_heap.debug._malloc = _malloc;
	_heap->custom_heap.debug._free = _free;
	_heap->custom_heap.debug._realloc = _realloc;
#endif
}
#endif
2014-10-14 13:41:16 +08:00
/* Return the heap's pluggable-storage descriptor, or NULL when storage
 * support is compiled out.
 * Fix: the #else branch was `return NULL` with no semicolon — a guaranteed
 * compile error in any build with ZEND_MM_STORAGE disabled. */
ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
{
#if ZEND_MM_STORAGE
	return heap->storage;
#else
	return NULL;
#endif
}
/* Create a fresh, independent Zend MM heap. */
ZEND_API zend_mm_heap *zend_mm_startup(void)
{
	return zend_mm_init();
}
2015-05-14 21:47:22 +08:00
/* Create a heap backed by user-supplied chunk handlers plus opaque data.
 * Bootstraps with a temporary on-stack storage descriptor (the heap must
 * exist before it can allocate its own storage record), then moves the
 * descriptor — and a copy of `data` — into heap memory.
 * Returns NULL on failure, or when storage support is compiled out. */
ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
{
#if ZEND_MM_STORAGE
	zend_mm_storage tmp_storage, *storage;
	zend_mm_chunk *chunk;
	zend_mm_heap *heap;

	memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
	tmp_storage.data = data;
	chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}

	/* The heap structure lives inside the first chunk. */
	heap = &chunk->heap_slot;
	chunk->heap = heap;
	chunk->next = chunk;
	chunk->prev = chunk;
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	chunk->num = 0;
	/* The first pages hold the chunk header itself. */
	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	heap->main_chunk = chunk;
	heap->cached_chunks = NULL;
	heap->chunks_count = 1;
	heap->peak_chunks_count = 1;
	heap->cached_chunks_count = 0;
	heap->avg_chunks_count = 1.0;
	heap->last_chunks_delete_boundary = 0;
	heap->last_chunks_delete_count = 0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
	heap->real_peak = ZEND_MM_CHUNK_SIZE;
	heap->size = 0;
	heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
	heap->limit = (Z_L(-1) >> Z_L(1));
	heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = 0;
#endif
	/* Temporarily point at the stack descriptor so the allocation below
	 * can go through the user's chunk handlers. */
	heap->storage = &tmp_storage;
	heap->huge_list = NULL;
	memset(heap->free_slot, 0, sizeof(heap->free_slot));

	storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
	if (!storage) {
		handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
	if (data) {
		/* The caller's data is copied right after the storage record. */
		storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
		memcpy(storage->data, data, data_size);
	}
	heap->storage = storage;
	return heap;
#else
	return NULL;
#endif
}
2015-09-09 18:42:35 +08:00
/* Last-resort OOM handler for the system-allocator wrappers: report and die. */
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
{
	fprintf(stderr, "Out of memory\n");
	exit(1);
}
/* malloc that never returns NULL for a non-zero request: on failure the
 * process exits via zend_out_of_memory. A zero-length request may return
 * NULL (treated as success). */
ZEND_API void *__zend_malloc(size_t len)
{
	void *tmp = malloc(len);

	if (EXPECTED(tmp || !len)) {
		return tmp;
	}
	zend_out_of_memory();
}
/* Overflow-checked, zero-filled allocation built on __zend_malloc
 * (so it also exits on OOM). */
ZEND_API void *__zend_calloc(size_t nmemb, size_t len)
{
	void *tmp;

	len = zend_safe_address_guarded(nmemb, len, 0);
	tmp = __zend_malloc(len);
	memset(tmp, 0, len);
	return tmp;
}
/* realloc that exits on failure instead of returning NULL (a zero-length
 * request returning NULL is treated as success). */
ZEND_API void *__zend_realloc(void *p, size_t len)
{
	p = realloc(p, len);
	if (EXPECTED(p || !len)) {
		return p;
	}
	zend_out_of_memory();
}
2019-03-14 08:01:01 +08:00
#ifdef ZTS
/* Size of the per-thread allocator globals block (used by TSRM). */
size_t zend_mm_globals_size(void)
{
	return sizeof(zend_alloc_globals);
}
#endif