/*
Patch title: Release 85 bulk changes
Abstract:
File: /pliant/language/memory/alloc.c
Key:
    Removed line
    Added line
*/
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
// 
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
// 
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.

/*
abstract
  [This program is based on the malloc/free/realloc functions written by Doug Lea and has been modified by Hubert Tonneau]
doc
  [Doug Lea wrote ] ; link "a very fine paper" "http://g.oswego.edu/dl/html/malloc.html" ; [ describing his algorithm which is the obvious way to start with before digging in the code.] ; eol
  [The change I introduced, except rewriting everything in Pliant C very special style, and changing a few flags in order to allow a more compact encoding of Pliant object headers, is that pages in the middle of large free memory blocks will be unmapped.] ; eol
  [I must also have changed a few other details because my version is much slower :-(] ; eol
*/


/*****************************************************************************/


// Locking is compiled out in this build: all four macros expand to nothing.
// NOTE(review): a multithreaded build presumably redefines these to declare
// and take/release a mutex - confirm against the threaded configuration.
#define MEMORY_LOCK_VAR
#define MEMORY_LOCK
#define MEMORY_UNLOCK_VAR
#define MEMORY_UNLOCK


// Translate a user request of 'size0' bytes into the real chunk size:
// add room for the size header, round up to the allocator alignment,
// and never go below the minimal chunk header size.
INLINE Int required_size(Int size0) {
  Int padded;
  padded=round_up(size0+sizeof(Int),align_size);
  return maximum(padded,sizeof(struct MemoryChunk)); }


// Map a chunk size (>=16) to its bin index.  Bin granularity gets
// coarser as sizes grow: 8-byte steps up to 512 bytes, then 64, 512,
// 4K, 32K and 512K byte steps, with one catch-all bin (125) at the top.
static Int index_for_size(Int size) {
  Int shifted;
  check(size>=16);
  shifted=size>>9;
  if(shifted==0) return (size>>3)-1;
  if(shifted<=4) return 55+(size>>6);
  if(shifted<=20) return 90+(size>>9);
  if(shifted<=84) return 109+(size>>12);
  if(shifted<=340) return 118+(size>>15);
  if(shifted<=1364) return 123+(size>>19);
  return 125; }


// Bin index for a small (exact-fit) chunk size: one bin per 8 bytes,
// starting at index 0 for the smallest chunk.
INLINE Int index_for_small_size(Int size) {
  Int index;
  index=(size>>3)-1;
  return index; }


// Return the pseudo chunk heading bin 'index': the slot 'pool->bins+index'
// acts as the storage for the head's 'next' field, and struct_from_field
// rebuilds a MemoryChunk pointer overlaying it, so bin heads can be linked
// exactly like real free chunks.
// NOTE(review): assumes consecutive bins slots line up with the
// 'next'/'previous' field pair of MemoryChunk - confirm against the
// MemoryPool declaration.
INLINE struct MemoryChunk *bin_chunk(struct MemoryPool *pool,Int index) {
  return struct_from_field(MemoryChunk,next,pool->bins+index); }


/*****************************************************************************/


// Chunk accessors.
// A chunk's 'size' field holds the chunk byte length (a multiple of
// align_size) with flag bits packed in the low bits.  The in-use flag of
// a chunk is stored as 'previous_chunk_is_inuse' in the size field of the
// physically NEXT chunk (see Chunk_set_inuse below).

INLINE Int Chunk_size(struct MemoryChunk *chunk) {
  check(chunk->size>=0);
  // mask off the low flag bits to get the pure byte length
  return chunk->size&~(align_size-1); }

// Chunk located 'offset' bytes away from 'chunk' (offset may be negative).
INLINE struct MemoryChunk *Chunk_translate(struct MemoryChunk *chunk,Int offset) {
  return (struct MemoryChunk *)address_translate(chunk,offset); }

// Physically next chunk in the same memory area.
INLINE struct MemoryChunk *Chunk_next(struct MemoryChunk *chunk) {
  return Chunk_translate(chunk,Chunk_size(chunk)); }

// Address handed to the user: the 'next'/'previous' link fields of the
// header are reused as user data while the chunk is in use.
INLINE Address Chunk_user_area(struct MemoryChunk *chunk) {
  return (Address)&chunk->next; }

// Mark 'chunk' as in use; the flag lives in the next chunk's size field.
INLINE Bool Chunk_set_inuse(struct MemoryChunk *chunk) {
  return Chunk_next(chunk)->size|=previous_chunk_is_inuse; }

// Mark 'chunk' as free.
INLINE Bool Chunk_clear_inuse(struct MemoryChunk *chunk) {
  return Chunk_next(chunk)->size&=~previous_chunk_is_inuse; }

// Test whether 'chunk' is in use.
INLINE Bool Chunk_is_inuse(struct MemoryChunk *chunk) {
  return Chunk_next(chunk)->size&previous_chunk_is_inuse; }

// A zero-sized chunk is the stop mark ending a memory area.
INLINE Bool Chunk_is_stop(struct MemoryChunk *chunk) {
  return Chunk_size(chunk)==0; }

// False when the middle pages of this (free) chunk have been unmapped
// by Chunk_decommit.
INLINE Bool Chunk_is_committed(struct MemoryChunk *chunk) {
  return !(chunk->size&chunk_is_decomitted); }

// First chunk of a memory area: the header is offset so that the user
// area starts align_size aligned.
INLINE struct MemoryChunk *Chunk_first(Address adr) {
  return struct_from_field(MemoryChunk,size,address_translate(adr,align_size-sizeof(Int))); }

/*****************************************************************************/


#ifdef _CHECK_

  // Assert that the chunk header and its successor both lie inside the
  // address range the allocator has ever handed out.
  static Void Chunk_check(struct MemoryChunk *chunk) {
    struct MemoryChunk *next;
    check(integer_from_address(&chunk->size)>=integer_from_address(G.memory_minimal_address) && integer_from_address(&chunk->size)<=integer_from_address(G.memory_maximal_address));
    next=Chunk_next(chunk);
    check(integer_from_address(&next->size)>=integer_from_address(G.memory_minimal_address) && integer_from_address(&next->size)<=integer_from_address(G.memory_maximal_address)); }


  // Assert the invariants of a free chunk: correctly double-linked in its
  // bin, not adjacent to another free chunk (neighbours must be in use,
  // i.e. free chunks are always fully coalesced), and its size mirrored
  // in the next chunk's prev_size field.
  static void Chunk_check_free(struct MemoryChunk *chunk) {
    struct MemoryChunk *next;
    Chunk_check(chunk);
    check(!Chunk_is_inuse(chunk));
    check(chunk->next->previous==chunk);
    check(chunk->previous->next==chunk);
    check(chunk->size&previous_chunk_is_inuse);
    next=Chunk_next(chunk);
    if(!Chunk_is_stop(next))
      check(Chunk_is_inuse(next));
    check(next->prev_size==Chunk_size(chunk)); }


  // Assert the invariants of an in-use chunk, recursing into whichever
  // neighbours are free.
  static Void Chunk_check_inuse(struct MemoryChunk *chunk) {
    struct MemoryChunk *previous,*next;
    Chunk_check(chunk);
    check(Chunk_is_inuse(chunk));
    if(!(chunk->size&previous_chunk_is_inuse)) {
      previous=Chunk_translate(chunk,-chunk->prev_size);
      Chunk_check_free(previous); }
    next=Chunk_next(chunk);
    if(!Chunk_is_stop(next)) {
      if(!Chunk_is_inuse(next))
        Chunk_check_free(next);
      else
        Chunk_check(next); } }


  // Assert that a chunk just returned by the allocator for a request of
  // 'size' bytes is in use and wastes less than one header's worth.
  static Void Chunk_check_allocated(struct MemoryChunk *chunk,Int size) {
    Int wasted=Chunk_size(chunk)-size;
    check(size>=sizeof(struct MemoryChunk));
    check(wasted>=0 && wasted<sizeof(struct MemoryChunk));
    Chunk_check_inuse(chunk); }

#else

  // All consistency checks compile away in release builds.
  #define Chunk_check(chunk)
  #define Chunk_check_free(chunk)
  #define Chunk_check_inuse(chunk)
  #define Chunk_check_allocated(chunk,size)

#endif


/*****************************************************************************/


#ifdef _MEMORY_DECOMMIT_

  // Unmap the whole pages lying strictly inside a large free chunk, so big
  // free blocks stop consuming physical memory (this is the extension over
  // Doug Lea's original algorithm noted in the file header).  The chunk
  // header, viewed as MemoryChunk2, records the decommitted page range; if
  // the chunk was already partially decommitted, only the extra leading and
  // trailing pages are released.
  static Void Chunk_decommit(struct MemoryChunk *chunk) {
    Address start; Int size;
    struct MemoryChunk2 *c;
    #ifdef _CHECK_
      // NOTE(review): presumably avoids touching pages once the runtime is
      // past the freeing phase of shutdown - confirm phase_free semantics
      if(G.execution_phase>=phase_free) return;
    #endif
    c=(struct MemoryChunk2 *)chunk;
    // page-aligned interior of the chunk, keeping the header mapped
    start=address_from_integer(round_up(integer_from_address(c+1),memory_page_size));
    size=round_down(integer_from_address(Chunk_next(chunk)),memory_page_size)-integer_from_address(start);
    if(size<=0) return;
    if(!Chunk_is_committed(chunk)) {
      // already partially decommitted: release only the new margins
      if(integer_from_address(start)<integer_from_address(c->decomitted_address))
        memory_page_decommit(start,integer_from_address(c->decomitted_address)-integer_from_address(start));
      if(integer_from_address(address_translate(start,size))>integer_from_address(address_translate(c->decomitted_address,c->decomitted_size)))
        memory_page_decommit(address_translate(c->decomitted_address,c->decomitted_size),integer_from_address(address_translate(start,size))-integer_from_address(address_translate(c->decomitted_address,c->decomitted_size)));
    other
      memory_page_decommit(start,size);
      chunk->size|=chunk_is_decomitted; }
    // record the (now maximal) decommitted range
    c->decomitted_address=start;
    c->decomitted_size=size; }


  // Map the decommitted interior of a chunk back in and clear the flag.
  static Void Chunk_commit(struct MemoryChunk *chunk) {
    struct MemoryChunk2 *c;
    check(!Chunk_is_committed(chunk));
    c=(struct MemoryChunk2 *)chunk;
    memory_page_commit(c->decomitted_address,c->decomitted_size);
    chunk->size&=~chunk_is_decomitted; }

#endif

/*****************************************************************************/


// Link 'chunk' into the circular free list just before 'before'.
static Void Chunk_insert_before(struct MemoryChunk *chunk,struct MemoryChunk *before) {
  struct MemoryChunk *prev;
  prev=before->previous;
  prev->next=chunk;
  chunk->previous=prev;
  chunk->next=before;
  before->previous=chunk; }


// Link 'chunk' into the circular free list just after 'after'.
static Void Chunk_insert_after(struct MemoryChunk *chunk,struct MemoryChunk *after) {
  struct MemoryChunk *succ;
  succ=after->next;
  after->next=chunk;
  chunk->previous=after;
  chunk->next=succ;
  succ->previous=chunk; }


// Unlink 'chunk' from its circular free list.
static Void Chunk_remove(struct MemoryChunk *chunk) {
  struct MemoryChunk *p,*n;
  check(chunk->next->previous==chunk);
  check(chunk->previous->next==chunk);
  p=chunk->previous;
  n=chunk->next;
  p->next=n;
  n->previous=p; }


// Unlink a free chunk from its bin and, in decommit-capable builds, map
// its interior pages back in before it is handed out or split.
#ifdef _MEMORY_DECOMMIT_
  static Void Chunk_remove_and_commit(struct MemoryChunk *chunk) {
    Chunk_remove(chunk);
    if(!Chunk_is_committed(chunk))
      Chunk_commit(chunk); }
#else
  #define Chunk_remove_and_commit Chunk_remove
#endif

// File a free chunk of 'size' bytes into the matching bin of 'pool' and
// flag the bin as non-empty.  The chunk bordering the end of its area
// goes to the dedicated last bin (127); small chunks are appended to
// their exact-size bin unsorted; larger chunks keep their bin sorted by
// decreasing size.  Chunks at least G.memory_decommit_threshold bytes
// large get their interior pages unmapped first.
static Void Chunk_insert(struct MemoryPool *pool,struct MemoryChunk *chunk,Int size) {
  struct MemoryChunk *next;
  Int index; struct MemoryChunk *binchunk,*before;
  check(size==Chunk_size(chunk));
  #ifdef _MEMORY_DECOMMIT_
    if(size>=G.memory_decommit_threshold)
      Chunk_decommit(chunk);
  #endif
  next=Chunk_next(chunk);
  if(Chunk_is_stop(next)) {
    // chunk touches the area stop mark: keep it apart so it can grow
    index=127;
  orif(size<sort_threshold)
    index=index_for_small_size(size);
    // small chunks are not sorted
    Chunk_insert_after(chunk,bin_chunk(pool,index));
    bit_set(pool->noempty_bins,index);
    return;
  other
    index=index_for_size(size); }
  binchunk=bin_chunk(pool,index);
  for(before=binchunk->next; before!=binchunk && size<Chunk_size(before); before=before->next);
  // smallest chunks are stored at the end of the bin
  Chunk_insert_before(chunk,before);
  bit_set(pool->noempty_bins,index); }


// Empty the remainder slot (bin 0) by making its head self-linked.
static Void remainder_clear(struct MemoryPool *pool) {
  struct MemoryChunk *head;
  head=bin_chunk(pool,0);
  head->previous=head;
  head->next=head; }


// Record 'chunk' as the remainder chunk (bin 0), the leftover of the most
// recent split, which MemoryPool_allocate tries before scanning the other
// bins.  A chunk ending at the area stop mark is filed normally instead,
// so bin 127 keeps tracking the area tail.
static Void remainder_set(struct MemoryPool *pool,struct MemoryChunk *chunk) {
  Int size; struct MemoryChunk *next,*binchunk;
  size=Chunk_size(chunk);
  next=Chunk_translate(chunk,size);
  if(Chunk_is_stop(next)) {
    remainder_clear(pool);
    Chunk_insert(pool,chunk,size);
  other
    binchunk=bin_chunk(pool,0);
    binchunk->next=binchunk->previous=chunk;
    chunk->next=chunk->previous=binchunk; } }


/*****************************************************************************/


// Initialize a pool: zero the whole structure, then make every bin head an
// empty self-linked circular list.
// NOTE(review): declared to return Address but has no return statement;
// callers presumably ignore the result - confirm before relying on it.
FUNCTION Address MemoryPool_build(struct MemoryPool *pool) {
  Int i; struct MemoryChunk *chunk;
  memory_clear(pool,sizeof(struct MemoryPool)); 
  for(i=0; i<nb_bins; i++) {
    chunk=bin_chunk(pool,i);
    chunk->next=chunk->previous=chunk; } }

// Release every memory area owned by the pool: decommit the consumed pages
// then release the whole reservation, newest area first.
// NOTE(review): declared to return Address but has no return statement;
// callers presumably ignore the result - confirm.
FUNCTION Address MemoryPool_destroy(struct MemoryPool *pool) {
  Int i;
  for(i=pool->nb_area-1; i>=0; i--) {
    memory_page_decommit(pool->areas[i].address,pool->areas[i].consumed);
    memory_page_release(pool->areas[i].address,pool->areas[i].size); } }


// Default (no-op) hooks called just before and after the pool is extended
// with fresh pages.
FUNCTION Void default_pre_extend(Int size) {}
FUNCTION Void default_post_extend() {}

// Public entry point: dispatch to the installed allocation hook.
FUNCTION Address memory_allocate(Int asked,Address with) {
  return G.memory_allocate_hook(asked,with); }

// Allocate 'asked' bytes from 'pool' and return the user area address
// (null when asked==0).  Search order: exact-fit small bins, then the
// sorted bin for the size class, then the remainder chunk (bin 0), then a
// bitmap-guided scan of all larger bins; when everything fails, a new
// area (or a new slice of the current area) is committed and the
// allocation is retried.
// 'with' is forwarded by the hook interface; the special value
// (Address)-1 (used by MemoryPool_resize) asks for the split leftover to
// be recorded as the remainder chunk rather than filed in a bin.
FUNCTION Address MemoryPool_allocate(struct MemoryPool *pool,Int asked,Address with) {
  Int size,index; struct MemoryChunk *binchunk;
  struct MemoryChunk *victim; Int victim_size;
  struct MemoryChunk *remainder; Int remainder_size;
  Address address; Bool newsection; Int reserve;
  MEMORY_LOCK_VAR MEMORY_UNLOCK_VAR
  check(asked>=0);
  if(asked==0) return null;
  size=required_size(asked);
  MEMORY_LOCK
  if(size<sort_threshold-align_size) {
    // small request: a bin holds a single chunk size, so any chunk of the
    // bin (or of the next bin, wasting less than one header) fits exactly
    index=index_for_small_size(size);
    binchunk=bin_chunk(pool,index);
    victim=binchunk->previous;
    if(victim==binchunk) {
      index++;
      binchunk=bin_chunk(pool,index);
      victim=binchunk->previous; }
    if(victim!=binchunk) {
      victim_size=Chunk_size(victim);
      Chunk_remove(victim);
      Chunk_set_inuse(victim);
      Chunk_check_allocated(victim,size);
      G.memory_current_used+=Chunk_size(victim);
      MEMORY_UNLOCK
      #ifdef _CHECK_
        memory_random(Chunk_user_area(victim),memory_size(Chunk_user_area(victim)));
      #endif
      return Chunk_user_area(victim); }
    index++;
  other
    // large request: scan the size-sorted bin from its smallest chunk up
    index=index_for_size(size);
    binchunk=bin_chunk(pool,index);
    for(victim=binchunk->previous; victim!=binchunk; victim=victim->previous) {
      victim_size=Chunk_size(victim);
      remainder_size=victim_size-size;
      if(remainder_size>=sizeof(struct MemoryChunk)) {
        // big enough to split: handled by the generic scan below
        index--;
        break;
      orif(remainder_size>=0)
        // exact fit (up to header slack): take it as is
        Chunk_remove_and_commit(victim);
        Chunk_set_inuse(victim);
        Chunk_check_allocated(victim,size);
        G.memory_current_used+=Chunk_size(victim);
        MEMORY_UNLOCK
        #ifdef _CHECK_
          memory_random(Chunk_user_area(victim),memory_size(Chunk_user_area(victim)));
        #endif
        return Chunk_user_area(victim); } }
    index++; }
  // try the remainder chunk (leftover of the previous split)
  binchunk=bin_chunk(pool,0);
  victim=binchunk->previous;
  if(victim!=binchunk) {
    victim_size=Chunk_size(victim);
    remainder_size=victim_size-size;
    if(remainder_size>=sizeof(struct MemoryChunk)) {
      victim->size=size|previous_chunk_is_inuse;
      remainder=Chunk_translate(victim,size);
      remainder->size=remainder_size|previous_chunk_is_inuse;
      Chunk_translate(remainder,remainder_size)->prev_size=remainder_size;
      remainder_set(pool,remainder);
      Chunk_check_allocated(victim,size);
      G.memory_current_used+=Chunk_size(victim);
      MEMORY_UNLOCK
      #ifdef _CHECK_
        memory_random(Chunk_user_area(victim),memory_size(Chunk_user_area(victim)));
      #endif
      return Chunk_user_area(victim);
    orif(remainder_size>=0)
      Chunk_set_inuse(victim);
      remainder_clear(pool);
      Chunk_check_allocated(victim,size);
      G.memory_current_used+=Chunk_size(victim);
      MEMORY_UNLOCK
      #ifdef _CHECK_
        memory_random(Chunk_user_area(victim),memory_size(Chunk_user_area(victim)));
      #endif
      return Chunk_user_area(victim);
    other
      // remainder too small for this request: file it normally
      remainder_clear(pool);
      Chunk_insert(pool,victim,victim_size); } }
  // bitmap-guided scan of all bins able to hold the request, skipping
  // empty bins 32 or 8 at a time via the noempty_bins bit array
  while(index<nb_bins)
    if(*(uInt32 *)(pool->noempty_bins+index/8)>>(index%8)==0) {
      index=round_up(index+1,32);
    orif(*(uInt8 *)(pool->noempty_bins+index/8)>>(index%8)==0)
      index=round_up(index+1,8);
    orif(!bit_test(pool->noempty_bins,index))
      index++;
    other
      binchunk=bin_chunk(pool,index);
      for(victim=binchunk->previous; victim!=binchunk; victim=victim->previous) {
        victim_size=Chunk_size(victim);
        remainder_size=victim_size-size;
        if(remainder_size>=sizeof(struct MemoryChunk)) {
          // split: the leftover becomes the remainder chunk, except when
          // the caller (resize) asked otherwise with (Address)-1
          Chunk_remove_and_commit(victim);
          victim->size=size|previous_chunk_is_inuse;
          remainder=Chunk_translate(victim,size);
          remainder->size=remainder_size|previous_chunk_is_inuse;
          Chunk_translate(remainder,remainder_size)->prev_size=remainder_size;
          if(with!=(Address)-1) {
            remainder_set(pool,remainder);
          other
            Chunk_insert(pool,remainder,remainder_size); }
          Chunk_check_allocated(victim,size);
          G.memory_current_used+=Chunk_size(victim);
          MEMORY_UNLOCK
          #ifdef _CHECK_
            memory_random(Chunk_user_area(victim),memory_size(Chunk_user_area(victim)));
          #endif
          return Chunk_user_area(victim);
        orif(remainder_size>=0)
          Chunk_remove_and_commit(victim);
          Chunk_set_inuse(victim);
          Chunk_check_allocated(victim,size);
          G.memory_current_used+=Chunk_size(victim);
          MEMORY_UNLOCK
          #ifdef _CHECK_
            memory_random(Chunk_user_area(victim),memory_size(Chunk_user_area(victim)));
          #endif
          return Chunk_user_area(victim); } }
      if(binchunk->previous==binchunk)
        bit_clear(pool->noempty_bins,index);
      index++; }
  // nothing fits: extend the pool with freshly committed pages
  G.memory_pre_extend_hook(size);
  size=round_up(size+sizeof(Int),commit_step);
  newsection=false;
  if(pool->nb_area==0 || pool->areas[pool->nb_area-1].size-pool->areas[pool->nb_area-1].consumed<size) {
    reserve=maximum(size,G.memory_maximal_address==null ? first_reserve_step : reserve_step);
    pool->areas[pool->nb_area].address=memory_page_reserve(null,reserve);
    pool->areas[pool->nb_area].size=reserve;
    pool->areas[pool->nb_area++].consumed=0;
    newsection=true; }
  address=address_translate(pool->areas[pool->nb_area-1].address,pool->areas[pool->nb_area-1].consumed);
  pool->areas[pool->nb_area-1].consumed+=size;
  memory_page_commit(address,size);
  if(integer_from_address(address)<integer_from_address(G.memory_minimal_address)) G.memory_minimal_address=address;
  if(integer_from_address(address_translate(address,size-1))>integer_from_address(G.memory_maximal_address)) G.memory_maximal_address=address_translate(address,size-1);
  if(newsection) {
    // brand new area: build one big free chunk followed by a stop mark
    victim=Chunk_first(address);
    victim_size=size-align_size;
    victim->size=victim_size|previous_chunk_is_inuse;
    remainder=Chunk_translate(victim,victim_size);
    remainder->prev_size=victim_size;
    remainder->size=0;
    Chunk_insert(pool,victim,victim_size);
    Chunk_check_free(victim);
    MEMORY_UNLOCK
  other
    // extension of the current area: turn the old stop mark into an
    // in-use chunk covering the new pages, then free it so it coalesces
    victim=struct_from_field(MemoryChunk,size,address_translate(address,-sizeof(Int)));
    victim_size=size;
    victim->size=victim_size|(victim->size&previous_chunk_is_inuse);
    remainder=Chunk_translate(victim,victim_size);
    remainder->size=previous_chunk_is_inuse;
    Chunk_check_inuse(victim);
    G.memory_current_used+=victim_size;
    MEMORY_UNLOCK
    MemoryPool_free(pool,Chunk_user_area(victim)); }
  // retry now that the pool has grown.  (A duplicated pre-patch line
  // 'return MemoryPool_allocate(pool,asked,with); }' was removed here: it
  // closed the function early, left the three lines below dangling at file
  // scope, and skipped the post-extend hook.)
  address=MemoryPool_allocate(pool,asked,with);
  G.memory_post_extend_hook();
  return address; }

// Default allocate hook: serve from the global pool.
FUNCTION Address default_memory_allocate(Int asked,Address with) {
  return MemoryPool_allocate(&G.memory_pool,asked,with); }


// Public entry point: dispatch to the installed zeroing-allocate hook.
FUNCTION Address memory_zallocate(Int asked,Address with) {
  return G.memory_zallocate_hook(asked,with); }

// Allocate 'asked' bytes from 'pool' and clear the whole user area
// (which may be slightly larger than 'asked' due to chunk rounding).
FUNCTION Address MemoryPool_zallocate(struct MemoryPool *pool,Int asked,Address with) {
  Address mem;
  mem=MemoryPool_allocate(pool,asked,with);
  memory_clear(mem,memory_size(mem));
  return mem; }

// Default zeroing-allocate hook: serve from the global pool.
FUNCTION Address default_memory_zallocate(Int asked,Address with) {
  return MemoryPool_zallocate(&G.memory_pool,asked,with); }


// Public entry point: dispatch to the installed free hook.
FUNCTION Void memory_free(Address mem) {
  G.memory_free_hook(mem); }

// Return 'mem' to 'pool': coalesce the chunk with its free neighbours,
// merge any decommitted page ranges the neighbours carried (decommitting
// the gap between two merged ranges so the union stays contiguous), then
// file the result - back into the remainder slot if a neighbour was the
// remainder chunk, otherwise into the regular bins.
FUNCTION Void MemoryPool_free(struct MemoryPool *pool,Address mem) {
  struct MemoryChunk *chunk,*next,*previous; Int size;
  struct MemoryChunk2 *c;
  #ifdef _MEMORY_DECOMMIT_
    Address decomitted_address; Int decomitted_size;
  #endif
  Address middle_address; Int middle_size;
  Bool is_remainder;
  MEMORY_LOCK_VAR MEMORY_UNLOCK_VAR
  if(mem==null) return;
  MEMORY_LOCK
  chunk=struct_from_field(MemoryChunk,next,mem);
  Chunk_check_inuse(chunk);
  #ifdef _CHECK_
    // scramble freed memory so use-after-free shows up quickly
    memory_random(mem,memory_size(mem));
  #endif
  size=Chunk_size(chunk);
  G.memory_current_used-=size;
  #ifdef _MEMORY_DECOMMIT_
    decomitted_size=0;
  #endif
  is_remainder=false;
  // coalesce with the following chunk when it is free
  next=Chunk_translate(chunk,size);
  if(!Chunk_is_stop(next) && !Chunk_is_inuse(next)) {
    #ifdef _MEMORY_DECOMMIT_
      if(!Chunk_is_committed(next)) {
        c=(struct MemoryChunk2 *)next;
        decomitted_address=c->decomitted_address;
        decomitted_size=c->decomitted_size; }
    #endif
    if(bin_chunk(pool,0)->next==next)
      is_remainder=true;
    Chunk_remove(next);
    size+=Chunk_size(next); }
  // coalesce with the preceding chunk when it is free
  if(!(chunk->size&previous_chunk_is_inuse)) {
    previous=Chunk_translate(chunk,-chunk->prev_size);
    #ifdef _MEMORY_DECOMMIT_
      if(!Chunk_is_committed(previous)) {
        c=(struct MemoryChunk2 *)previous;
        if(decomitted_size==0) {
          decomitted_address=c->decomitted_address;
          decomitted_size=c->decomitted_size;
        other
          // both neighbours were decommitted: decommit the pages between
          // the two ranges so the merged range is one contiguous span
          middle_address=address_translate(c->decomitted_address,c->decomitted_size);
          middle_size=integer_from_address(decomitted_address)-integer_from_address(middle_address);
          memory_page_decommit(middle_address,middle_size);
          decomitted_address=c->decomitted_address;
          decomitted_size+=c->decomitted_size+middle_size; } }
    #endif
    if(bin_chunk(pool,0)->next==previous)
      is_remainder=true;
    Chunk_remove(previous);
    size+=Chunk_size(previous);
    chunk=previous; }
  // write the merged chunk header, carrying over the decommitted range
  #ifdef _MEMORY_DECOMMIT_
    if(decomitted_size==0) {
      chunk->size=size|previous_chunk_is_inuse;
    other
      chunk->size=size|previous_chunk_is_inuse|chunk_is_decomitted;
      c=(struct MemoryChunk2 *)chunk;
      c->decomitted_address=decomitted_address;
      c->decomitted_size=decomitted_size; }
  #else
    chunk->size=size|previous_chunk_is_inuse;
  #endif
  next=Chunk_translate(chunk,size);
  next->prev_size=size;
  next->size&=~previous_chunk_is_inuse;
  #ifdef _MEMORY_DECOMMIT_
    if(is_remainder && decomitted_size==0)
  #else
    if(is_remainder)
  #endif
      remainder_set(pool,chunk);
  else
    Chunk_insert(pool,chunk,size);
  MEMORY_UNLOCK }

// Default free hook: return to the global pool.
FUNCTION Void default_memory_free(Address mem) {
  MemoryPool_free(&G.memory_pool,mem); }


// Public entry point: dispatch to the installed resize hook.
FUNCTION Address memory_resize(Address oldmem,Int asked,Address with) {
  return G.memory_resize_hook(oldmem,asked,with); }

// Resize the block at 'oldmem' to hold 'asked' bytes, returning its (possibly
// moved) user area.  asked==0 frees and returns null; oldmem==null degrades to
// a plain allocation.  Growing first tries to absorb the free following chunk,
// then additionally the free preceding chunk (moving the data down), and only
// then falls back to allocate+copy+free; shrinking (or growing with slack)
// splits off the excess and frees it.
FUNCTION Address MemoryPool_resize(struct MemoryPool *pool,Address oldmem,Int asked,Address with) {
  Int size;
  struct MemoryChunk *oldchunk; Int oldsize;
  struct MemoryChunk *newchunk; Int newsize;
  struct MemoryChunk *next; Int nextsize;
  struct MemoryChunk *prev; Int prevsize;
  struct MemoryChunk *remainder; Int remainder_size;
  Address newmem;
  MEMORY_LOCK_VAR MEMORY_UNLOCK_VAR
  check(asked>=0);
  if(asked==0) {
    if(oldmem!=null) MemoryPool_free(pool,oldmem);
    return null;
  orif(oldmem==null)
    return MemoryPool_allocate(pool,asked,with); }
  oldchunk=newchunk=struct_from_field(MemoryChunk,next,oldmem);
  oldsize=newsize=Chunk_size(oldchunk);
  MEMORY_LOCK
  Chunk_check_inuse(oldchunk);
  size=required_size(asked);
  if(oldsize<size) {
    // try to grow in place by absorbing the following free chunk
    next=Chunk_translate(oldchunk,oldsize);
    if(!Chunk_is_stop(next) && !Chunk_is_inuse(next)) {
      nextsize=Chunk_size(next);
      if(nextsize+newsize>=size) {
        Chunk_remove_and_commit(next);
        newsize+=nextsize;
        goto split; }
    other
      next=null;
      nextsize=0; }
    // try to grow by absorbing the preceding free chunk as well,
    // sliding the user data down to the new start
    if(!(oldchunk->size&previous_chunk_is_inuse)) {
      prevsize=oldchunk->prev_size;
      prev=Chunk_translate(oldchunk,-prevsize);
      if(prevsize+oldsize+nextsize>=size) {
        Chunk_remove_and_commit(prev);
        if(next!=null) Chunk_remove_and_commit(next);
        newchunk=prev;
        newsize+=prevsize+nextsize;
        newmem=Chunk_user_area(newchunk);
        memory_copy(oldmem,newmem,oldsize-sizeof(Int));
        goto split; } }
    // cannot grow in place: allocate elsewhere ((Address)-1 makes the
    // allocator keep its split leftover as the remainder chunk), copy, free
    MEMORY_UNLOCK
    newmem=MemoryPool_allocate(pool,asked,(Address)-1);
    // consistency fix: was 'sizeof(uInt)', every other copy in this
    // function measures the header with sizeof(Int)
    memory_copy(oldmem,newmem,oldsize-sizeof(Int));
    MemoryPool_free(pool,oldmem);
    return newmem; }
  split:
  // give back the excess when it is large enough to stand as a chunk
  if(newsize-size>=sizeof(struct MemoryChunk)) {
    remainder=Chunk_translate(newchunk,size);
    remainder_size=newsize-size;
    newchunk->size=(newchunk->size&previous_chunk_is_inuse)|size;
    remainder->size=remainder_size|previous_chunk_is_inuse;
    Chunk_set_inuse(remainder);
    G.memory_current_used+=Chunk_size(newchunk)-oldsize+remainder_size;
    Chunk_check_inuse(newchunk);
    MEMORY_UNLOCK
    MemoryPool_free(pool,Chunk_user_area(remainder));
  other
    newchunk->size=(newchunk->size&previous_chunk_is_inuse)|newsize;
    Chunk_set_inuse(newchunk);
    G.memory_current_used+=Chunk_size(newchunk)-oldsize;
    Chunk_check_inuse(newchunk);
    MEMORY_UNLOCK }
  return Chunk_user_area(newchunk); }

// Default resize hook: operate on the global pool.
FUNCTION Address default_memory_resize(Address oldmem,Int asked,Address with) {
  return MemoryPool_resize(&G.memory_pool,oldmem,asked,with); }


// Public entry point: dispatch to the installed zeroing-resize hook.
FUNCTION Address memory_zresize(Address oldmem,Int asked,Address with) {
  return G.memory_zresize_hook(oldmem,asked,with); }

// Resize the block at 'oldmem' and clear whatever part of the new user
// area extends beyond the old one.
FUNCTION Address MemoryPool_zresize(struct MemoryPool *pool,Address oldmem,Int asked,Address with) {
  Address mem;
  Int before,after;
  before=memory_size(oldmem);
  mem=MemoryPool_resize(pool,oldmem,asked,with);
  after=memory_size(mem);
  if(after>before)
    memory_clear(address_translate(mem,before),after-before);
  return mem; }

// Default zeroing-resize hook: operate on the global pool.
FUNCTION Address default_memory_zresize(Address oldmem,Int asked,Address with) {
  return MemoryPool_zresize(&G.memory_pool,oldmem,asked,with); }


// Public entry point: dispatch to the installed size hook.
FUNCTION Int memory_size(Address mem) {
  return G.memory_size_hook(mem); }

// Usable byte count of an allocated block: the chunk size minus the size
// header; null maps to 0.
FUNCTION Int default_memory_size(Address mem) {
  struct MemoryChunk *c;
  if(mem==null) return 0;
  c=struct_from_field(MemoryChunk,next,mem);
  Chunk_check(c);
  return Chunk_size(c)-sizeof(Int); }


// Public entry point: dispatch to the installed checkup hook.
FUNCTION Void memory_checkup() {
  G.memory_checkup_hook(); }

// Report a fatal corruption and bail out of the enclosing Void function.
#define memory_assert(cond) if(!(cond)) { error_notify_fatalz(error_id_memory_violation,"Memory is corrupted"); return; }

// Walk every chunk of every area and verify the structural invariants:
// headers stay inside their area, sizes advance monotonically, free chunks
// mirror their size in the successor's prev_size and are correctly double
// linked, and the walk ends exactly at the area's consumed boundary.
FUNCTION Void MemoryPool_checkup(struct MemoryPool *pool) {
  Int i; struct MemoryArea *a; struct MemoryChunk *c,*n; Int o1,o2;
  for(i=0; i<pool->nb_area; i++) {
    a=pool->areas+i;
    for(c=Chunk_first(a->address); !Chunk_is_stop(c); c=n) {
      o1=integer_from_address(&c->size)-integer_from_address(a->address);
      memory_assert(o1>=0 && o1<a->size);
      n=Chunk_next(c);
      o2=integer_from_address(&n->size)-integer_from_address(a->address);
      memory_assert(o2>o1 && o2<a->size);
      // 'n' free means 'c' must be its valid predecessor link target
      if(!(n->size&previous_chunk_is_inuse)) {
        memory_assert(n->prev_size==integer_from_address(n)-integer_from_address(c));
        memory_assert(c->next->previous==c);
        memory_assert(c->previous->next==c); } }
    memory_assert(c==Chunk_translate(Chunk_first(a->address),a->consumed-align_size)); } }

// Default checkup hook: verify the global pool.
FUNCTION Void default_memory_checkup() {
  MemoryPool_checkup(&G.memory_pool); }


// Public entry point: dispatch to the installed shrink hook.
FUNCTION Void memory_shrink(Bool clearalso) {
  G.memory_shrink_hook(clearalso); }

// Give physical memory back to the system: decommit the interior pages of
// every free chunk.  When 'clearalso' is set, additionally zero the parts
// of each free chunk that remain mapped (the bytes before and after any
// decommitted range).
FUNCTION Void MemoryPool_shrink(struct MemoryPool *pool,Bool clearalso) {
  Int i; struct MemoryChunk *c; struct MemoryChunk2 *c2; Byte *start,*stop;
  for(i=0; i<pool->nb_area; i++)
    for(c=Chunk_first(pool->areas[i].address); !Chunk_is_stop(c); c=Chunk_next(c)) {
      if(Chunk_is_inuse(c)) continue;
      #ifdef _MEMORY_DECOMMIT_
        Chunk_decommit(c);
      #endif
      if(!clearalso) continue;
      #ifdef _MEMORY_DECOMMIT_
        if(Chunk_is_committed(c)) {
          start=(Byte *)(c+1),stop=(Byte *)Chunk_next(c);
          memory_clear(start,stop-start);
        other
          // only the margins around the decommitted range are still mapped
          c2=(struct MemoryChunk2 *)c;
          start=(Byte *)(c2+1),stop=(Byte *)c2->decomitted_address;
          memory_clear(start,stop-start);
          start=(Byte *)address_translate(c2->decomitted_address,c2->decomitted_size),stop=(Byte *)Chunk_next(c);
          memory_clear(start,stop-start); }
      #else
        start=(Byte *)(c+1),stop=(Byte *)Chunk_next(c);
        memory_clear(start,stop-start);
      #endif
      } }

// Default shrink hook: shrink the global pool.
FUNCTION Void default_memory_shrink(Bool clearalso) {
  MemoryPool_shrink(&G.memory_pool,clearalso); }

#ifdef _MARK_OBJECTS_
  // Public entry point: dispatch to the installed enumerate hook.
  FUNCTION Void memory_enumerate(Void (*hook)(Address area)) {
    G.memory_enumerate_hook(hook); }

  // Call 'hook' on the user area of every in-use chunk flagged as an
  // object (chunk_is_object bit set in its size field).
  FUNCTION Void MemoryPool_enumerate(struct MemoryPool *pool,Void (*hook)(Address area)) {
    Int i; struct MemoryChunk *c;
    MEMORY_LOCK_VAR MEMORY_UNLOCK_VAR
    MEMORY_LOCK
    for(i=0; i<pool->nb_area; i++)
      for(c=Chunk_first(pool->areas[i].address); !Chunk_is_stop(c); c=Chunk_next(c))
        if(Chunk_is_inuse(c) && (c->size&chunk_is_object))
          hook(Chunk_user_area(c));
    MEMORY_UNLOCK }

  // Default enumerate hook: walk the global pool.
  FUNCTION Void default_memory_enumerate(Void (*hook)(Address area)) {
    MemoryPool_enumerate(&G.memory_pool,hook); }
#endif

#ifdef _CHECK_
  // Debug aid: print, for every in-use chunk of every pool, its user byte
  // count and up to 60 leading bytes (non-printable bytes shown as '.').
  FUNCTION Void memory_dump() {
    struct MemoryPool *pool; Int i; struct MemoryChunk *c; Int s; struct Str temp; Char buffer[16];
    Char *ptr; Int j;
    for(pool=&G.memory_pool; pool!=null; pool=pool->next)
      for(i=0; i<pool->nb_area; i++)
        for(c=Chunk_first(pool->areas[i].address); !Chunk_is_stop(c); c=Chunk_next(c))
          if(Chunk_is_inuse(c)) {
            // user payload size: chunk size minus header-to-user-area gap
            s=Chunk_size(c)-((Byte *)Chunk_user_area(c)-(Byte *)c);
            consolen(
              S,Str_map_area(&temp,buffer,Int_str2(s,10,buffer)),
              Z," bytes: ",END);
            ptr=(Char *)Chunk_user_area(c);
            for(j=0; j<minimum(s,60); j++)
              consolen(S,Str_map_area(&temp,ptr[j]>=32 && ptr[j]<128 ? &ptr[j] : (Char *)".",1),END);
            consolen(EOL); } }
#endif