/* DOSBox-X — src/cpu/core_dynrec/cache.h
 * Dynamic-recompiler code cache: cache block bookkeeping, code page
 * handlers for self-modifying-code detection, and code emission helpers. */
00001 /*
00002  *  Copyright (C) 2002-2020  The DOSBox Team
00003  *
00004  *  This program is free software; you can redistribute it and/or modify
00005  *  it under the terms of the GNU General Public License as published by
00006  *  the Free Software Foundation; either version 2 of the License, or
00007  *  (at your option) any later version.
00008  *
00009  *  This program is distributed in the hope that it will be useful,
00010  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
00011  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00012  *  GNU General Public License for more details.
00013  *
00014  *  You should have received a copy of the GNU General Public License along
00015  *  with this program; if not, write to the Free Software Foundation, Inc.,
00016  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
00017  */
00018 
00019 
00020 class CodePageHandlerDynRec;    // forward
00021 
// basic cache block representation
// A cache block describes one translated run of guest code: where the
// original bytes live in their guest page, where the generated host code
// lives in the code cache, and how the block is linked to other blocks.
class CacheBlockDynRec {
public:
	// Unlink this block from everything (links, crossblock, page handler)
	// and release its writemap mask; defined below.
	void Clear(void);
	// link this cache block to another block, index specifies the code
	// path (always zero for unconditional links, 0/1 for conditional ones)
	void LinkTo(Bitu index,CacheBlockDynRec * toblock) {
		assert(toblock);
		link[index].to=toblock;
		link[index].next=toblock->link[index].from;     // chain into the target's from-list
		toblock->link[index].from=this;                 // remember who links me
	}
	struct {
		Bit16u start,end;               // where in the page is the original code (byte offsets)
		CodePageHandlerDynRec * handler;                // page containing this code
	} page;
	struct {
		Bit8u * start;                  // where in the cache are we (host code for this block)
		Bitu size;                      // bytes of cache space owned by this block
		CacheBlockDynRec * next;        // next block in cache memory order (also reused as free-list link)
		// writemap masking maskpointer/start/length
		// to allow holes in the writemap
		Bit8u * wmapmask;
		Bit16u maskstart;
		Bit16u masklen;
	} cache;
	struct {
		Bitu index;                     // hash bucket this block lives in (0 = cross-page bucket)
		CacheBlockDynRec * next;        // next block in the same hash bucket
	} hash;
	struct {
		CacheBlockDynRec * to;          // this block can transfer control to the to-block
		CacheBlockDynRec * next;        // next entry in the to-block's from-list
		CacheBlockDynRec * from;        // the from-block can transfer control to this block
	} link[2];      // maximum two links (conditional jumps)
	CacheBlockDynRec * crossblock;      // partner block when code spans a page boundary
};
00059 
// global state of the code cache
static struct {
	struct {
		CacheBlockDynRec * first;               // the first cache block in the list
		CacheBlockDynRec * active;              // the current cache block (next one to be opened for emission)
		CacheBlockDynRec * free;                // pointer to the free list
		CacheBlockDynRec * running;             // the last block that was entered for execution
	} block;
	Bit8u * pos;            // position in the cache block (current emission pointer)
	CodePageHandlerDynRec * free_pages;             // pointer to the free list
	CodePageHandlerDynRec * used_pages;             // pointer to the list of used pages
	CodePageHandlerDynRec * last_page;              // the last used page
} cache;
00072 
00073 
// cache memory pointers, to be malloc'd later
static Bit8u * cache_code_start_ptr=NULL;       // raw (unaligned) allocation base
static Bit8u * cache_code=NULL;                 // page-aligned start of translated-code storage
static Bit8u * cache_code_link_blocks=NULL;     // one page reserved for the default link stubs

static CacheBlockDynRec * cache_blocks=NULL;    // backing array for all CacheBlockDynRec objects
static CacheBlockDynRec link_blocks[2];         // default linking (specially marked)
00081 
00082 
// the CodePageHandlerDynRec class provides access to the contained
// cache blocks and intercepts writes to the code for special treatment
// (self-modifying-code detection: write_map[i] counts how many cache
// blocks cover byte i of the page, so a write to a counted byte must
// invalidate the affected blocks).
class CodePageHandlerDynRec : public PageHandler {
public:
	CodePageHandlerDynRec() {
	}

	// Attach this handler to physical page _phys_page, keeping the old
	// handler for direct memory access and for later restoration.
	void SetupAt(Bitu _phys_page,PageHandler * _old_pagehandler) {
		// initialize this codepage handler
		phys_page=_phys_page;
		// save the old pagehandler to provide direct read access to the memory,
		// and to be able to restore it later on
		old_pagehandler=_old_pagehandler;

		// adjust flags: the page now holds code and must trap writes
		flags=old_pagehandler->flags|PFLAG_HASCODE;
		flags&=~PFLAG_WRITEABLE;

		active_blocks=0;
		active_count=16;

		// initialize the maps with zero (no cache blocks as well as code present)
		memset(&hash_map,0,sizeof(hash_map));
		memset(&write_map,0,sizeof(write_map));
		if (invalidation_map!=NULL) {
			free(invalidation_map);
			invalidation_map=NULL;
		}
	}

	// clear out blocks that contain code which has been modified;
	// start/end are byte offsets within this page. Returns true if the
	// currently running block was invalidated (caller must exit it).
	bool InvalidateRange(Bitu start,Bitu end) {
		Bits index=1+(Bits)(end>>(Bitu)DYN_HASH_SHIFT);
		bool is_current_block=false;    // if the current block is modified, it has to be exited as soon as possible

		// translate the current instruction pointer into an offset inside
		// this page so we can tell whether the running block is hit
		Bit32u ip_point=SegPhys(cs)+reg_eip;
		ip_point=(Bit32u)((PAGING_GetPhysicalPage(ip_point)-(phys_page<<12))+(ip_point&0xfff));
		// walk the hash buckets downward, including bucket 0 (cross-page blocks)
		while (index>=0) {
			Bitu map=0;
			// see if there is still some code in the range
			for (Bitu count=start;count<=end;count++) map+=write_map[count];
			if (!map) return is_current_block;      // no more code, finished

			CacheBlockDynRec * block=hash_map[index];
			while (block) {
				// Clear() unlinks the block from this bucket, so fetch next first
				CacheBlockDynRec * nextblock=block->hash.next;
				// test if this block is in the range
				if (start<=block->page.end && end>=block->page.start) {
					if (ip_point<=block->page.end && ip_point>=block->page.start) is_current_block=true;
					block->Clear();         // clear the block, decrements the write_map accordingly
				}
				block=nextblock;
			}
			index--;
		}
		return is_current_block;
	}

	// the following functions will clean all cache blocks that are invalid now due to the write
	void writeb(PhysPt addr,Bit8u val){
		addr&=4095;
		if (host_readb(hostmem+addr)==val) return;      // value unchanged: nothing to invalidate
		host_writeb(hostmem+addr,val);
		// see if there's code where we are writing to
		if (!host_readb(&write_map[addr])) {
			if (active_blocks) return;              // still some blocks in this page
			active_count--;
			if (!active_count) Release();   // delay page releasing until active_count is zero
			return;
		} else if (!invalidation_map) {
			// lazily allocate the per-byte invalidation counters
			invalidation_map = (Bit8u*)malloc(4096);
			if (invalidation_map != NULL)
				memset(invalidation_map, 0, 4096);
			else
				E_Exit("Memory allocation failed in writeb");
		}
		if (invalidation_map != NULL)
			invalidation_map[addr]++;
		InvalidateRange(addr,addr);
	}
	void writew(PhysPt addr,Bit16u val){
		addr&=4095;
		if (host_readw(hostmem+addr)==val) return;
		host_writew(hostmem+addr,val);
		// see if there's code where we are writing to
		// (reads two write_map bytes at once to cover both written bytes)
		if (!host_readw(&write_map[addr])) {
			if (active_blocks) return;              // still some blocks in this page
			active_count--;
			if (!active_count) Release();   // delay page releasing until active_count is zero
			return;
		} else if (!invalidation_map) {
			invalidation_map=(Bit8u*)malloc(4096);
			if (invalidation_map != NULL)
				memset(invalidation_map, 0, 4096);
			else
				E_Exit("Memory allocation failed in writew");
		}
		// bump both byte counters in one 16-bit operation
		// (invalidation_map is non-NULL here; E_Exit does not return)
#if defined(WORDS_BIGENDIAN) || !defined(C_UNALIGNED_MEMORY)
		host_writew(&invalidation_map[addr],
			host_readw(&invalidation_map[addr])+0x101);
#else
		if (invalidation_map != NULL)
			(*(Bit16u*)& invalidation_map[addr]) += 0x101;
#endif
		InvalidateRange(addr,addr+(Bitu)1);
	}
	void writed(PhysPt addr,Bit32u val){
		addr&=4095;
		if (host_readd(hostmem+addr)==val) return;
		host_writed(hostmem+addr,val);
		// see if there's code where we are writing to
		// (reads four write_map bytes at once to cover all written bytes)
		if (!host_readd(&write_map[addr])) {
			if (active_blocks) return;              // still some blocks in this page
			active_count--;
			if (!active_count) Release();   // delay page releasing until active_count is zero
			return;
		} else if (!invalidation_map) {
			invalidation_map=(Bit8u*)malloc(4096);
			if (invalidation_map != NULL)
				memset(invalidation_map, 0, 4096);
			else
				E_Exit("Memory allocation failed in writed");
		}
		// bump all four byte counters in one 32-bit operation
#if defined(WORDS_BIGENDIAN) || !defined(C_UNALIGNED_MEMORY)
		host_writed(&invalidation_map[addr],
			host_readd(&invalidation_map[addr])+0x1010101);
#else
		if (invalidation_map != NULL)
			(*(Bit32u*)& invalidation_map[addr]) += 0x1010101;
#endif
		InvalidateRange(addr,addr+(Bitu)3);
	}
	// checked variants: return true (and set cpu.exception.which) when the
	// write would modify the currently running block, WITHOUT performing
	// the write; the caller re-runs the write after exiting the block.
	bool writeb_checked(PhysPt addr,Bit8u val) {
		addr&=4095;
		if (host_readb(hostmem+addr)==val) return false;
		// see if there's code where we are writing to
		if (!host_readb(&write_map[addr])) {
			if (!active_blocks) {
				// no blocks left in this page, still delay the page releasing a bit
				active_count--;
				if (!active_count) Release();
			}
		} else {
			if (!invalidation_map) {
				invalidation_map = (Bit8u*)malloc(4096);
				if (invalidation_map != NULL) {
					memset(invalidation_map, 0, 4096);
				}
				else
					E_Exit("Memory allocation failed in writeb_checked");
			}
			if (invalidation_map != NULL)
				invalidation_map[addr]++;
			if (InvalidateRange(addr,addr)) {
				cpu.exception.which=SMC_CURRENT_BLOCK;
				return true;    // write postponed: current block was hit
			}
		}
		host_writeb(hostmem+addr,val);
		return false;
	}
	bool writew_checked(PhysPt addr,Bit16u val) {
		addr&=4095;
		if (host_readw(hostmem+addr)==val) return false;
		// see if there's code where we are writing to
		if (!host_readw(&write_map[addr])) {
			if (!active_blocks) {
				// no blocks left in this page, still delay the page releasing a bit
				active_count--;
				if (!active_count) Release();
			}
		} else {
			if (!invalidation_map) {
				invalidation_map=(Bit8u*)malloc(4096);
				if (invalidation_map != NULL)
					memset(invalidation_map, 0, 4096);
				else
					E_Exit("Memory allocation failed in writew_checked");
			}
#if defined(WORDS_BIGENDIAN) || !defined(C_UNALIGNED_MEMORY)
			host_writew(&invalidation_map[addr],
				host_readw(&invalidation_map[addr])+0x101);
#else
			if (invalidation_map != NULL)
				(*(Bit16u*)& invalidation_map[addr]) += 0x101;
#endif
			if (InvalidateRange(addr,addr+(Bitu)1)) {
				cpu.exception.which=SMC_CURRENT_BLOCK;
				return true;
			}
		}
		host_writew(hostmem+addr,val);
		return false;
	}
	bool writed_checked(PhysPt addr,Bit32u val) {
		addr&=4095;
		if (host_readd(hostmem+addr)==val) return false;
		// see if there's code where we are writing to
		if (!host_readd(&write_map[addr])) {
			if (!active_blocks) {
				// no blocks left in this page, still delay the page releasing a bit
				active_count--;
				if (!active_count) Release();
			}
		} else {
			if (!invalidation_map) {
				invalidation_map=(Bit8u*)malloc(4096);
				if (invalidation_map != NULL)
					memset(invalidation_map, 0, 4096);
				else
					E_Exit("Memory allocation failed in writed_checked");
			}
#if defined(WORDS_BIGENDIAN) || !defined(C_UNALIGNED_MEMORY)
			host_writed(&invalidation_map[addr],
				host_readd(&invalidation_map[addr])+0x1010101);
#else
			if (invalidation_map != NULL)
				(*(Bit32u*)& invalidation_map[addr]) += 0x1010101;
#endif
			if (InvalidateRange(addr,addr+(Bitu)3)) {
				cpu.exception.which=SMC_CURRENT_BLOCK;
				return true;
			}
		}
		host_writed(hostmem+addr,val);
		return false;
	}

	// add a cache block to this page and note it in the hash map
	void AddCacheBlock(CacheBlockDynRec * block) {
		// buckets 1..DYN_PAGE_HASH are for normal blocks; bucket 0 is reserved
		// for cross-page blocks (see AddCrossBlock)
		Bitu index=1u+(Bitu)(block->page.start>>(Bit16u)DYN_HASH_SHIFT);
		block->hash.next=hash_map[index];       // link to old block at index from the new block
		block->hash.index=index;
		hash_map[index]=block;                          // put new block at hash position
		block->page.handler=this;
		active_blocks++;
	}
	// there's a block whose code started in a different page
	void AddCrossBlock(CacheBlockDynRec * block) {
		block->hash.next=hash_map[0];
		block->hash.index=0;
		hash_map[0]=block;
		block->page.handler=this;
		active_blocks++;
	}
	// remove a cache block from the hash map and from the write map
	void DelCacheBlock(CacheBlockDynRec * block) {
		active_blocks--;
		active_count=16;        // reset the delayed-release countdown
		// unlink the block from its hash bucket
		CacheBlockDynRec * * bwhere=&hash_map[block->hash.index];
		while (*bwhere!=block) {
			bwhere=&((*bwhere)->hash.next);
			//Will crash if a block isn't found, which should never happen.
		}
		*bwhere=block->hash.next;

		// remove the cleared block from the write map
		if (GCC_UNLIKELY(block->cache.wmapmask!=NULL)) {
			// first part is not influenced by the mask
			for (Bitu i=block->page.start;i<block->cache.maskstart;i++) {
				if (write_map[i]) write_map[i]--;
			}
			Bitu maskct=0;
			// last part sticks to the writemap mask
			for (Bitu i=block->cache.maskstart;i<=block->page.end;i++,maskct++) {
				if (write_map[i]) {
					// only adjust writemap if it isn't masked
					if ((maskct>=block->cache.masklen) || (!block->cache.wmapmask[maskct])) write_map[i]--;
				}
			}
			free(block->cache.wmapmask);
			block->cache.wmapmask=NULL;
		} else {
			for (Bitu i=block->page.start;i<=block->page.end;i++) {
				if (write_map[i]) write_map[i]--;
			}
		}
	}

	// hand the page back to its previous handler and move this object
	// from the used-pages list to the free-pages list
	void Release(void) {
		MEM_SetPageHandler(phys_page,1,old_pagehandler);        // revert to old handler
		PAGING_ClearTLB();

		// remove page from the lists
		if (prev) prev->next=next;
		else cache.used_pages=next;
		if (next) next->prev=prev;
		else cache.last_page=prev;
		next=cache.free_pages;
		cache.free_pages=this;
		prev=0;
	}
	// clear every block in this page, then release it
	void ClearRelease(void) {
		// clear out all cache blocks in this page
		for (Bitu index=0;index<(1+DYN_PAGE_HASH);index++) {
			CacheBlockDynRec * block=hash_map[index];
			while (block) {
				CacheBlockDynRec * nextblock=block->hash.next;
				block->page.handler=0;                  // no need, full clear
				block->Clear();
				block=nextblock;
			}
		}
		Release();      // now can release this page
	}

	// find the block whose original code starts at page offset 'start'
	CacheBlockDynRec * FindCacheBlock(Bitu start) {
		CacheBlockDynRec * block=hash_map[1+(start>>DYN_HASH_SHIFT)];
		// see if there's a cache block present at the start address
		while (block) {
			if (block->page.start==start) return block;     // found
			block=block->hash.next;
		}
		return 0;       // none found
	}

	// reads are served from the old page handler's memory; cache hostmem
	// so the write interceptors above can access the page directly
	HostPt GetHostReadPt(Bitu phys_page) { 
		hostmem=old_pagehandler->GetHostReadPt(phys_page);
		return hostmem;
	}
	HostPt GetHostWritePt(Bitu phys_page) { 
		return GetHostReadPt( phys_page );
	}
public:
	// the write map, there are write_map[i] cache blocks that cover the byte at address i
	Bit8u write_map[4096] = {};
	Bit8u* invalidation_map = NULL;     // per-byte write counters, allocated on first SMC write
	CodePageHandlerDynRec* next = NULL; // page linking
	CodePageHandlerDynRec* prev = NULL; // page linking
private:
	PageHandler* old_pagehandler = NULL;    // handler this one replaced (restored on Release)

	// hash map to quickly find the cache blocks in this page
	CacheBlockDynRec* hash_map[1 + DYN_PAGE_HASH] = {};

	Bitu active_blocks = 0;     // the number of cache blocks in this page
	Bitu active_count = 0;      // delaying parameter to not immediately release a page
	HostPt hostmem = NULL;      // host pointer to the guest page's memory
	Bitu phys_page = 0;         // guest physical page number this handler covers
};
00423 
00424 
00425 static INLINE void cache_addunusedblock(CacheBlockDynRec * block) {
00426         // block has become unused, add it to the freelist
00427         block->cache.next=cache.block.free;
00428         cache.block.free=block;
00429 }
00430 
00431 static CacheBlockDynRec * cache_getblock(void) {
00432         // get a free cache block and advance the free pointer
00433         CacheBlockDynRec * ret=cache.block.free;
00434     if (!ret)
00435         E_Exit("Ran out of CacheBlocks");
00436     else {
00437         cache.block.free = ret->cache.next;
00438         ret->cache.next = 0;
00439     }
00440         return ret;
00441 }
00442 
// Release this block: detach it from all link chains, its cross-page
// partner and its code page handler, and free its writemap mask.
void CacheBlockDynRec::Clear(void) {
	// check if this is not a cross page block (cross-page blocks live in
	// hash bucket 0 and are just returned to the free list)
	if (hash.index) for (Bitu ind=0;ind<2;ind++) {
		// redirect every block that links TO this one back to the stub code
		CacheBlockDynRec * fromlink=link[ind].from;
		link[ind].from=0;
		while (fromlink) {
			CacheBlockDynRec * nextlink=fromlink->link[ind].next;
			// clear the next-link and let the block point to the standard linkcode
			fromlink->link[ind].next=0;
			fromlink->link[ind].to=&link_blocks[ind];

			fromlink=nextlink;
		}
		if (link[ind].to!=&link_blocks[ind]) {
			// not linked to the standard linkcode, find the block that links to this block
			CacheBlockDynRec * * wherelink=&link[ind].to->link[ind].from;
			while (*wherelink != this && *wherelink) {
				wherelink = &(*wherelink)->link[ind].next;
			}
			// now remove the link
			if(*wherelink) 
				*wherelink = (*wherelink)->link[ind].next;
			else {
				LOG(LOG_CPU,LOG_ERROR)("Cache anomaly. please investigate");
			}
		}
	} else 
		cache_addunusedblock(this);
	if (crossblock) {
		// clear out the crossblock (in the page before) as well;
		// break the back-pointer first so the recursive Clear() terminates
		crossblock->crossblock=0;
		crossblock->Clear();
		crossblock=0;
	}
	if (page.handler) {
		// clear out the code page handler (decrements write_map counters)
		page.handler->DelCacheBlock(this);
		page.handler=0;
	}
	if (cache.wmapmask){
		free(cache.wmapmask);
		cache.wmapmask=NULL;
	}
}
00487 
00488 
00489 static CacheBlockDynRec * cache_openblock(void) {
00490         CacheBlockDynRec * block=cache.block.active;
00491         // check for enough space in this block
00492         Bitu size=block->cache.size;
00493         CacheBlockDynRec * nextblock=block->cache.next;
00494         if (block->page.handler) 
00495                 block->Clear();
00496         // block size must be at least CACHE_MAXSIZE
00497         while (size<CACHE_MAXSIZE) {
00498                 if (!nextblock)
00499                         goto skipresize;
00500                 // merge blocks
00501                 size+=nextblock->cache.size;
00502                 CacheBlockDynRec * tempblock=nextblock->cache.next;
00503                 if (nextblock->page.handler) 
00504                         nextblock->Clear();
00505                 // block is free now
00506                 cache_addunusedblock(nextblock);
00507                 nextblock=tempblock;
00508         }
00509 skipresize:
00510         // adjust parameters and open this block
00511         block->cache.size=size;
00512         block->cache.next=nextblock;
00513         cache.pos=block->cache.start;
00514         return block;
00515 }
00516 
// Close the block that was opened by cache_openblock(): reset its links to
// the default stubs, split off unused tail space as a new block (aligned to
// CACHE_ALIGN), and advance the active-block pointer.
static void cache_closeblock(void) {
	CacheBlockDynRec * block=cache.block.active;
	// links point to the default linking code
	block->link[0].to=&link_blocks[0];
	block->link[1].to=&link_blocks[1];
	block->link[0].from=0;
	block->link[1].from=0;
	block->link[0].next=0;
	block->link[1].next=0;
	// close the block with correct alignment
	Bitu written=(Bitu)(cache.pos-block->cache.start);
	if (written>block->cache.size) {
		// emission overran the block; tolerated only for the last block,
		// and only by at most CACHE_MAXSIZE bytes
		if (!block->cache.next) {
			if (written>block->cache.size+CACHE_MAXSIZE) E_Exit("CacheBlock overrun 1 %lu",(unsigned long)written-block->cache.size);
		} else E_Exit("CacheBlock overrun 2 written %lu size %lu",(unsigned long)written,(unsigned long)block->cache.size);
	} else {
		Bitu left=block->cache.size-written;
		// smaller than cache align then don't bother to resize
		if (left>CACHE_ALIGN) {
			// round the used size up to the next CACHE_ALIGN boundary and
			// give the remainder to a freshly-allocated follower block
			Bitu new_size=((written-1)|(CACHE_ALIGN-1))+1;
			CacheBlockDynRec * newblock=cache_getblock();
			// align block now to CACHE_ALIGN
			newblock->cache.start=block->cache.start+new_size;
			newblock->cache.size=block->cache.size-new_size;
			newblock->cache.next=block->cache.next;
			block->cache.next=newblock;
			block->cache.size=new_size;
		}
	}
	// advance the active block pointer; wrap to the first block when the
	// next block would start too close to the end of the cache
	if (!block->cache.next || (block->cache.next->cache.start>(cache_code_start_ptr + CACHE_TOTAL - CACHE_MAXSIZE))) {
//              LOG_MSG("Cache full restarting");
		cache.block.active=cache.block.first;
	} else {
		cache.block.active=block->cache.next;
	}
}
00554 
00555 
00556 // place an 8bit value into the cache
00557 static INLINE void cache_addb(Bit8u val) {
00558         *cache.pos++=val;
00559 }
00560 
00561 // place a 16bit value into the cache
00562 static INLINE void cache_addw(Bit16u val) {
00563         *(Bit16u*)cache.pos=val;
00564         cache.pos+=2;
00565 }
00566 
00567 // place a 32bit value into the cache
00568 static INLINE void cache_addd(Bit32u val) {
00569         *(Bit32u*)cache.pos=val;
00570         cache.pos+=4;
00571 }
00572 
00573 // place a 64bit value into the cache
00574 static INLINE void cache_addq(Bit64u val) {
00575         *(Bit64u*)cache.pos=val;
00576         cache.pos+=8;
00577 }
00578 
00579 
00580 static void dyn_return(BlockReturn retcode,bool ret_exception);
00581 static void dyn_run_code(void);
00582 
00583 
/* Define temporary pagesize so the MPROTECT case and the regular case share as much code as possible */
#if (C_HAVE_MPROTECT)
#define PAGESIZE_TEMP PAGESIZE
#else 
#define PAGESIZE_TEMP 4096
#endif

// set once by cache_init(); guards cache_reset() against running early
static bool cache_initialized = false;
00592 
// Tear down and rebuild the whole code cache: release all code pages,
// reset the block pool, (re)allocate the executable cache memory, and
// re-emit the default link stubs. No-op until cache_init() has run.
static void cache_reset(void) {
	if (cache_initialized) {
		// release every code page still in use
		for (;;) {
			if (cache.used_pages) {
				CodePageHandlerDynRec * cpage=cache.used_pages;
				CodePageHandlerDynRec * npage=cache.used_pages->next;
				cpage->ClearRelease();
				delete cpage;
				cache.used_pages=npage;
			} else break;
		}

		if (cache_blocks == NULL) {
			cache_blocks=(CacheBlockDynRec*)malloc(CACHE_BLOCKS*sizeof(CacheBlockDynRec));
			if(!cache_blocks) E_Exit("Allocating cache_blocks has failed");
		}
		// rebuild the block free list; link[..].to=1 marks blocks as unused
		memset(cache_blocks,0,sizeof(CacheBlockDynRec)*CACHE_BLOCKS);
		cache.block.free=&cache_blocks[0];
		for (Bits i=0;i<CACHE_BLOCKS-1;i++) {
			cache_blocks[i].link[0].to=(CacheBlockDynRec *)1;
			cache_blocks[i].link[1].to=(CacheBlockDynRec *)1;
			cache_blocks[i].cache.next=&cache_blocks[i+1];
		}

		if (cache_code_start_ptr==NULL) {
			// allocate executable memory: CACHE_TOTAL for code, CACHE_MAXSIZE
			// overrun slack, one PAGESIZE_TEMP page for link stubs, and
			// PAGESIZE_TEMP-1 bytes so the start can be page-aligned
#if defined (WIN32)
			cache_code_start_ptr=(Bit8u*)VirtualAlloc(0,CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP-1+PAGESIZE_TEMP,
				MEM_COMMIT,PAGE_EXECUTE_READWRITE);
			if (!cache_code_start_ptr)
				cache_code_start_ptr=(Bit8u*)malloc(CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP-1+PAGESIZE_TEMP);
#else
			cache_code_start_ptr=(Bit8u*)malloc(CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP-1+PAGESIZE_TEMP);
#endif
			if (!cache_code_start_ptr) E_Exit("Allocating dynamic cache failed");

			cache_code=(Bit8u*)(((Bitu)cache_code_start_ptr + PAGESIZE_TEMP-1) & ~(PAGESIZE_TEMP-1)); //Bitu is same size as a pointer.

			// first aligned page holds the link stubs, the rest is cache
			cache_code_link_blocks=cache_code;
			cache_code+=PAGESIZE_TEMP;

#if (C_HAVE_MPROTECT)
			if(mprotect(cache_code_link_blocks,CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP,PROT_WRITE|PROT_READ|PROT_EXEC))
				LOG_MSG("Setting excute permission on the code cache has failed!");
#endif
		}

		// one block initially spans the whole cache
		CacheBlockDynRec * block=cache_getblock();
		cache.block.first=block;
		cache.block.active=block;
		block->cache.start=&cache_code[0];
		block->cache.size=CACHE_TOTAL;
		block->cache.next=0;                                                            //Last block in the list

		/* Setup the default blocks for block linkage returns */
		cache.pos=&cache_code_link_blocks[0];
		link_blocks[0].cache.start=cache.pos;
		dyn_return(BR_Link1,false);
		cache.pos=&cache_code_link_blocks[32];
		link_blocks[1].cache.start=cache.pos;
		dyn_return(BR_Link2,false);
		cache.free_pages=0;
		cache.last_page=0;
		cache.used_pages=0;
		/* Setup the code pages */
		for (Bitu i=0;i<CACHE_PAGES;i++) {
			CodePageHandlerDynRec * newpage=new CodePageHandlerDynRec();
			newpage->next=cache.free_pages;
			cache.free_pages=newpage;
		}
	}
}
00664 
00665 
00666 static void cache_init(bool enable) {
00667         if (enable) {
00668                 Bits i;
00669                 // see if cache is already initialized
00670                 if (cache_initialized) return;
00671                 cache_initialized = true;
00672                 if (cache_blocks == NULL) {
00673                         // allocate the cache blocks memory
00674                         cache_blocks=(CacheBlockDynRec*)malloc(CACHE_BLOCKS*sizeof(CacheBlockDynRec));
00675             if (!cache_blocks)
00676                 E_Exit("Allocating cache_blocks has failed");
00677             else
00678                 memset(cache_blocks, 0, sizeof(CacheBlockDynRec) * CACHE_BLOCKS);
00679                         cache.block.free=&cache_blocks[0];
00680                         // initialize the cache blocks
00681             if (cache_blocks != NULL) {
00682                 for (i = 0; i < CACHE_BLOCKS - 1; i++) {
00683                     cache_blocks[i].link[0].to = (CacheBlockDynRec*)1;
00684                     cache_blocks[i].link[1].to = (CacheBlockDynRec*)1;
00685                     cache_blocks[i].cache.next = &cache_blocks[i + 1];
00686                 }
00687             }
00688                 }
00689                 if (cache_code_start_ptr==NULL) {
00690                         // allocate the code cache memory
00691 #if defined (WIN32)
00692                         cache_code_start_ptr=(Bit8u*)VirtualAlloc(0,CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP-1+PAGESIZE_TEMP,
00693                                 MEM_COMMIT,PAGE_EXECUTE_READWRITE);
00694                         if (!cache_code_start_ptr)
00695                                 cache_code_start_ptr=(Bit8u*)malloc(CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP-1+PAGESIZE_TEMP);
00696 #else
00697                         cache_code_start_ptr=(Bit8u*)malloc(CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP-1+PAGESIZE_TEMP);
00698 #endif
00699                         if(!cache_code_start_ptr) E_Exit("Allocating dynamic cache failed");
00700 
00701                         // align the cache at a page boundary
00702                         cache_code=(Bit8u*)(((Bitu)cache_code_start_ptr + (Bitu)(PAGESIZE_TEMP-1)) & ~((Bitu)(PAGESIZE_TEMP-1)));//Bitu is same size as a pointer.
00703 
00704                         cache_code_link_blocks=cache_code;
00705                         cache_code=cache_code+PAGESIZE_TEMP;
00706 
00707 #if (C_HAVE_MPROTECT)
00708                         if(mprotect(cache_code_link_blocks,CACHE_TOTAL+CACHE_MAXSIZE+PAGESIZE_TEMP,PROT_WRITE|PROT_READ|PROT_EXEC))
00709                                 LOG_MSG("Setting execute permission on the code cache has failed");
00710 #endif
00711                         CacheBlockDynRec * block=cache_getblock();
00712                         cache.block.first=block;
00713                         cache.block.active=block;
00714                         block->cache.start=&cache_code[0];
00715                         block->cache.size=CACHE_TOTAL;
00716                         block->cache.next=0;                                            // last block in the list
00717                 }
00718                 // setup the default blocks for block linkage returns
00719                 cache.pos=&cache_code_link_blocks[0];
00720                 link_blocks[0].cache.start=cache.pos;
00721                 // link code that returns with a special return code
00722                 dyn_return(BR_Link1,false);
00723                 cache.pos=&cache_code_link_blocks[32];
00724                 link_blocks[1].cache.start=cache.pos;
00725                 // link code that returns with a special return code
00726                 dyn_return(BR_Link2,false);
00727 
00728                 cache.pos=&cache_code_link_blocks[64];
00729                 *(void**)(&core_dynrec.runcode) = (void*)cache.pos;
00730 //              link_blocks[1].cache.start=cache.pos;
00731                 dyn_run_code();
00732 
00733                 cache.free_pages=0;
00734                 cache.last_page=0;
00735                 cache.used_pages=0;
00736                 // setup the code pages
00737                 for (i=0;i<CACHE_PAGES;i++) {
00738                         CodePageHandlerDynRec * newpage=new CodePageHandlerDynRec();
00739                         newpage->next=cache.free_pages;
00740                         cache.free_pages=newpage;
00741                 }
00742         }
00743 }
00744 
// Tear down the dynamic-core cache. Intentionally a no-op: the cleanup
// logic below is disabled (commented out), so the block descriptors, the
// code cache memory and the code page handlers are left allocated for the
// lifetime of the process.
// NOTE(review): if the disabled code is ever re-enabled, the Windows path
// must use VirtualFree() instead of free() whenever cache_code_start_ptr
// came from VirtualAlloc() (see the '###' caveat inside the comment).
static void cache_close(void) {
/*	for (;;) {
		if (cache.used_pages) {
			CodePageHandler * cpage=cache.used_pages;
			CodePageHandler * npage=cache.used_pages->next;
			cpage->ClearRelease();
			delete cpage;
			cache.used_pages=npage;
		} else break;
	}
	if (cache_blocks != NULL) {
		free(cache_blocks);
		cache_blocks = NULL;
	}
	if (cache_code_start_ptr != NULL) {
		### care: under windows VirtualFree() has to be used if
		###       VirtualAlloc was used for memory allocation
		free(cache_code_start_ptr);
		cache_code_start_ptr = NULL;
	}
	cache_code = NULL;
	cache_code_link_blocks = NULL;
	cache_initialized = false; */
}