/*
 *  Copyright (C) 2002-2020  The DOSBox Team
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */


/*
    This file provides some definitions and basic-level functions
    that use the code generating functions from risc_?.h.
    Most important are the function call generation (including parameter
    loading), the effective address calculation and the memory
    access functions.
*/


// instructions that use one operand
enum SingleOps {
    SOP_INC,SOP_DEC,
    SOP_NOT,SOP_NEG
};

// instructions that use two operands
enum DualOps {
    DOP_ADD,DOP_ADC,
    DOP_SUB,DOP_SBB,
    DOP_CMP,DOP_XOR,
    DOP_AND,DOP_OR,
    DOP_TEST,
    DOP_MOV,
    DOP_XCHG
};

// shift and rotate functions
enum ShiftOps {
    SHIFT_ROL,SHIFT_ROR,
    SHIFT_RCL,SHIFT_RCR,
    SHIFT_SHL,SHIFT_SHR,
    SHIFT_SAL,SHIFT_SAR
};

// branch conditions
enum BranchTypes {
    BR_O,BR_NO,BR_B,BR_NB,
    BR_Z,BR_NZ,BR_BE,BR_NBE,
    BR_S,BR_NS,BR_P,BR_NP,
    BR_L,BR_NL,BR_LE,BR_NLE
};

// string instructions
enum StringOps {
    STR_OUTSB=0,STR_OUTSW,STR_OUTSD,
    STR_INSB=4,STR_INSW,STR_INSD,
    STR_MOVSB=8,STR_MOVSW,STR_MOVSD,
    STR_LODSB=12,STR_LODSW,STR_LODSD,
    STR_STOSB=16,STR_STOSW,STR_STOSD,
    STR_SCASB=20,STR_SCASW,STR_SCASD,
    STR_CMPSB=24,STR_CMPSW,STR_CMPSD
};
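// note: the explicit values space the operation classes four apart, so the
// low two bits of a StringOps value encode the operand width (0=byte,
// 1=word, 2=dword) while the upper bits select the operation; the
// translator can thus recover the width from the enum value itself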

// repeat prefix type (for string operations)
enum REP_Type {
    REP_NONE=0,REP_NZ,REP_Z
};

// loop type
enum LoopTypes {
    LOOP_NONE,LOOP_NE,LOOP_E,LOOP_JCXZ
};

// rotate operand type
enum grp2_types {
    grp2_1,grp2_imm,grp2_cl
};

// opcode mapping for group1 instructions
static DualOps grp1_table[8]={
    DOP_ADD,DOP_OR,DOP_ADC,DOP_SBB,DOP_AND,DOP_SUB,DOP_XOR,DOP_CMP
};


// decoding information used during translation of a code block
static struct DynDecode {
    PhysPt code;            // pointer to the next byte in the instruction stream
    PhysPt code_start;      // pointer to the start of the current code block
    PhysPt op_start;        // pointer to the start of the current instruction
    bool big_op;            // operand size modifier
    bool big_addr;          // address size modifier
    REP_Type rep;           // current repeat prefix
    Bitu cycles;            // number of cycles used by the currently translated code
    bool seg_prefix_used;   // true if a segment override prefix is active
    Bit8u seg_prefix;       // segment prefix (if seg_prefix_used==true)

    // block that contains the first instruction translated
    CacheBlockDynRec * block;
    // block that contains the current byte of the instruction stream
    CacheBlockDynRec * active_block;

    // the active page (containing the current byte of the instruction stream)
    struct {
        CodePageHandlerDynRec * code;
        Bitu index;     // index to the current byte of the instruction stream
        Bit8u * wmap;   // write map that indicates code presence for every byte of this page
        Bit8u * invmap; // invalidation map
        Bitu first;     // page number
    } page;

    // modrm state of the current instruction (if used)
    struct {
//      Bitu val;
        Bitu mod;
        Bit8u rm;
        Bitu reg;
    } modrm;
} decode;
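
// Return convention (from the code below): MakeCodePage returns true only
// when the initial probe read faults, i.e. a page fault is pending and has
// to be raised by the caller; otherwise it returns false with cph set to
// the page's code handler, or to null when code cannot run from this page.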
static bool MakeCodePage(Bitu lin_addr,CodePageHandlerDynRec * &cph) {
    Bit8u rdval;
    //Ensure page contains memory:
    if (GCC_UNLIKELY(mem_readb_checked((PhysPt)lin_addr,&rdval))) return true;

    PageHandler * handler=get_tlb_readhandler((PhysPt)lin_addr);
    if (handler->flags & PFLAG_HASCODE) {
        // this is a codepage handler, and the one that we're looking for
        cph=(CodePageHandlerDynRec *)handler;
        return false;
    }
    if (handler->flags & PFLAG_NOCODE) {
        if (false) { // PAGING_ForcePageInit(lin_addr)) {
            handler=get_tlb_readhandler((PhysPt)lin_addr);
            if (handler->flags & PFLAG_HASCODE) {
                cph=(CodePageHandlerDynRec *)handler;
                return false;
            }
        }
        if (handler->flags & PFLAG_NOCODE) {
            LOG_MSG("DYNREC:Can't run code in this page");
            cph=0;
            return false;
        }
    }
    Bitu lin_page=lin_addr>>12;
    Bitu phys_page=lin_page;
    // find the physical page that the linear page is mapped to
    if (!PAGING_MakePhysPage(phys_page)) {
        LOG_MSG("DYNREC:Can't find physpage");
        cph=0;
        return false;
    }
    // find a free CodePage
    if (!cache.free_pages) {
        if (cache.used_pages!=decode.page.code) cache.used_pages->ClearRelease();
        else {
            // try another page to avoid clearing our source-crosspage
            if ((cache.used_pages->next) && (cache.used_pages->next!=decode.page.code))
                cache.used_pages->next->ClearRelease();
            else {
                LOG_MSG("DYNREC:Invalid cache links");
                cache.used_pages->ClearRelease();
            }
        }
    }
    CodePageHandlerDynRec * cpagehandler=cache.free_pages;
    if (cache.free_pages != NULL) {
        cache.free_pages = cache.free_pages->next;
        // adjust the previous and next page pointers
        cpagehandler->prev = cache.last_page;
        cpagehandler->next = 0;
        if (cache.last_page) cache.last_page->next = cpagehandler;
        cache.last_page = cpagehandler;
        if (!cache.used_pages) cache.used_pages = cpagehandler;
    }
    else {
        E_Exit("NULL cache.free_pages in MakeCodePage");
    }

    // initialize the code page handler and add the handler to the memory page
    cpagehandler->SetupAt(phys_page,handler);
    MEM_SetPageHandler(phys_page,1,cpagehandler);
    PAGING_UnlinkPages(lin_page,1);
    cph=cpagehandler;
    return false;
}

static void decode_advancepage(void) {
    // advance to the next page
    decode.active_block->page.end=4095;
    // trigger a possible page fault here
    decode.page.first++;
    Bitu faddr=decode.page.first << 12;
    mem_readb((PhysPt)faddr);
    MakeCodePage(faddr,decode.page.code);
    CacheBlockDynRec * newblock=cache_getblock();
    decode.active_block->crossblock=newblock;
    newblock->crossblock=decode.active_block;
    decode.active_block=newblock;
    decode.active_block->page.start=0;
    decode.page.code->AddCrossBlock(decode.active_block);
    decode.page.wmap=decode.page.code->write_map;
    decode.page.invmap=decode.page.code->invalidation_map;
    decode.page.index=0;
}
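
// Each byte fetched by the functions below increments its entry in the
// page's write map; a nonzero entry marks the byte as belonging to
// translated code, which lets the code page handler detect writes into
// translated code and invalidate the affected blocks.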
// fetch the next byte of the instruction stream
static Bit8u decode_fetchb(void) {
    if (GCC_UNLIKELY(decode.page.index>=4096)) {
        decode_advancepage();
    }
    decode.page.wmap[decode.page.index]+=0x01;
    decode.page.index++;
    decode.code+=1;
    return mem_readb(decode.code-1);
}
// fetch the next word of the instruction stream
static Bit16u decode_fetchw(void) {
    if (GCC_UNLIKELY(decode.page.index>=4095)) {
        // the word crosses a page boundary, fetch it bytewise
        Bit16u val=decode_fetchb();
        val|=decode_fetchb() << 8;
        return val;
    }
    *(Bit16u *)&decode.page.wmap[decode.page.index]+=0x0101;
    decode.code+=2;decode.page.index+=2;
    return mem_readw(decode.code-2);
}
// fetch the next dword of the instruction stream
static Bit32u decode_fetchd(void) {
    if (GCC_UNLIKELY(decode.page.index>=4093)) {
        // the dword crosses a page boundary, fetch it bytewise
        Bit32u val=decode_fetchb();
        val|=decode_fetchb() << 8;
        val|=decode_fetchb() << 16;
        val|=decode_fetchb() << 24;
        return val;
    }
    *(Bit32u *)&decode.page.wmap[decode.page.index]+=0x01010101;
    decode.code+=4;decode.page.index+=4;
    return mem_readd(decode.code-4);
}

#define START_WMMEM 64

// adjust the write map mask to account for map holes created by the
// special code fetch functions below
static void INLINE decode_increase_wmapmask(Bitu size) {
    Bitu mapidx;
    CacheBlockDynRec* activecb=decode.active_block;
    if (GCC_UNLIKELY(!activecb->cache.wmapmask)) {
        // no mask memory allocated yet, start with a small buffer
        activecb->cache.wmapmask=(Bit8u*)malloc(START_WMMEM);
        if (activecb->cache.wmapmask != NULL)
            memset(activecb->cache.wmapmask, 0, START_WMMEM);
        else
            E_Exit("Memory allocation failed in decode_increase_wmapmask");
        activecb->cache.maskstart=(Bit16u)decode.page.index; // start of buffer is the current code position
        activecb->cache.masklen=START_WMMEM;
        mapidx=0;
    } else {
        mapidx=decode.page.index-activecb->cache.maskstart;
        if (GCC_UNLIKELY(mapidx+size>=activecb->cache.masklen)) {
            // mask buffer too small, increase it
            Bitu newmasklen=activecb->cache.masklen*(Bitu)4;
            if (newmasklen<mapidx+size) newmasklen=((mapidx+size)&~3)*2;
            Bit8u* tempmem=(Bit8u*)malloc(newmasklen);
            if (tempmem != NULL) {
                memset(tempmem, 0, newmasklen);
                memcpy(tempmem, activecb->cache.wmapmask, activecb->cache.masklen);
                free(activecb->cache.wmapmask);
                activecb->cache.wmapmask = tempmem;
                activecb->cache.masklen = (Bit16u)newmasklen;
            }
            else
                E_Exit("Memory allocation failed in decode_increase_wmapmask");
        }
    }
    // update mask entries
    if (activecb->cache.wmapmask != NULL) {
        switch (size) {
        case 1: activecb->cache.wmapmask[mapidx] += 0x01; break;
        case 2: (*(Bit16u*)& activecb->cache.wmapmask[mapidx]) += 0x0101; break;
        case 4: (*(Bit32u*)& activecb->cache.wmapmask[mapidx]) += 0x01010101; break;
        }
    }
}
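
// The *_imm fetch variants below return true with val holding a host
// pointer into the guest code when the bytes have already been modified
// before (according to the invalidation map) and are reachable through the
// read TLB; the caller can then embed a memory reference instead of an
// immediate constant, so the block survives further changes to that
// location (see their use in dyn_fill_ea). The bytes consumed this way are
// recorded per block via decode_increase_wmapmask. Otherwise they return
// false with val holding the fetched value.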
// fetch a byte, val points to the code location if possible,
// otherwise val contains the current value read from the position
static bool decode_fetchb_imm(Bitu & val) {
    if (GCC_UNLIKELY(decode.page.index>=4096)) {
        decode_advancepage();
    }
    // see if position is directly accessible
    if (decode.page.invmap != NULL) {
        if (decode.page.invmap[decode.page.index] == 0) {
            // position not yet modified
            val=(Bit32u)decode_fetchb();
            return false;
        }

        HostPt tlb_addr=get_tlb_read(decode.code);
        if (tlb_addr) {
            val=(Bitu)(tlb_addr+decode.code);
            decode_increase_wmapmask(1);
            decode.code++;
            decode.page.index++;
            return true;
        }
    }
    // first time decoding or not directly accessible, just fetch the value
    val=(Bit32u)decode_fetchb();
    return false;
}

// fetch a word, val points to the code location if possible,
// otherwise val contains the current value read from the position
static bool decode_fetchw_imm(Bitu & val) {
    if (decode.page.index<4095) {
        if (decode.page.invmap != NULL) {
            if ((decode.page.invmap[decode.page.index] == 0) &&
                (decode.page.invmap[decode.page.index + 1] == 0)) {
                // position not yet modified
                val=decode_fetchw();
                return false;
            }

            HostPt tlb_addr=get_tlb_read(decode.code);
            // see if position is directly accessible
            if (tlb_addr) {
                val=(Bitu)(tlb_addr+decode.code);
                decode_increase_wmapmask(2);
                decode.code+=2;
                decode.page.index+=2;
                return true;
            }
        }
    }
    // first time decoding or not directly accessible, just fetch the value
    val=decode_fetchw();
    return false;
}

// fetch a dword, val points to the code location if possible,
// otherwise val contains the current value read from the position
static bool decode_fetchd_imm(Bitu & val) {
    if (decode.page.index<4093) {
        if (decode.page.invmap != NULL) {
            if ((decode.page.invmap[decode.page.index] == 0) &&
                (decode.page.invmap[decode.page.index + 1] == 0) &&
                (decode.page.invmap[decode.page.index + 2] == 0) &&
                (decode.page.invmap[decode.page.index + 3] == 0)) {
                // position not yet modified
                val=decode_fetchd();
                return false;
            }

            HostPt tlb_addr=get_tlb_read(decode.code);
            // see if position is directly accessible
            if (tlb_addr) {
                val=(Bitu)(tlb_addr+decode.code);
                decode_increase_wmapmask(4);
                decode.code+=4;
                decode.page.index+=4;
                return true;
            }
        }
    }
    // first time decoding or not directly accessible, just fetch the value
    val=decode_fetchd();
    return false;
}


// modrm decoding helper
static void INLINE dyn_get_modrm(void) {
    Bitu val=decode_fetchb();
    decode.modrm.mod=(val >> 6) & 3;
    decode.modrm.reg=(val >> 3) & 7;
    decode.modrm.rm=(val & 7);
}


#ifdef DRC_USE_SEGS_ADDR

#define MOV_SEG_VAL_TO_HOST_REG(host_reg, seg_index) gen_mov_seg16_to_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_SEG_VAL(seg_index)) - (DRC_PTR_SIZE_IM)(&Segs))

#define MOV_SEG_PHYS_TO_HOST_REG(host_reg, seg_index) gen_mov_seg32_to_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_SEG_PHYS(seg_index)) - (DRC_PTR_SIZE_IM)(&Segs))
#define ADD_SEG_PHYS_TO_HOST_REG(host_reg, seg_index) gen_add_seg32_to_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_SEG_PHYS(seg_index)) - (DRC_PTR_SIZE_IM)(&Segs))

#else

#define MOV_SEG_VAL_TO_HOST_REG(host_reg, seg_index) gen_mov_word_to_reg(host_reg,DRCD_SEG_VAL(seg_index),false)

#define MOV_SEG_PHYS_TO_HOST_REG(host_reg, seg_index) gen_mov_word_to_reg(host_reg,DRCD_SEG_PHYS(seg_index),true)
#define ADD_SEG_PHYS_TO_HOST_REG(host_reg, seg_index) gen_add(host_reg,DRCD_SEG_PHYS(seg_index))

#endif


#ifdef DRC_USE_REGS_ADDR

#define MOV_REG_VAL_TO_HOST_REG(host_reg, reg_index) gen_mov_regval32_to_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_VAL(reg_index)) - (DRC_PTR_SIZE_IM)(&cpu_regs))
#define ADD_REG_VAL_TO_HOST_REG(host_reg, reg_index) gen_add_regval32_to_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_VAL(reg_index)) - (DRC_PTR_SIZE_IM)(&cpu_regs))

#define MOV_REG_WORD16_TO_HOST_REG(host_reg, reg_index) gen_mov_regval16_to_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_WORD(reg_index,false)) - (DRC_PTR_SIZE_IM)(&cpu_regs))
#define MOV_REG_WORD32_TO_HOST_REG(host_reg, reg_index) gen_mov_regval32_to_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_WORD(reg_index,true)) - (DRC_PTR_SIZE_IM)(&cpu_regs))
#define MOV_REG_WORD_TO_HOST_REG(host_reg, reg_index, dword) gen_mov_regword_to_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_WORD(reg_index,dword)) - (DRC_PTR_SIZE_IM)(&cpu_regs), dword)

#define MOV_REG_WORD16_FROM_HOST_REG(host_reg, reg_index) gen_mov_regval16_from_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_WORD(reg_index,false)) - (DRC_PTR_SIZE_IM)(&cpu_regs))
#define MOV_REG_WORD32_FROM_HOST_REG(host_reg, reg_index) gen_mov_regval32_from_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_WORD(reg_index,true)) - (DRC_PTR_SIZE_IM)(&cpu_regs))
#define MOV_REG_WORD_FROM_HOST_REG(host_reg, reg_index, dword) gen_mov_regword_from_reg(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_WORD(reg_index,dword)) - (DRC_PTR_SIZE_IM)(&cpu_regs), dword)

#define MOV_REG_BYTE_TO_HOST_REG_LOW(host_reg, reg_index, high_byte) gen_mov_regbyte_to_reg_low(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_BYTE(reg_index,high_byte)) - (DRC_PTR_SIZE_IM)(&cpu_regs))
#define MOV_REG_BYTE_TO_HOST_REG_LOW_CANUSEWORD(host_reg, reg_index, high_byte) gen_mov_regbyte_to_reg_low_canuseword(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_BYTE(reg_index,high_byte)) - (DRC_PTR_SIZE_IM)(&cpu_regs))
#define MOV_REG_BYTE_FROM_HOST_REG_LOW(host_reg, reg_index, high_byte) gen_mov_regbyte_from_reg_low(host_reg,(DRC_PTR_SIZE_IM)(DRCD_REG_BYTE(reg_index,high_byte)) - (DRC_PTR_SIZE_IM)(&cpu_regs))

#else

#define MOV_REG_VAL_TO_HOST_REG(host_reg, reg_index) gen_mov_word_to_reg(host_reg,DRCD_REG_VAL(reg_index),true)
#define ADD_REG_VAL_TO_HOST_REG(host_reg, reg_index) gen_add(host_reg,DRCD_REG_VAL(reg_index))

#define MOV_REG_WORD16_TO_HOST_REG(host_reg, reg_index) gen_mov_word_to_reg(host_reg,DRCD_REG_WORD(reg_index,false),false)
#define MOV_REG_WORD32_TO_HOST_REG(host_reg, reg_index) gen_mov_word_to_reg(host_reg,DRCD_REG_WORD(reg_index,true),true)
#define MOV_REG_WORD_TO_HOST_REG(host_reg, reg_index, dword) gen_mov_word_to_reg(host_reg,DRCD_REG_WORD(reg_index,dword),dword)

#define MOV_REG_WORD16_FROM_HOST_REG(host_reg, reg_index) gen_mov_word_from_reg(host_reg,DRCD_REG_WORD(reg_index,false),false)
#define MOV_REG_WORD32_FROM_HOST_REG(host_reg, reg_index) gen_mov_word_from_reg(host_reg,DRCD_REG_WORD(reg_index,true),true)
#define MOV_REG_WORD_FROM_HOST_REG(host_reg, reg_index, dword) gen_mov_word_from_reg(host_reg,DRCD_REG_WORD(reg_index,dword),dword)

#define MOV_REG_BYTE_TO_HOST_REG_LOW(host_reg, reg_index, high_byte) gen_mov_byte_to_reg_low(host_reg,DRCD_REG_BYTE(reg_index,high_byte))
#define MOV_REG_BYTE_TO_HOST_REG_LOW_CANUSEWORD(host_reg, reg_index, high_byte) gen_mov_byte_to_reg_low_canuseword(host_reg,DRCD_REG_BYTE(reg_index,high_byte))
#define MOV_REG_BYTE_FROM_HOST_REG_LOW(host_reg, reg_index, high_byte) gen_mov_byte_from_reg_low(host_reg,DRCD_REG_BYTE(reg_index,high_byte))

#endif
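
// Illustrative expansion (assuming DRC_USE_REGS_ADDR is defined):
//   MOV_REG_WORD16_TO_HOST_REG(FC_OP1,DRC_REG_EAX)
// becomes gen_mov_regval16_to_reg(FC_OP1,<displacement of the AX word
// relative to cpu_regs>), i.e. the guest register is read directly from
// the register file; without DRC_USE_REGS_ADDR the same macro falls back
// to a plain memory load via gen_mov_word_to_reg.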

#define DYN_LEA_MEM_MEM(ea_reg, op1, op2, scale, imm) dyn_lea_mem_mem(ea_reg,op1,op2,scale,imm)

#if defined(DRC_USE_REGS_ADDR) && defined(DRC_USE_SEGS_ADDR)

#define DYN_LEA_SEG_PHYS_REG_VAL(ea_reg, op1_index, op2_index, scale, imm) dyn_lea_segphys_regval(ea_reg,op1_index,op2_index,scale,imm)
#define DYN_LEA_REG_VAL_REG_VAL(ea_reg, op1_index, op2_index, scale, imm) dyn_lea_regval_regval(ea_reg,op1_index,op2_index,scale,imm)
#define DYN_LEA_MEM_REG_VAL(ea_reg, op1, op2_index, scale, imm) dyn_lea_mem_regval(ea_reg,op1,op2_index,scale,imm)

#elif defined(DRC_USE_REGS_ADDR)

#define DYN_LEA_SEG_PHYS_REG_VAL(ea_reg, op1_index, op2_index, scale, imm) dyn_lea_mem_regval(ea_reg,DRCD_SEG_PHYS(op1_index),op2_index,scale,imm)
#define DYN_LEA_REG_VAL_REG_VAL(ea_reg, op1_index, op2_index, scale, imm) dyn_lea_regval_regval(ea_reg,op1_index,op2_index,scale,imm)
#define DYN_LEA_MEM_REG_VAL(ea_reg, op1, op2_index, scale, imm) dyn_lea_mem_regval(ea_reg,op1,op2_index,scale,imm)

#elif defined(DRC_USE_SEGS_ADDR)

#define DYN_LEA_SEG_PHYS_REG_VAL(ea_reg, op1_index, op2_index, scale, imm) dyn_lea_segphys_mem(ea_reg,op1_index,DRCD_REG_VAL(op2_index),scale,imm)
#define DYN_LEA_REG_VAL_REG_VAL(ea_reg, op1_index, op2_index, scale, imm) dyn_lea_mem_mem(ea_reg,DRCD_REG_VAL(op1_index),DRCD_REG_VAL(op2_index),scale,imm)
#define DYN_LEA_MEM_REG_VAL(ea_reg, op1, op2_index, scale, imm) dyn_lea_mem_mem(ea_reg,op1,DRCD_REG_VAL(op2_index),scale,imm)

#else

#define DYN_LEA_SEG_PHYS_REG_VAL(ea_reg, op1_index, op2_index, scale, imm) dyn_lea_mem_mem(ea_reg,DRCD_SEG_PHYS(op1_index),DRCD_REG_VAL(op2_index),scale,imm)
#define DYN_LEA_REG_VAL_REG_VAL(ea_reg, op1_index, op2_index, scale, imm) dyn_lea_mem_mem(ea_reg,DRCD_REG_VAL(op1_index),DRCD_REG_VAL(op2_index),scale,imm)
#define DYN_LEA_MEM_REG_VAL(ea_reg, op1, op2_index, scale, imm) dyn_lea_mem_mem(ea_reg,op1,DRCD_REG_VAL(op2_index),scale,imm)

#endif
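
// Note that every configuration reduces to the memory-operand helpers in
// the worst case: when direct register/segment addressing is unavailable,
// the macros pass DRCD_REG_VAL/DRCD_SEG_PHYS pointers to dyn_lea_mem_*,
// so callers do not need to know which variant was compiled in.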


// adjust CPU_Cycles value
static void dyn_reduce_cycles(void) {
    if (!decode.cycles) decode.cycles++;
    gen_sub_direct_word(&CPU_Cycles,(Bit32u)decode.cycles,true);
}


// set reg to the start of the next instruction
// set reg_eip to the start of the current instruction
static INLINE void dyn_set_eip_last_end(HostReg reg) {
    gen_mov_word_to_reg(reg,&reg_eip,true);
    gen_add_imm(reg,(Bit32u)(decode.code-decode.code_start));
    gen_add_direct_word(&reg_eip,decode.op_start-decode.code_start,decode.big_op);
}

// set reg_eip to the start of the current instruction
static INLINE void dyn_set_eip_last(void) {
    gen_add_direct_word(&reg_eip,decode.op_start-decode.code_start,cpu.code.big);
}

// set reg_eip to the start of the next instruction
static INLINE void dyn_set_eip_end(void) {
    gen_add_direct_word(&reg_eip,decode.code-decode.code_start,cpu.code.big);
}

// set reg to the start of the next instruction plus an offset (imm)
static INLINE void dyn_set_eip_end(HostReg reg,Bit32u imm=0) {
    gen_mov_word_to_reg(reg,&reg_eip,true); //gen_extend_word will mask off the upper bits
//  gen_mov_word_to_reg(reg,&reg_eip,decode.big_op);
    gen_add_imm(reg,(Bit32u)(decode.code-decode.code_start+imm));
    if (!decode.big_op) gen_extend_word(false,reg);
}



// the following functions generate function calls;
// parameters are loaded by generating code using gen_load_param_ which
// is architecture dependent
// R=host register; I=32bit immediate value; A=address value; m=memory
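// Illustrative use of the naming scheme: gen_call_function_RI(func,op1,op2)
// loads the immediate op2 into parameter slot 1 and the host register op1
// into slot 0, then emits the call; dyn_add_iocheck below uses it as
//   gen_call_function_RI(CPU_IO_Exception,reg_port,access_size);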
template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_R(const T func,Bitu op) {
    gen_load_param_reg(op,0);
    return gen_call_function_setup(func, 1);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_R3(const T func,Bitu op) {
    gen_load_param_reg(op,2);
    return gen_call_function_setup(func, 3, true);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_RI(const T func,Bitu op1,Bitu op2) {
    gen_load_param_imm(op2,1);
    gen_load_param_reg(op1,0);
    return gen_call_function_setup(func, 2);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_RA(const T func,Bitu op1,DRC_PTR_SIZE_IM op2) {
    gen_load_param_addr(op2,1);
    gen_load_param_reg(op1,0);
    return gen_call_function_setup(func, 2);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_RR(const T func,Bitu op1,Bitu op2) {
    gen_load_param_reg(op2,1);
    gen_load_param_reg(op1,0);
    return gen_call_function_setup(func, 2);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_IR(const T func,Bitu op1,Bitu op2) {
    gen_load_param_reg(op2,1);
    gen_load_param_imm(op1,0);
    return gen_call_function_setup(func, 2);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_I(const T func,Bitu op) {
    gen_load_param_imm(op,0);
    return gen_call_function_setup(func, 1);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_II(const T func,Bitu op1,Bitu op2) {
    gen_load_param_imm(op2,1);
    gen_load_param_imm(op1,0);
    return gen_call_function_setup(func, 2);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_III(const T func,Bitu op1,Bitu op2,Bitu op3) {
    gen_load_param_imm(op3,2);
    gen_load_param_imm(op2,1);
    gen_load_param_imm(op1,0);
    return gen_call_function_setup(func, 3);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_IA(const T func,Bitu op1,DRC_PTR_SIZE_IM op2) {
    gen_load_param_addr(op2,1);
    gen_load_param_imm(op1,0);
    return gen_call_function_setup(func, 2);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_IIR(const T func,Bitu op1,Bitu op2,Bitu op3) {
    gen_load_param_reg(op3,2);
    gen_load_param_imm(op2,1);
    gen_load_param_imm(op1,0);
    return gen_call_function_setup(func, 3);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_IIIR(const T func,Bitu op1,Bitu op2,Bitu op3,Bitu op4) {
    gen_load_param_reg(op4,3);
    gen_load_param_imm(op3,2);
    gen_load_param_imm(op2,1);
    gen_load_param_imm(op1,0);
    return gen_call_function_setup(func, 4);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_IRRR(const T func,Bitu op1,Bitu op2,Bitu op3,Bitu op4) {
    gen_load_param_reg(op4,3);
    gen_load_param_reg(op3,2);
    gen_load_param_reg(op2,1);
    gen_load_param_imm(op1,0);
    return gen_call_function_setup(func, 4);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_m(const T func,Bitu op) {
    gen_load_param_mem(op,2);
    return gen_call_function_setup(func, 3, true);
}

template <typename T> static DRC_PTR_SIZE_IM INLINE gen_call_function_mm(const T func,Bitu op1,Bitu op2) {
    gen_load_param_mem(op2,3);
    gen_load_param_mem(op1,2);
    return gen_call_function_setup(func, 4, true);
}
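
// The machinery below moves rarely taken paths (exceptions, cycle-count
// exits, string-operation breaks) out of line: dyn_check_exception and
// friends record a forward branch plus metadata in save_info_dynrec, and
// dyn_fill_blocks patches the branch targets and emits the corresponding
// code when the block is closed.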
enum save_info_type {db_exception, cycle_check, string_break};


// function that is called on exceptions
static BlockReturn DynRunException(Bit32u eip_add,Bit32u cycle_sub) {
    reg_eip+=eip_add;
    CPU_Cycles-=cycle_sub;
    if (cpu.exception.which==SMC_CURRENT_BLOCK) return BR_SMCBlock;
    CPU_Exception(cpu.exception.which,cpu.exception.error);
    return BR_Normal;
}


// array with information about code that is generated at the
// end of a cache block because it is rarely reached (like exceptions)
static struct {
    save_info_type type;
    DRC_PTR_SIZE_IM branch_pos;
    Bit32u eip_change;
    Bitu cycles;
} save_info_dynrec[512];

Bitu used_save_info_dynrec=0;


// return from the current block with the given return code
static void dyn_return(BlockReturn retcode,bool ret_exception=false) {
    if (!ret_exception) {
        gen_mov_dword_to_reg_imm(FC_RETOP,retcode);
    }
    gen_return_function();
}

static void dyn_run_code(void) {
    gen_run_code();
    gen_return_function();
}

// fill in the code at the end of the block that contains rarely-executed
// code which is only run conditionally (like exceptions)
static void dyn_fill_blocks(void) {
    for (Bitu sct=0; sct<used_save_info_dynrec; sct++) {
        gen_fill_branch_long(save_info_dynrec[sct].branch_pos);
        switch (save_info_dynrec[sct].type) {
        case db_exception:
            // code for exception handling, load cycles and call DynRunException
            decode.cycles=save_info_dynrec[sct].cycles;
            if (cpu.code.big) gen_call_function_II(DynRunException,save_info_dynrec[sct].eip_change,save_info_dynrec[sct].cycles);
            else gen_call_function_II(DynRunException,save_info_dynrec[sct].eip_change&0xffff,save_info_dynrec[sct].cycles);
            dyn_return(BR_Normal,true);
            break;
        case cycle_check:
            // cycles are <=0 so exit the core
            dyn_return(BR_Cycles);
            break;
        case string_break:
            // interrupted a looped string instruction, can be continued later
            gen_add_direct_word(&reg_eip,save_info_dynrec[sct].eip_change,decode.big_op);
            dyn_return(BR_Cycles);
            break;
        }
    }
    used_save_info_dynrec=0;
}


static void dyn_closeblock(void) {
    //Shouldn't create empty block normally but let's do it like this
    dyn_fill_blocks();
    cache_block_before_close();
    cache_closeblock();
    cache_block_closing(decode.block->cache.start,decode.block->cache.size);
}


// add a check that can branch to the exception handling code
static void dyn_check_exception(HostReg reg) {
    save_info_dynrec[used_save_info_dynrec].branch_pos=gen_create_branch_long_nonzero(reg,false);
    if (!decode.cycles) decode.cycles++;
    save_info_dynrec[used_save_info_dynrec].cycles=decode.cycles;
    // in case of an exception, eip will point to the start of the current instruction
    save_info_dynrec[used_save_info_dynrec].eip_change=decode.op_start-decode.code_start;
    if (!cpu.code.big) save_info_dynrec[used_save_info_dynrec].eip_change&=0xffff;
    save_info_dynrec[used_save_info_dynrec].type=db_exception;
    used_save_info_dynrec++;
}
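
// Convention for the checked memory helpers below: they return false on
// success (for reads, the data is left in core_dynrec.readdata) and true
// when the access faulted; the generated code tests the result via
// dyn_check_exception. The word/dword variants route accesses that cross
// a page boundary through the unaligned slow path.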
bool DRC_CALL_CONV mem_readb_checked_drc(PhysPt address) DRC_FC;
bool DRC_CALL_CONV mem_readb_checked_drc(PhysPt address) {
    HostPt tlb_addr=get_tlb_read(address);
    if (tlb_addr) {
        *((Bit8u*)(&core_dynrec.readdata))=host_readb(tlb_addr+address);
        return false;
    } else {
        return get_tlb_readhandler(address)->readb_checked(address, (Bit8u*)(&core_dynrec.readdata));
    }
}

bool DRC_CALL_CONV mem_writeb_checked_drc(PhysPt address,Bit8u val) DRC_FC;
bool DRC_CALL_CONV mem_writeb_checked_drc(PhysPt address,Bit8u val) {
    HostPt tlb_addr=get_tlb_write(address);
    if (tlb_addr) {
        host_writeb(tlb_addr+address,val);
        return false;
    } else return get_tlb_writehandler(address)->writeb_checked(address,val);
}

bool DRC_CALL_CONV mem_readw_checked_drc(PhysPt address) DRC_FC;
bool DRC_CALL_CONV mem_readw_checked_drc(PhysPt address) {
    if ((address & 0xfff)<0xfff) {
        HostPt tlb_addr=get_tlb_read(address);
        if (tlb_addr) {
            *((Bit16u*)(&core_dynrec.readdata))=host_readw(tlb_addr+address);
            return false;
        } else return get_tlb_readhandler(address)->readw_checked(address, (Bit16u*)(&core_dynrec.readdata));
    } else return mem_unalignedreadw_checked(address, ((Bit16u*)(&core_dynrec.readdata)));
}

bool DRC_CALL_CONV mem_readd_checked_drc(PhysPt address) DRC_FC;
bool DRC_CALL_CONV mem_readd_checked_drc(PhysPt address) {
    if ((address & 0xfff)<0xffd) {
        HostPt tlb_addr=get_tlb_read(address);
        if (tlb_addr) {
            *((Bit32u*)(&core_dynrec.readdata))=host_readd(tlb_addr+address);
            return false;
        } else return get_tlb_readhandler(address)->readd_checked(address, (Bit32u*)(&core_dynrec.readdata));
    } else return mem_unalignedreadd_checked(address, ((Bit32u*)(&core_dynrec.readdata)));
}

bool DRC_CALL_CONV mem_writew_checked_drc(PhysPt address,Bit16u val) DRC_FC;
bool DRC_CALL_CONV mem_writew_checked_drc(PhysPt address,Bit16u val) {
    if ((address & 0xfff)<0xfff) {
        HostPt tlb_addr=get_tlb_write(address);
        if (tlb_addr) {
            host_writew(tlb_addr+address,val);
            return false;
        } else return get_tlb_writehandler(address)->writew_checked(address,val);
    } else return mem_unalignedwritew_checked(address,val);
}

bool DRC_CALL_CONV mem_writed_checked_drc(PhysPt address,Bit32u val) DRC_FC;
bool DRC_CALL_CONV mem_writed_checked_drc(PhysPt address,Bit32u val) {
    if ((address & 0xfff)<0xffd) {
        HostPt tlb_addr=get_tlb_write(address);
        if (tlb_addr) {
            host_writed(tlb_addr+address,val);
            return false;
        } else return get_tlb_writehandler(address)->writed_checked(address,val);
    } else return mem_unalignedwrited_checked(address,val);
}


// functions that enable access to the memory

// read a byte from a given address and store it in reg_dst
static void dyn_read_byte(HostReg reg_addr,HostReg reg_dst) {
    gen_mov_regs(FC_OP1,reg_addr);
    gen_call_function_raw(mem_readb_checked_drc);
    dyn_check_exception(FC_RETOP);
    gen_mov_byte_to_reg_low(reg_dst,&core_dynrec.readdata);
}
static void dyn_read_byte_canuseword(HostReg reg_addr,HostReg reg_dst) {
    gen_mov_regs(FC_OP1,reg_addr);
    gen_call_function_raw(mem_readb_checked_drc);
    dyn_check_exception(FC_RETOP);
    gen_mov_byte_to_reg_low_canuseword(reg_dst,&core_dynrec.readdata);
}

// write a byte from reg_val into the memory given by the address
static void dyn_write_byte(HostReg reg_addr,HostReg reg_val) {
    gen_mov_regs(FC_OP2,reg_val);
    gen_mov_regs(FC_OP1,reg_addr);
    gen_call_function_raw(mem_writeb_checked_drc);
    dyn_check_exception(FC_RETOP);
}

// read a 32bit (dword=true) or 16bit (dword=false) value
// from a given address and store it in reg_dst
static void dyn_read_word(HostReg reg_addr,HostReg reg_dst,bool dword) {
    gen_mov_regs(FC_OP1,reg_addr);
    if (dword) gen_call_function_raw(mem_readd_checked_drc);
    else gen_call_function_raw(mem_readw_checked_drc);
    dyn_check_exception(FC_RETOP);
    gen_mov_word_to_reg(reg_dst,&core_dynrec.readdata,dword);
}

// write a 32bit (dword=true) or 16bit (dword=false) value
// from reg_val into the memory given by the address
static void dyn_write_word(HostReg reg_addr,HostReg reg_val,bool dword) {
//  if (!dword) gen_extend_word(false,reg_val);
    gen_mov_regs(FC_OP2,reg_val);
    gen_mov_regs(FC_OP1,reg_addr);
    if (dword) gen_call_function_raw(mem_writed_checked_drc);
    else gen_call_function_raw(mem_writew_checked_drc);
    dyn_check_exception(FC_RETOP);
}
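
// In the effective address helpers below, op1==NULL means there is no
// base operand, and with both scale and imm zero the calculation reduces
// to a plain load plus add.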
// effective address calculation helper, op2 has to be present!
// loads op1 into ea_reg and adds the scaled op2 and the immediate to it
static void dyn_lea_mem_mem(HostReg ea_reg,void* op1,void* op2,Bitu scale,Bits imm) {
    if (scale || imm) {
        if (op1!=NULL) {
            gen_mov_word_to_reg(ea_reg,op1,true);
            gen_mov_word_to_reg(TEMP_REG_DRC,op2,true);

            gen_lea(ea_reg,TEMP_REG_DRC,scale,imm);
        } else {
            gen_mov_word_to_reg(ea_reg,op2,true);
            gen_lea(ea_reg,scale,imm);
        }
    } else {
        gen_mov_word_to_reg(ea_reg,op2,true);
        if (op1!=NULL) gen_add(ea_reg,op1);
    }
}

#ifdef DRC_USE_REGS_ADDR
// effective address calculation helper
// loads op1 into ea_reg and adds the scaled op2 and the immediate to it
// op1 is cpu_regs[op1_index], op2 is cpu_regs[op2_index]
static void dyn_lea_regval_regval(HostReg ea_reg,Bitu op1_index,Bitu op2_index,Bitu scale,Bits imm) {
    if (scale || imm) {
        MOV_REG_VAL_TO_HOST_REG(ea_reg,op1_index);
        MOV_REG_VAL_TO_HOST_REG(TEMP_REG_DRC,op2_index);

        gen_lea(ea_reg,TEMP_REG_DRC,scale,imm);
    } else {
        MOV_REG_VAL_TO_HOST_REG(ea_reg,op2_index);
        ADD_REG_VAL_TO_HOST_REG(ea_reg,op1_index);
    }
}

// effective address calculation helper
// loads op1 into ea_reg and adds the scaled op2 and the immediate to it
// op2 is cpu_regs[op2_index]
static void dyn_lea_mem_regval(HostReg ea_reg,void* op1,Bitu op2_index,Bitu scale,Bits imm) {
    if (scale || imm) {
        if (op1!=NULL) {
            gen_mov_word_to_reg(ea_reg,op1,true);
            MOV_REG_VAL_TO_HOST_REG(TEMP_REG_DRC,op2_index);

            gen_lea(ea_reg,TEMP_REG_DRC,scale,imm);
        } else {
            MOV_REG_VAL_TO_HOST_REG(ea_reg,op2_index);
            gen_lea(ea_reg,scale,imm);
        }
    } else {
        MOV_REG_VAL_TO_HOST_REG(ea_reg,op2_index);
        if (op1!=NULL) gen_add(ea_reg,op1);
    }
}
#endif

#ifdef DRC_USE_SEGS_ADDR
#ifdef DRC_USE_REGS_ADDR
// effective address calculation helper
// loads op1 into ea_reg and adds the scaled op2 and the immediate to it
// op1 is Segs[op1_index], op2 is cpu_regs[op2_index]
static void dyn_lea_segphys_regval(HostReg ea_reg,Bitu op1_index,Bitu op2_index,Bitu scale,Bits imm) {
    if (scale || imm) {
        MOV_SEG_PHYS_TO_HOST_REG(ea_reg,op1_index);
        MOV_REG_VAL_TO_HOST_REG(TEMP_REG_DRC,op2_index);

        gen_lea(ea_reg,TEMP_REG_DRC,scale,imm);
    } else {
        MOV_REG_VAL_TO_HOST_REG(ea_reg,op2_index);
        ADD_SEG_PHYS_TO_HOST_REG(ea_reg,op1_index);
    }
}

#else

// effective address calculation helper, op2 has to be present!
// loads op1 into ea_reg and adds the scaled op2 and the immediate to it
// op1 is Segs[op1_index]
static void dyn_lea_segphys_mem(HostReg ea_reg,Bitu op1_index,void* op2,Bitu scale,Bits imm) {
    if (scale || imm) {
        MOV_SEG_PHYS_TO_HOST_REG(ea_reg,op1_index);
        gen_mov_word_to_reg(TEMP_REG_DRC,op2,true);

        gen_lea(ea_reg,TEMP_REG_DRC,scale,imm);
    } else {
        gen_mov_word_to_reg(ea_reg,op2,true);
        ADD_SEG_PHYS_TO_HOST_REG(ea_reg,op1_index);
    }
}
#endif
#endif
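
// Worked example (16-bit addressing): for "mov ax,[bx+si+4]" the modrm
// byte decodes to mod=1, rm=0, so imm is fetched as a sign-extended byte
// (4), DYN_LEA_REG_VAL_REG_VAL forms BX+SI+4, gen_extend_word truncates
// the sum to 16 bits, and finally the DS base is added (no override
// prefix, and rm=0 does not default to SS).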
// calculate the effective address and store it in ea_reg
static void dyn_fill_ea(HostReg ea_reg,bool addseg=true) {
    Bit8u seg_base=DRC_SEG_DS;
    if (!decode.big_addr) {
        Bits imm=0;
        switch (decode.modrm.mod) {
        case 0:imm=0;break;
        case 1:imm=(Bit8s)decode_fetchb();break;
        case 2:imm=(Bit16s)decode_fetchw();break;
        }
        switch (decode.modrm.rm) {
        case 0:// BX+SI
            DYN_LEA_REG_VAL_REG_VAL(ea_reg,DRC_REG_EBX,DRC_REG_ESI,0,imm);
            break;
        case 1:// BX+DI
            DYN_LEA_REG_VAL_REG_VAL(ea_reg,DRC_REG_EBX,DRC_REG_EDI,0,imm);
            break;
        case 2:// BP+SI
            DYN_LEA_REG_VAL_REG_VAL(ea_reg,DRC_REG_EBP,DRC_REG_ESI,0,imm);
            seg_base=DRC_SEG_SS;
            break;
        case 3:// BP+DI
            DYN_LEA_REG_VAL_REG_VAL(ea_reg,DRC_REG_EBP,DRC_REG_EDI,0,imm);
            seg_base=DRC_SEG_SS;
            break;
        case 4:// SI
            MOV_REG_VAL_TO_HOST_REG(ea_reg,DRC_REG_ESI);
            if (imm) gen_add_imm(ea_reg,(Bit32u)imm);
            break;
        case 5:// DI
            MOV_REG_VAL_TO_HOST_REG(ea_reg,DRC_REG_EDI);
            if (imm) gen_add_imm(ea_reg,(Bit32u)imm);
            break;
        case 6:// imm/BP
            if (!decode.modrm.mod) {
                imm=decode_fetchw();
                gen_mov_dword_to_reg_imm(ea_reg,(Bit32u)imm);
                goto skip_extend_word;
            } else {
                MOV_REG_VAL_TO_HOST_REG(ea_reg,DRC_REG_EBP);
                gen_add_imm(ea_reg,(Bit32u)imm);
                seg_base=DRC_SEG_SS;
            }
            break;
        case 7: // BX
            MOV_REG_VAL_TO_HOST_REG(ea_reg,DRC_REG_EBX);
            if (imm) gen_add_imm(ea_reg,(Bit32u)imm);
            break;
        }
        // zero out the high 16 bits so ea_reg can be used as a full register
        gen_extend_word(false,ea_reg);
skip_extend_word:
        if (addseg) {
            // add the physical segment value if requested
            ADD_SEG_PHYS_TO_HOST_REG(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base));
        }
    } else {
        Bits imm=0;
        Bit8u base_reg=0;
        switch (decode.modrm.rm) {
        case 0:base_reg=DRC_REG_EAX;break;
        case 1:base_reg=DRC_REG_ECX;break;
        case 2:base_reg=DRC_REG_EDX;break;
        case 3:base_reg=DRC_REG_EBX;break;
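        // SIB byte layout: scale in bits 7:6, scaled index register in
        // bits 5:3 (100b = no index), base register in bits 2:0 (101b
        // with mod==0 selects a 32-bit displacement instead of EBP)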
        case 4: // SIB
            {
                Bitu sib=decode_fetchb();
                bool scaled_reg_used=false;
                static Bit8u scaledtable[8]={
                    DRC_REG_EAX,DRC_REG_ECX,DRC_REG_EDX,DRC_REG_EBX,
                    0,DRC_REG_EBP,DRC_REG_ESI,DRC_REG_EDI
                };
                // see if scaling should be used and which register is to be scaled in this case
                if (((sib >> 3) & 7)!=4) scaled_reg_used=true;
                Bit8u scaled_reg=scaledtable[(sib >> 3) & 7];
                Bitu scale=(sib >> 6);

                switch (sib & 7) {
                case 0:base_reg=DRC_REG_EAX;break;
                case 1:base_reg=DRC_REG_ECX;break;
                case 2:base_reg=DRC_REG_EDX;break;
                case 3:base_reg=DRC_REG_EBX;break;
                case 4:base_reg=DRC_REG_ESP;seg_base=DRC_SEG_SS;break;
                case 5:
                    if (decode.modrm.mod) {
                        base_reg=DRC_REG_EBP;seg_base=DRC_SEG_SS;
                    } else {
                        // no basereg, maybe scalereg
                        Bitu val;
                        // try to get a pointer to the next dword code position
                        if (decode_fetchd_imm(val)) {
                            // succeeded, use the pointer to avoid code invalidation
                            if (!addseg) {
                                if (!scaled_reg_used) {
                                    gen_mov_word_to_reg(ea_reg,(void*)val,true);
                                } else {
                                    DYN_LEA_MEM_REG_VAL(ea_reg,NULL,scaled_reg,scale,0);
                                    gen_add(ea_reg,(void*)val);
                                }
                            } else {
                                if (!scaled_reg_used) {
                                    MOV_SEG_PHYS_TO_HOST_REG(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base));
                                } else {
                                    DYN_LEA_SEG_PHYS_REG_VAL(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base),scaled_reg,scale,0);
                                }
                                gen_add(ea_reg,(void*)val);
                            }
                            return;
                        }
                        // couldn't get a pointer, use the current value
                        imm=(Bit32s)val;

                        if (!addseg) {
                            if (!scaled_reg_used) {
                                gen_mov_dword_to_reg_imm(ea_reg,(Bit32u)imm);
                            } else {
                                DYN_LEA_MEM_REG_VAL(ea_reg,NULL,scaled_reg,scale,imm);
                            }
                        } else {
                            if (!scaled_reg_used) {
                                MOV_SEG_PHYS_TO_HOST_REG(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base));
                                if (imm) gen_add_imm(ea_reg,(Bit32u)imm);
                            } else {
                                DYN_LEA_SEG_PHYS_REG_VAL(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base),scaled_reg,scale,imm);
                            }
                        }

                        return;
                    }
                    break;
                case 6:base_reg=DRC_REG_ESI;break;
                case 7:base_reg=DRC_REG_EDI;break;
                }
                // basereg, maybe scalereg
                switch (decode.modrm.mod) {
                case 1:
                    imm=(Bit8s)decode_fetchb();
                    break;
                case 2: {
                    Bitu val;
                    // try to get a pointer to the next dword code position
                    if (decode_fetchd_imm(val)) {
                        // succeeded, use the pointer to avoid code invalidation
                        if (!addseg) {
                            if (!scaled_reg_used) {
                                MOV_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
                                gen_add(ea_reg,(void*)val);
                            } else {
                                DYN_LEA_REG_VAL_REG_VAL(ea_reg,base_reg,scaled_reg,scale,0);
                                gen_add(ea_reg,(void*)val);
                            }
                        } else {
                            if (!scaled_reg_used) {
                                MOV_SEG_PHYS_TO_HOST_REG(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base));
                            } else {
                                DYN_LEA_SEG_PHYS_REG_VAL(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base),scaled_reg,scale,0);
                            }
                            ADD_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
                            gen_add(ea_reg,(void*)val);
                        }
                        return;
                    }
                    // couldn't get a pointer, use the current value
                    imm=(Bit32s)val;
                    break;
                }
                }

                if (!addseg) {
                    if (!scaled_reg_used) {
                        MOV_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
                        gen_add_imm(ea_reg,(Bit32u)imm);
                    } else {
                        DYN_LEA_REG_VAL_REG_VAL(ea_reg,base_reg,scaled_reg,scale,imm);
                    }
                } else {
                    if (!scaled_reg_used) {
                        MOV_SEG_PHYS_TO_HOST_REG(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base));
                        ADD_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
                        if (imm) gen_add_imm(ea_reg,(Bit32u)imm);
                    } else {
                        DYN_LEA_SEG_PHYS_REG_VAL(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base),scaled_reg,scale,imm);
                        ADD_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
                    }
                }

                return;
            }
            break; // SIB break
        case 5:
            if (decode.modrm.mod) {
                base_reg=DRC_REG_EBP;seg_base=DRC_SEG_SS;
            } else {
                // no basereg, no scalereg

                imm=(Bit32s)decode_fetchd();
                if (!addseg) {
                    gen_mov_dword_to_reg_imm(ea_reg,(Bit32u)imm);
                } else {
                    MOV_SEG_PHYS_TO_HOST_REG(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base));
                    if (imm) gen_add_imm(ea_reg,(Bit32u)imm);
                }

                return;
            }
            break;
        case 6:base_reg=DRC_REG_ESI;break;
        case 7:base_reg=DRC_REG_EDI;break;
        }

        // no scalereg, but basereg

        switch (decode.modrm.mod) {
        case 1:
            imm=(Bit8s)decode_fetchb();
            break;
        case 2: {
            Bitu val;
            // try to get a pointer to the next dword code position
            if (decode_fetchd_imm(val)) {
                // succeeded, use the pointer to avoid code invalidation
                if (!addseg) {
                    MOV_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
                    gen_add(ea_reg,(void*)val);
                } else {
                    MOV_SEG_PHYS_TO_HOST_REG(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base));
                    ADD_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
                    gen_add(ea_reg,(void*)val);
                }
                return;
            }
            // couldn't get a pointer, use the current value
            imm=(Bit32s)val;
            break;
        }
        }

        if (!addseg) {
            MOV_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
            if (imm) gen_add_imm(ea_reg,(Bit32u)imm);
        } else {
            MOV_SEG_PHYS_TO_HOST_REG(ea_reg,(decode.seg_prefix_used ? decode.seg_prefix : seg_base));
            ADD_REG_VAL_TO_HOST_REG(ea_reg,base_reg);
            if (imm) gen_add_imm(ea_reg,(Bit32u)imm);
        }
    }
}
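
// The I/O permission checks below are only generated in protected mode;
// in real mode port access is always allowed, so no check code is emitted.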
// add code that checks if port access is allowed
// the port is given in a register
static void dyn_add_iocheck(HostReg reg_port,Bitu access_size) {
    if (cpu.pmode) {
        gen_call_function_RI(CPU_IO_Exception,reg_port,access_size);
        dyn_check_exception(FC_RETOP);
    }
}

// add code that checks if port access is allowed
// the port is a constant
static void dyn_add_iocheck_var(Bit8u accessed_port,Bitu access_size) {
    if (cpu.pmode) {
        gen_call_function_II(CPU_IO_Exception,accessed_port,access_size);
        dyn_check_exception(FC_RETOP);
    }
}



// save back the address register
static void gen_protect_addr_reg(void) {
#ifdef DRC_PROTECT_ADDR_REG
    gen_mov_word_from_reg(FC_ADDR,&core_dynrec.protected_regs[FC_ADDR],true);
#endif
}

// restore the address register
static void gen_restore_addr_reg(void) {
#ifdef DRC_PROTECT_ADDR_REG
    gen_mov_word_to_reg(FC_ADDR,&core_dynrec.protected_regs[FC_ADDR],true);
#endif
}

// save back an arbitrary register
static void gen_protect_reg(HostReg reg) {
    gen_mov_word_from_reg(reg,&core_dynrec.protected_regs[reg],true);
}

// restore an arbitrary register
static void gen_restore_reg(HostReg reg) {
    gen_mov_word_to_reg(reg,&core_dynrec.protected_regs[reg],true);
}

// restore an arbitrary register into a different register
static void gen_restore_reg(HostReg reg,HostReg dest_reg) {
    gen_mov_word_to_reg(dest_reg,&core_dynrec.protected_regs[reg],true);
}



// flags optimization functions
// they try to find out if a function can be replaced by another
// one that does not generate any flags at all
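// Example: an arithmetic instruction is normally translated into a call
// to a flag-generating helper and queued via InvalidateFlagsPartially.
// If a later instruction overwrites all condition flags before any
// instruction reads them, InvalidateFlags patches the queued call sites
// (gen_fill_function_ptr) to the flagless variants; an instruction that
// reads the flags calls AcquireFlags first, which simply drops the queue.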

static Bitu mf_functions_num=0;
static struct {
    Bit8u* pos;
    void* fct_ptr;
    Bitu ftype;
} mf_functions[64];

static void InitFlagsOptimization(void) {
    mf_functions_num=0;
}

// replace all queued functions with their simpler variants,
// because the current instruction destroys all condition flags and
// the flags were not needed before that
static void InvalidateFlags(void) {
#ifdef DRC_FLAGS_INVALIDATION
    for (Bitu ct=0; ct<mf_functions_num; ct++) {
        gen_fill_function_ptr(mf_functions[ct].pos,mf_functions[ct].fct_ptr,mf_functions[ct].ftype);
    }
    mf_functions_num=0;
#endif
}

// replace all queued functions with their simpler variants,
// because the current instruction destroys all condition flags and
// the flags were not needed before that
template <typename T> static void InvalidateFlags(const T current_simple_function,Bitu flags_type) {
#ifdef DRC_FLAGS_INVALIDATION
    for (Bitu ct=0; ct<mf_functions_num; ct++) {
        gen_fill_function_ptr(mf_functions[ct].pos,mf_functions[ct].fct_ptr,mf_functions[ct].ftype);
    }
    mf_functions_num=1;
    mf_functions[0].pos=cache.pos;
    mf_functions[0].fct_ptr=reinterpret_cast<void*>((uintptr_t)current_simple_function);
    mf_functions[0].ftype=flags_type;
#endif
}

// enqueue this instruction; if an instruction is encountered later that
// destroys all condition flags and the flags weren't needed in between,
// this function can be replaced by a simpler one as well
template <typename T> static void InvalidateFlagsPartially(const T current_simple_function,Bitu flags_type) {
#ifdef DRC_FLAGS_INVALIDATION
    mf_functions[mf_functions_num].pos=cache.pos;
    mf_functions[mf_functions_num].fct_ptr=reinterpret_cast<void*>((uintptr_t)current_simple_function);
    mf_functions[mf_functions_num].ftype=flags_type;
    mf_functions_num++;
#endif
}

// enqueue this instruction; if an instruction is encountered later that
// destroys all condition flags and the flags weren't needed in between,
// this function can be replaced by a simpler one as well
template <typename T> static void InvalidateFlagsPartially(const T current_simple_function,DRC_PTR_SIZE_IM cpos,Bitu flags_type) {
#ifdef DRC_FLAGS_INVALIDATION
    mf_functions[mf_functions_num].pos=(Bit8u*)cpos;
    mf_functions[mf_functions_num].fct_ptr=reinterpret_cast<void*>((uintptr_t)current_simple_function);
    mf_functions[mf_functions_num].ftype=flags_type;
    mf_functions_num++;
#endif
}

// the current function needs the condition flags, thus reset the queue
static void AcquireFlags(Bitu flags_mask) {
    (void)flags_mask;

#ifdef DRC_FLAGS_INVALIDATION
    mf_functions_num=0;
#endif
}