DOSBox-X
|
/*
 *  Copyright (C) 2002-2020  The DOSBox Team
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */


#include <assert.h>
#include <sstream>
#include <stddef.h>
#include "dosbox.h"
#include "cpu.h"
#include "memory.h"
#include "debug.h"
#include "mapper.h"
#include "setup.h"
#include "programs.h"
#include "paging.h"
#include "callback.h"
#include "lazyflags.h"
#include "support.h"
#include "control.h"
#include "zipfile.h"

#if defined(_MSC_VER)
/* we don't care about switch statements with no case labels */
#pragma warning(disable:4065)
#endif

/* caution: do not uncomment unless you want a lot of spew */
//#define CPU_DEBUG_SPEW

#if defined(CPU_DEBUG_SPEW)
# define _LOG LOG
#else
/* When CPU debug spew is disabled, redirect every LOG(...)(...) call in this
 * translation unit to the variadic no-op macro CPU_LOG so the calls compile
 * away entirely. The _LOG subclass preserves access to the real LOG class
 * (under the name _LOG) for any code that still needs it. */
class _LOG : public LOG { // HACK
public:
    _LOG(LOG_TYPES type , LOG_SEVERITIES severity) : LOG(type,severity) { }
};
# undef LOG
# define LOG(X,Y) CPU_LOG
# define CPU_LOG(...)
# endif

bool enable_weitek = false;     // Weitek coprocessor emulation toggle

bool CPU_NMI_gate = true;       // NMI gate open: NMIs may be delivered when set
bool CPU_NMI_active = false;    // an NMI handler is executing (cleared again at IRET)
bool CPU_NMI_pending = false;   // an NMI edge has been latched and awaits delivery
bool do_seg_limits = false;     // enforce segment limits on memory access

bool enable_fpu = true;
bool enable_msr = true;
bool enable_cmpxchg8b = true;
bool ignore_undefined_msr = true;

extern bool ignore_opcode_63;

extern bool use_dynamic_core_with_paging;

bool cpu_double_fault_enable;
bool cpu_triple_fault_reset;

int cpu_rep_max = 0;

Bitu DEBUG_EnableDebugger(void);
extern void GFX_SetTitle(Bit32s cycles, Bits frameskip, Bits timing, bool paused);

CPU_Regs cpu_regs;
CPUBlock cpu;
Segments Segs;

/* [cpu] setting realbig16.
 * If set, allow code to switch back to real mode with the B (big) set in the
 * code selector, and retain the state of the B bit while running in 16-bit
 * real mode. Needed for demos like Project Angel.
 *
 * Modifications are a derivative of this patch:
 *
 * cpu.diff from http://www.vogons.org/viewtopic.php?f=33&t=28226&start=5
 *
 * The main difference between that patch and the modifications derived is that
 * I modified additional points to keep the big bit set (the original cpu.diff
 * missed the CALL and JMP emulation that also reset the flag)
 *
 * It's well known that DOS programs can access all 4GB of addressable memory by
 * jumping into protected mode, loading segment registers with a 4GB limit, then
 * jumping back to real mode without reloading the segment registers, knowing
 * that Intel processors will not update the shadow part of the segment register
 * in real mode.
I'm guessing that what Project Angel is doing, is using the same 00103 * abuse of protected mode to also set the B (big) bit in the code segment so that 00104 * it's code segment can extend past 64KB (huge unreal mode), which works until 00105 * something like an interrupt chops off the top 16 bits of the instruction pointer. 00106 * 00107 * I want to clarify that realbig16 is an OPTION that is off by default, because 00108 * I am uncertain at this time whether or not the patch breaks any DOS games or 00109 * OS emulation. It is rare for a DOS game or demo to actually abuse the CPU in 00110 * that way, so it is set up that you have to enable it if you need it. --J.C. 00111 * 00112 * J.C. TODO: Write a program that abuses the B (big) bit in real mode in the same 00113 * way that Project Angel supposedly does, see if it works, then test it 00114 * and Project Angel on some old 386/486/Pentium systems lying around to 00115 * see how compatible such abuse is with PC hardware. That would make a 00116 * good Hackipedia.org episode as well. --J.C. 00117 * 00118 * 2014/01/19: I can attest that this patch does indeed allow Project Angel to 00119 * run when realbig16=true. And if GUS emulation is active, there is 00120 * music as well. Now as for reliability... testing shows that one of 00121 * three things can happen when you run the demo: 00122 * 00123 * 1) the demo hangs on startup, either right away or after it starts 00124 * the ominous music (if you sit for 30 seconds waiting for the 00125 * music to build up and nothing happens, consider closing the 00126 * emulator and trying again). 00127 * 00128 * 2) the demo runs perfectly fine, but timing is slightly screwed up, 00129 * and parts of the music sound badly out of sync with each other, 00130 * or randomly slows to about 1/2 speed in some sections, animations 00131 * run slow sometimes. If this happens, make sure you didn't set 00132 * forcerate=ntsc. 
00133 * 00134 * 3) the demo runs perfectly fine, with no timing issues, except that 00135 * DOSBox's S3 emulation is not quite on-time and the bottom 1/4th 00136 * of the screen flickers with the contents of the next frame that 00137 * the demo is still drawing :( 00138 * 00139 * --J.C. */ 00140 bool cpu_allow_big16 = false; 00141 00142 cpu_cycles_count_t CPU_Cycles = 0; 00143 cpu_cycles_count_t CPU_CycleLeft = 3000; 00144 cpu_cycles_count_t CPU_CycleMax = 3000; 00145 cpu_cycles_count_t CPU_OldCycleMax = 3000; 00146 cpu_cycles_count_t CPU_CyclePercUsed = 100; 00147 cpu_cycles_count_t CPU_CycleLimit = -1; 00148 cpu_cycles_count_t CPU_CycleUp = 0; 00149 cpu_cycles_count_t CPU_CycleDown = 0; 00150 cpu_cycles_count_t CPU_CyclesSet = 3000; 00151 cpu_cycles_count_t CPU_IODelayRemoved = 0; 00152 char core_mode[16]; 00153 CPU_Decoder * cpudecoder; 00154 bool CPU_CycleAutoAdjust = false; 00155 bool CPU_SkipCycleAutoAdjust = false; 00156 unsigned char CPU_AutoDetermineMode = 0; 00157 00158 unsigned char CPU_ArchitectureType = CPU_ARCHTYPE_MIXED; 00159 00160 Bitu CPU_extflags_toggle=0; // ID and AC flags may be toggled depending on emulated CPU architecture 00161 00162 unsigned int CPU_PrefetchQueueSize=0; 00163 00164 void CPU_Core_Normal_Init(void); 00165 #if !defined(C_EMSCRIPTEN) 00166 void CPU_Core_Simple_Init(void); 00167 void CPU_Core_Full_Init(void); 00168 #endif 00169 #if (C_DYNAMIC_X86) 00170 void CPU_Core_Dyn_X86_Init(void); 00171 void CPU_Core_Dyn_X86_Cache_Init(bool enable_cache); 00172 void CPU_Core_Dyn_X86_Cache_Close(void); 00173 void CPU_Core_Dyn_X86_Cache_Reset(void); 00174 void CPU_Core_Dyn_X86_SetFPUMode(bool dh_fpu); 00175 void CPU_Core_Dyn_X86_Cache_Reset(void); 00176 #elif (C_DYNREC) 00177 void CPU_Core_Dynrec_Init(void); 00178 void CPU_Core_Dynrec_Cache_Init(bool enable_cache); 00179 void CPU_Core_Dynrec_Cache_Close(void); 00180 void CPU_Core_Dynrec_Cache_Reset(void); 00181 #endif 00182 00183 bool CPU_IsDynamicCore(void); 00184 00185 void 
menu_update_cputype(void) {
    /* Synchronize the "CPU type" menu with the current architecture setting
     * and active decoder: check the matching entry and grey out entries the
     * active core cannot run. */
    bool allow_prefetch = false;
    bool allow_pre386 = false;

    /* Prefetch-queue and pre-386 CPU types are available on the normal cores
     * only; the full and simple cores (and any dynamic core) do not support
     * them. */
    if (!CPU_IsDynamicCore()) {
        allow_prefetch = true;
        allow_pre386 = true;
        if ((cpudecoder == &CPU_Core_Full_Run) ||
            (cpudecoder == &CPU_Core_Simple_Run)) {
            allow_prefetch = false;
            allow_pre386 = false;
        }
    }

    mainMenu.get_item("cputype_auto").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_MIXED).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_8086").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_8086 && (cpudecoder != &CPU_Core8086_Prefetch_Run)).
        enable(allow_pre386).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_8086_prefetch").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_8086 && (cpudecoder == &CPU_Core8086_Prefetch_Run)).
        enable(allow_prefetch && allow_pre386).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_80186").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_80186 && (cpudecoder != &CPU_Core286_Prefetch_Run)).
        enable(allow_pre386).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_80186_prefetch").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_80186 && (cpudecoder == &CPU_Core286_Prefetch_Run)).
        enable(allow_prefetch && allow_pre386).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_286").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_286 && (cpudecoder != &CPU_Core286_Prefetch_Run)).
        enable(allow_pre386).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_286_prefetch").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_286 && (cpudecoder == &CPU_Core286_Prefetch_Run)).
        enable(allow_prefetch && allow_pre386).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_386").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_386 && (cpudecoder != &CPU_Core_Prefetch_Run)).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_386_prefetch").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_386 && (cpudecoder == &CPU_Core_Prefetch_Run)).
        enable(allow_prefetch).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_486old").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_486OLD && (cpudecoder != &CPU_Core_Prefetch_Run)).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_486old_prefetch").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_486OLD && (cpudecoder == &CPU_Core_Prefetch_Run)).
        enable(allow_prefetch).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_486").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_486NEW && (cpudecoder != &CPU_Core_Prefetch_Run)).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_486_prefetch").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_486NEW && (cpudecoder == &CPU_Core_Prefetch_Run)).
        enable(allow_prefetch).
        refresh_item(mainMenu);
    mainMenu.get_item("cputype_pentium").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_PENTIUM).
        refresh_item(mainMenu);
#if C_FPU
    mainMenu.get_item("cputype_pentium_mmx").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_PMMXSLOW).
        refresh_item(mainMenu);
#else
    /* MMX emulation requires the FPU build; keep the item visible but disabled. */
    mainMenu.get_item("cputype_pentium_mmx").
        check(false).
        enable(false).
        refresh_item(mainMenu);
#endif
    mainMenu.get_item("cputype_ppro_slow").
        check(CPU_ArchitectureType == CPU_ARCHTYPE_PPROSLOW).
        refresh_item(mainMenu);
}

/* Synchronize the core-selection menu ("normal"/"simple"/"full"/"dynamic")
 * with the active decoder. Prefetch and pre-386 decoders lock the menu to the
 * normal core; the dynamic core is additionally gated on the cputype setting. */
void menu_update_core(void) {
    const Section_prop * cpu_section = static_cast<Section_prop *>(control->GetSection("cpu"));
    const std::string cpu_sec_type = cpu_section->Get_string("cputype");
    bool allow_dynamic = false;

    /* silence unused-variable warnings in configurations that compile out
     * the dynamic core sections below */
    (void)cpu_section;
    (void)cpu_sec_type;
    (void)allow_dynamic;

    /* cannot select Dynamic core if prefetch cpu types are in use */
    allow_dynamic = (strstr(cpu_sec_type.c_str(),"_prefetch") == NULL);

    mainMenu.get_item("mapper_normal").
        check(cpudecoder == &CPU_Core_Normal_Run ||
              cpudecoder == &CPU_Core286_Normal_Run ||
              cpudecoder == &CPU_Core8086_Normal_Run ||
              cpudecoder == &CPU_Core_Prefetch_Run ||
              cpudecoder == &CPU_Core286_Prefetch_Run ||
              cpudecoder == &CPU_Core8086_Prefetch_Run).
        refresh_item(mainMenu);
#if !defined(C_EMSCRIPTEN)//FIXME: Shutdown causes problems with Emscripten
    mainMenu.get_item("mapper_simple").
        check(cpudecoder == &CPU_Core_Simple_Run).
        enable((cpudecoder != &CPU_Core_Prefetch_Run) &&
               (cpudecoder != &CPU_Core286_Prefetch_Run) &&
               (cpudecoder != &CPU_Core8086_Prefetch_Run) &&
               (cpudecoder != &CPU_Core286_Normal_Run) &&
               (cpudecoder != &CPU_Core8086_Normal_Run)).
        refresh_item(mainMenu);
    mainMenu.get_item("mapper_full").
        check(cpudecoder == &CPU_Core_Full_Run).
        enable((cpudecoder != &CPU_Core_Prefetch_Run) &&
               (cpudecoder != &CPU_Core286_Prefetch_Run) &&
               (cpudecoder != &CPU_Core8086_Prefetch_Run) &&
               (cpudecoder != &CPU_Core286_Normal_Run) &&
               (cpudecoder != &CPU_Core8086_Normal_Run)).
        refresh_item(mainMenu);
#endif
#if (C_DYNAMIC_X86)
    mainMenu.get_item("mapper_dynamic").
        check(cpudecoder == &CPU_Core_Dyn_X86_Run).
        enable(allow_dynamic &&
               (cpudecoder != &CPU_Core_Prefetch_Run) &&
               (cpudecoder != &CPU_Core286_Prefetch_Run) &&
               (cpudecoder != &CPU_Core8086_Prefetch_Run) &&
               (cpudecoder != &CPU_Core286_Normal_Run) &&
               (cpudecoder != &CPU_Core8086_Normal_Run)).
        refresh_item(mainMenu);
#endif
#if (C_DYNREC)
    mainMenu.get_item("mapper_dynamic").
        check(cpudecoder == &CPU_Core_Dynrec_Run).
        enable(allow_dynamic &&
               (cpudecoder != &CPU_Core_Prefetch_Run) &&
               (cpudecoder != &CPU_Core286_Prefetch_Run) &&
               (cpudecoder != &CPU_Core8086_Prefetch_Run) &&
               (cpudecoder != &CPU_Core286_Normal_Run) &&
               (cpudecoder != &CPU_Core8086_Normal_Run)).
        refresh_item(mainMenu);
#endif
}

/* Update the "Auto cycles" menu item's label and check mark to reflect the
 * current cycle-adjustment mode: max, auto, or off. */
void menu_update_autocycle(void) {
    DOSBoxMenu::item &item = mainMenu.get_item("mapper_cycauto");
    if (CPU_CycleAutoAdjust)
        item.set_text("Auto cycles [max]");
    else if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CYCLES)
        item.set_text("Auto cycles [auto]");
    else
        item.set_text("Auto cycles [off]");

    item.check(CPU_CycleAutoAdjust || (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CYCLES));
    item.refresh_item(mainMenu);
}

/* called to signal an NMI. */

/* NTS: From the Intel 80386 programmer's reference manual:
 *
 * "
 * 9.2.1 NMI Masks Further NMIs
 *
 * While an NMI handler is executing, the processor ignores further interrupt
 * signals at the NMI pin until the next IRET instruction is executed.
 * "
 *
 * This is why, further down, CPU_IRET() clears the CPU_NMI_active flag.
 *
 *
 * And, in response to my father's incredulous response regarding the fact that
 * NMI is edge-triggered (from the Intel 386SX Microprocessor datasheet):
 *
 * "
 * Non-Maskable Interrupt Request (NMI))
 *
 * This input indicates a request for interrupt service
 * which cannot be masked by software. The non-
 * maskable interrupt request is always processed ac-
 * cording to the pointer or gate in slot 2 of the interrupt
 * table. Because of the fixed NMI slot assignment, no
 * interrupt acknowledge cycles are performed when
 * processing NMI.
 *
 * NMI is an active HIGH, rising edge-sensitive asyn-
 * chronous signal. Setup and hold times, t27 and and t28,
 * relative to the CLK2 signal must be met to guarantee
 * recognition at a particular clock edge. To assure rec-
 * ognition of NMI, it must be inactive for at least eight
 * CLK2 periods, and then be active for at least eight
 * CLK2 periods before the beginning of the instruction
 * boundary in the Intel386 SX Microprocessor's Exe-
 * cution Unit.
 *
 * Once NMI processing has begun, no additional
 * NMI's are processed until after the next IRET in-
 * struction, which is typically the end of the NMI serv-
 * ice routine. If NMI is re-asserted prior to that time,
 * however, one rising edge on NMI will be remem-
 * bered for processing after executing the next IRET
 * instruction
 * "
 *
 * From the Pentium Pro Processor datasheet:
 *
 * "
 * A.38 NMI (I)
 *
 * The NMI signal is the Non-maskable Interrupt signal.
 * It is the state of the LINT1 signal when APIC is
 * disabled. Asserting NMI causes an interrupt with an
 * internally supplied vector value of 2. An external
 * interrupt-acknowledge transaction is not generated. If
 * NMI is asserted during the execution of an NMI
 * service routine, it remains pending and is recognized
 * after the IRET is executed by the NMI service
 * routine. At most, one assertion of NMI is held
 * pending.
 *
 * NMI is rising-edge sensitive. Recognition of NMI is
 * guaranteed in a specific clock if it is asserted
 * synchronously and meets the setup and hold times. If
 * asserted asynchronously, active and inactive pulse
 * widths must be a minimum of two clocks. In FRC
 * mode, NMI must be synchronous to BCLK.
 * "
 *
 * Similar references exist in the Pentium III and Pentium 4
 * datasheets, while later on in the Core 2 datasheets there
 * is no mention whatsoever to the NMI that I can find.
 */
/* Deliver the latched NMI now: mark it active and raise INT 2. */
void CPU_NMI_Interrupt() {
    /* WARNING: Do not call while running inside a CPU core loop */
    if (CPU_NMI_active) E_Exit("CPU_NMI_Interrupt() called while NMI already active");
    CPU_NMI_active = true;
    CPU_NMI_pending = false;
    CPU_Interrupt(2/*INT 2 = NMI*/,0,reg_eip);
}

/* Latch an NMI edge and request delivery as soon as the core can stop. */
void CPU_Raise_NMI() {
    CPU_NMI_pending = true;
    CPU_Check_NMI();
}

extern Bitu PIC_IRQCheck;

/* If an NMI is pending, not masked by an in-progress NMI handler, and the
 * NMI gate is open, force the CPU core loop to stop early so the NMI can be
 * delivered from outside the core. */
void CPU_Check_NMI() {
    if (!CPU_NMI_active && CPU_NMI_gate && CPU_NMI_pending) {
        /* STOP THE CPU CORE.
         * reg_eip is not valid until the CPU core has left the runtime loop. */
        if (CPU_Cycles > 1) {
            CPU_CycleLeft += CPU_Cycles;
            CPU_Cycles = 1;
        }

        PIC_IRQCheck = true;
    }
}

/* In debug mode exceptions are tested and dosbox exits when
 * a unhandled exception state is detected.
 * USE CHECK_EXCEPT to raise an exception in that case to see if that exception
 * solves the problem.
 *
 * In non-debug mode dosbox doesn't do detection (and hence doesn't crash at
 * that point).
 * (game might crash later due to the unhandled exception) */

#define CPU_CHECK_EXCEPT 1
// #define CPU_CHECK_IGNORE 1

#if C_DEBUG
// #define CPU_CHECK_EXCEPT 1
// #define CPU_CHECK_IGNORE 1
/* Use CHECK_EXCEPT when something doesn't work to see if a exception is
 * needed that isn't enabled by default.*/
#else
/* NORMAL NO CHECKING => More Speed */
//#define CPU_CHECK_IGNORE 1
#endif /* C_DEBUG */

/* CPU_CHECK_COND(cond,msg,exc,sel): protection-check helper.
 *  - CPU_CHECK_IGNORE: evaluate cond (for its side effects) and continue.
 *  - CPU_CHECK_EXCEPT: if cond holds, raise exception 'exc' with error code
 *    'sel' and return from the enclosing (void) function.
 *  - otherwise: abort the emulator with 'msg'. */
#if defined(CPU_CHECK_IGNORE)
#define CPU_CHECK_COND(cond,msg,exc,sel) {	\
    if (cond) do {} while (0);		\
}
#elif defined(CPU_CHECK_EXCEPT)
#define CPU_CHECK_COND(cond,msg,exc,sel) {	\
    if (cond) {					\
        CPU_Exception(exc,sel);		\
        return;				\
    }					\
}
#else
#define CPU_CHECK_COND(cond,msg,exc,sel) {	\
    if (cond) E_Exit(msg);			\
}
#endif


/* Read an 8-byte descriptor from guest memory at 'address' into 'saved'.
 * cpu.mpl is dropped to 0 around the access so it is performed with
 * supervisor memory privilege, then restored to 3. */
void Descriptor::Load(PhysPt address) {
    cpu.mpl=0;
    Bit32u* data = (Bit32u*)&saved;
    *data = mem_readd(address);
    *(data+1) = mem_readd(address+4);
    cpu.mpl=3;
}
/* Write the 8-byte descriptor in 'saved' back to guest memory at 'address',
 * with the same temporary privilege drop as Load(). */
void Descriptor:: Save(PhysPt address) {
    cpu.mpl=0;
    const Bit32u* data = (Bit32u*)&saved;
    mem_writed(address,*data);
    mem_writed(address+4,*(data+1));
    cpu.mpl=3;
}


/* Push a 16-bit value on the guest stack. cpu.stack.mask/notmask select
 * between 16-bit (SP) and 32-bit (ESP) stack arithmetic; ESP is only
 * updated after the write succeeds (a page fault leaves it unchanged). */
void CPU_Push16(Bit16u value) {
    Bit32u new_esp=(reg_esp&cpu.stack.notmask)|((reg_esp-2)&cpu.stack.mask);
    mem_writew(SegPhys(ss) + (new_esp & cpu.stack.mask) ,value);
    reg_esp=new_esp;
}

/* Push a 32-bit value on the guest stack (see CPU_Push16 for the masking). */
void CPU_Push32(Bit32u value) {
    Bit32u new_esp=(reg_esp&cpu.stack.notmask)|((reg_esp-4)&cpu.stack.mask);
    mem_writed(SegPhys(ss) + (new_esp & cpu.stack.mask) ,value);
    reg_esp=new_esp;
}

/* Pop a 16-bit value off the guest stack. */
Bit16u CPU_Pop16(void) {
    Bit16u val=mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask));
    reg_esp=(reg_esp&cpu.stack.notmask)|((reg_esp+2)&cpu.stack.mask);
    return val;
}

Bit32u
CPU_Pop32(void) {
    /* Pop a 32-bit value off the guest stack (16/32-bit stack selected by
     * cpu.stack.mask, as in CPU_Push32). */
    Bit32u val=mem_readd(SegPhys(ss) + (reg_esp & cpu.stack.mask));
    reg_esp=(reg_esp&cpu.stack.notmask)|((reg_esp+4)&cpu.stack.mask);
    return val;
}

/* Return the linear base address of selector 'sel': the descriptor base in
 * protected mode, or sel*16 in real mode. */
PhysPt SelBase(Bitu sel) {
    if (cpu.cr0 & CR0_PROTECTION) {
        Descriptor desc;
        cpu.gdt.GetDescriptor(sel,desc);
        return desc.GetBase();
    } else {
        return (PhysPt)(sel<<4);
    }
}

/* Change the current privilege level. When paging is enabled and the change
 * crosses the user/supervisor boundary (CPL 3 <-> CPL < 3), the paging code
 * is notified so it can switch its privilege handling. */
void CPU_SetCPL(Bitu newcpl) {
    if (newcpl != cpu.cpl) {
        if (paging.enabled) {
            if ( ((cpu.cpl < 3) && (newcpl == 3)) || ((cpu.cpl == 3) && (newcpl < 3)) )
                PAGING_SwitchCPL(newcpl == 3);
        }
        cpu.cpl = newcpl;
    }
}

/* Write 'word' into the emulated EFLAGS under 'mask', applying fixups for
 * the emulated CPU generation, and recompute the cached string direction. */
void CPU_SetFlags(Bitu word,Bitu mask) {
    /* 8086/286 flags manipulation.
     * For more information read about the Intel CPU detection algorithm and other bits of info:
     * [http://www.rcollins.org/ddj/Sep96/Sep96.html] */

    /* 8086: bits 12-15 cannot be zeroed */
    if (CPU_ArchitectureType <= CPU_ARCHTYPE_80186) {
        /* update mask and word to ensure bits 12-15 are set */
        word |= 0xF000U;
        mask |= 0xF000U;
    }
    /* 286 real mode: bits 12-15 bits cannot be set, always zero */
    else if (CPU_ArchitectureType <= CPU_ARCHTYPE_286) {
        if (!(cpu.cr0 & CR0_PROTECTION)) {
            /* update mask and word to ensure bits 12-15 are zero */
            word &= ~0xF000U;
            mask |= 0xF000U;
        }
    }
    else {
        mask |= CPU_extflags_toggle;	// ID-flag and AC-flag can be toggled on CPUID-supporting CPUs
    }

    reg_flags=(reg_flags & ~mask)|(word & mask)|2U;  /* bit 1 is always set */
    cpu.direction=1 - (int)((reg_flags & FLAG_DF) >> 9U);
    // ^ NTS: Notice the DF flag is bit 10. This code computes (reg_flags & FLAG_DF) >> 9 on purpose.
    //        It's not a typo (9 vs 10), it's done to set cpu.direction to either 1 or -1.
}

/* Record a pending CPU exception (vector + error code) in cpu.exception and
 * return true so callers can report "exception raised" up the chain. */
bool CPU_PrepareException(Bitu which,Bitu error) {
    cpu.exception.which=which;
    cpu.exception.error=error;
    return true;
}

/* CLI: prepares #GP(0) when IOPL forbids it (protected mode with IOPL<CPL,
 * or V86 mode with IOPL<3), otherwise clears IF. Returns true if an
 * exception was prepared. */
bool CPU_CLI(void) {
    if (cpu.pmode && ((!GETFLAG(VM) && (GETFLAG_IOPL<cpu.cpl)) || (GETFLAG(VM) && (GETFLAG_IOPL<3)))) {
        return CPU_PrepareException(EXCEPTION_GP,0);
    } else {
        SETFLAGBIT(IF,false);
        return false;
    }
}

/* STI: same privilege rules as CPU_CLI, but sets IF on success. */
bool CPU_STI(void) {
    if (cpu.pmode && ((!GETFLAG(VM) && (GETFLAG_IOPL<cpu.cpl)) || (GETFLAG(VM) && (GETFLAG_IOPL<3)))) {
        return CPU_PrepareException(EXCEPTION_GP,0);
    } else {
        SETFLAGBIT(IF,true);
        return false;
    }
}

/* POPF/POPFD: pop flags with the privilege-dependent write mask applied.
 * Returns true if a #GP(0) was prepared instead. */
bool CPU_POPF(Bitu use32) {
    if (cpu.pmode && GETFLAG(VM) && (GETFLAG(IOPL)!=FLAG_IOPL)) {
        /* Not enough privileges to execute POPF */
        return CPU_PrepareException(EXCEPTION_GP,0);
    }
    Bitu mask=FMASK_ALL;
    /* IOPL field can only be modified when CPL=0 or in real mode: */
    if (cpu.pmode && (cpu.cpl>0)) mask &= (~FLAG_IOPL);
    if (cpu.pmode && !GETFLAG(VM) && (GETFLAG_IOPL<cpu.cpl)) mask &= (~FLAG_IF);
    if (use32)
        CPU_SetFlags(CPU_Pop32(),mask);
    else CPU_SetFlags(CPU_Pop16(),mask & 0xffff);
    DestroyConditionFlags();
    return false;
}

/* PUSHF/PUSHFD: push the materialized flags image (VM and RF masked off in
 * the 32-bit form). Returns true if a #GP(0) was prepared instead. */
bool CPU_PUSHF(Bitu use32) {
    if (cpu.pmode && GETFLAG(VM) && (GETFLAG(IOPL)!=FLAG_IOPL)) {
        /* Not enough privileges to execute PUSHF */
        return CPU_PrepareException(EXCEPTION_GP,0);
    }
    FillFlags();    /* fold lazy flags into reg_flags before pushing */
    if (use32)
        CPU_Push32(reg_flags & 0xfcffff);
    else CPU_Push16((Bit16u)reg_flags);
    return false;
}

/* Invalidate (load with selector 0) a data segment register whose cached
 * descriptor is no longer accessible: either the selector no longer resolves
 * to a descriptor, or CPL now exceeds the DPL of a data/non-conforming
 * segment. */
void CPU_CheckSegment(const enum SegNames segi) {
    bool needs_invalidation=false;
    Descriptor desc;

    if (!cpu.gdt.GetDescriptor(SegValue(segi),desc)) {
        needs_invalidation=true;
    }
    else {
        switch (desc.Type()) {
        case DESC_DATA_EU_RO_NA:	case
             DESC_DATA_EU_RO_A:	case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
        case DESC_DATA_ED_RO_NA:	case DESC_DATA_ED_RO_A:	case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
        case DESC_CODE_N_NC_A:	case DESC_CODE_N_NC_NA:	case DESC_CODE_R_NC_A:	case DESC_CODE_R_NC_NA:
            /* data segments and non-conforming code segments require CPL <= DPL */
            if (cpu.cpl>desc.DPL()) needs_invalidation=true;
            break;
        default:
            break;
        }
    }

    if (needs_invalidation)
        CPU_SetSegGeneral(segi,0);
}

/* Re-validate ES/DS/FS/GS against the current CPL (see CPU_CheckSegment). */
void CPU_CheckSegments(void) {
    CPU_CheckSegment(es);
    CPU_CheckSegment(ds);
    CPU_CheckSegment(fs);
    CPU_CheckSegment(gs);
}

/* Handle on the emulated task state segment (TSS): caches the selector, its
 * descriptor, the linear base, limit, and whether it is a 386 (32-bit) TSS. */
class TaskStateSegment {
public:
    TaskStateSegment() {
    }
    bool IsValid(void) {
        return valid;
    }
    /* Read the back-link field (selector of the previous task) at offset 0.
     * mpl is dropped to 0 around the access, as for all TSS reads here. */
    Bitu Get_back(void) {
        cpu.mpl=0;
        Bit16u backlink=mem_readw(base);
        cpu.mpl=3;
        return backlink;
    }
    /* Write the (possibly modified, e.g. busy bit) descriptor back to the GDT. */
    void SaveSelector(void) {
        cpu.gdt.SetDescriptor(selector,desc);
    }
    /* Fetch the SS:ESP pair for privilege level 'level' from the TSS stack
     * fields (TSS_32 uses 8-byte entries, TSS_16 uses 4-byte entries). */
    void Get_SSx_ESPx(Bitu level,Bit16u & _ss,Bit32u & _esp) {
        cpu.mpl=0;
        if (is386) {
            PhysPt where=(PhysPt)(base+offsetof(TSS_32,esp0)+level*8);
            _esp=mem_readd(where);
            _ss=mem_readw(where+4);
        } else {
            PhysPt where= (PhysPt)(base+offsetof(TSS_16,sp0)+level*4);
            _esp=mem_readw(where);
            _ss=mem_readw(where+2);
        }
        cpu.mpl=3;
    }
    /* Point this object at a new TSS selector. A null selector (low bits
     * masked) yields a valid-but-empty state; LDT selectors (bit 2 set) and
     * selectors that are not present 286/386 TSS descriptors are rejected. */
    bool SetSelector(Bitu new_sel) {
        valid=false;
        if ((new_sel & 0xfffc)==0) {
            selector=0;
            base=0;
            limit=0;
            is386=1;
            return true;
        }
        if (new_sel&4) return false;
        if (!cpu.gdt.GetDescriptor(new_sel,desc)) return false;
        switch (desc.Type()) {
        case DESC_286_TSS_A:		case DESC_286_TSS_B:
        case DESC_386_TSS_A:		case DESC_386_TSS_B:
            break;
        default:
            return false;
        }
        if (!desc.saved.seg.p) return false;
        selector=new_sel;
        valid=true;
        base=desc.GetBase();
        limit=desc.GetLimit();
        is386=desc.Is386();
        return true;
    }

    void SaveState( std::ostream& stream );
    void LoadState( std::istream& stream );

    TSS_Descriptor desc;    // cached TSS descriptor
    Bitu selector = 0;      // TSS selector in the GDT
    PhysPt base = 0;        // linear base of the TSS
    Bitu limit = 0;         // limit of the TSS
    Bitu is386 = 0;         // nonzero for a 386 (32-bit) TSS format
    bool valid = false;
};

TaskStateSegment cpu_tss;

/* How a task switch was initiated; determines busy-bit, back-link and NT
 * flag handling below. */
enum TSwitchType {
    TSwitch_JMP,TSwitch_CALL_INT,TSwitch_IRET
};

/* Perform a hardware task switch to 'new_tss_selector': save the current
 * context into the outgoing TSS, load the new context from the incoming TSS,
 * and maintain the busy bits / back link / NT flag according to 'tstype'.
 * Only 386-format TSSes are supported; a 286 TSS aborts the emulator. */
bool CPU_SwitchTask(Bitu new_tss_selector,TSwitchType tstype,Bit32u old_eip) {
    bool old_allow = dosbox_allow_nonrecursive_page_fault;

    /* this code isn't very easy to make interruptible. so temporarily revert to recursive PF handling method */
    dosbox_allow_nonrecursive_page_fault = false;

    FillFlags();
    TaskStateSegment new_tss;
    if (!new_tss.SetSelector(new_tss_selector))
        E_Exit("Illegal TSS for switch, selector=%x, switchtype=%lx",(int)new_tss_selector,(unsigned long)tstype);
    /* IRET must return to a busy task; JMP/CALL/INT must enter a non-busy one. */
    if (tstype==TSwitch_IRET) {
        if (!new_tss.desc.IsBusy())
            E_Exit("TSS not busy for IRET");
    } else {
        if (new_tss.desc.IsBusy())
            E_Exit("TSS busy for JMP/CALL/INT");
    }
    Bitu new_cr3=0;
    Bit32u new_eax,new_ebx,new_ecx,new_edx,new_esp,new_ebp,new_esi,new_edi;
    Bit16u new_es,new_cs,new_ss,new_ds,new_fs,new_gs;
    Bitu new_ldt,new_eflags;
    Bit32u new_eip;
    /* Read new context from new TSS */
    if (new_tss.is386) {
        new_cr3=mem_readd(new_tss.base+offsetof(TSS_32,cr3));
        new_eip=mem_readd(new_tss.base+offsetof(TSS_32,eip));
        new_eflags=mem_readd(new_tss.base+offsetof(TSS_32,eflags));
        new_eax=mem_readd(new_tss.base+offsetof(TSS_32,eax));
        new_ecx=mem_readd(new_tss.base+offsetof(TSS_32,ecx));
        new_edx=mem_readd(new_tss.base+offsetof(TSS_32,edx));
        new_ebx=mem_readd(new_tss.base+offsetof(TSS_32,ebx));
        new_esp=mem_readd(new_tss.base+offsetof(TSS_32,esp));
        new_ebp=mem_readd(new_tss.base+offsetof(TSS_32,ebp));
        new_edi=mem_readd(new_tss.base+offsetof(TSS_32,edi));
        new_esi=mem_readd(new_tss.base+offsetof(TSS_32,esi));

        new_es=mem_readw(new_tss.base+offsetof(TSS_32,es));
        new_cs=mem_readw(new_tss.base+offsetof(TSS_32,cs));
        new_ss=mem_readw(new_tss.base+offsetof(TSS_32,ss));
        new_ds=mem_readw(new_tss.base+offsetof(TSS_32,ds));
        new_fs=mem_readw(new_tss.base+offsetof(TSS_32,fs));
        new_gs=mem_readw(new_tss.base+offsetof(TSS_32,gs));
        new_ldt=mem_readw(new_tss.base+offsetof(TSS_32,ldt));
    } else {
        /* 286 TSS format is not implemented; the zeroing below is unreachable
         * but keeps the compiler happy about uninitialized variables. */
        E_Exit("286 task switch");
        new_cr3=0;
        new_eip=0;
        new_eflags=0;
        new_eax=0;	new_ecx=0;	new_edx=0;	new_ebx=0;
        new_esp=0;	new_ebp=0;	new_edi=0;	new_esi=0;

        new_es=0;	new_cs=0;	new_ss=0;	new_ds=0;	new_fs=0;	new_gs=0;
        new_ldt=0;
    }

    /* Check if we need to clear busy bit of old TASK */
    if (tstype==TSwitch_JMP || tstype==TSwitch_IRET) {
        cpu_tss.desc.SetBusy(false);
        cpu_tss.SaveSelector();
    }
    Bit32u old_flags = (Bit32u)reg_flags;
    if (tstype==TSwitch_IRET) old_flags &= (~FLAG_NT);

    /* Save current context in current TSS */
    if (cpu_tss.is386) {
        mem_writed(cpu_tss.base+offsetof(TSS_32,eflags),old_flags);
        mem_writed(cpu_tss.base+offsetof(TSS_32,eip),old_eip);

        mem_writed(cpu_tss.base+offsetof(TSS_32,eax),reg_eax);
        mem_writed(cpu_tss.base+offsetof(TSS_32,ecx),reg_ecx);
        mem_writed(cpu_tss.base+offsetof(TSS_32,edx),reg_edx);
        mem_writed(cpu_tss.base+offsetof(TSS_32,ebx),reg_ebx);
        mem_writed(cpu_tss.base+offsetof(TSS_32,esp),reg_esp);
        mem_writed(cpu_tss.base+offsetof(TSS_32,ebp),reg_ebp);
        mem_writed(cpu_tss.base+offsetof(TSS_32,esi),reg_esi);
        mem_writed(cpu_tss.base+offsetof(TSS_32,edi),reg_edi);

        mem_writed(cpu_tss.base+offsetof(TSS_32,es),SegValue(es));
        mem_writed(cpu_tss.base+offsetof(TSS_32,cs),SegValue(cs));
        mem_writed(cpu_tss.base+offsetof(TSS_32,ss),SegValue(ss));
        mem_writed(cpu_tss.base+offsetof(TSS_32,ds),SegValue(ds));
        mem_writed(cpu_tss.base+offsetof(TSS_32,fs),SegValue(fs));
        mem_writed(cpu_tss.base+offsetof(TSS_32,gs),SegValue(gs));
    } else {
        E_Exit("286 task switch");
    }

    /* Setup a back link to the old TSS in new TSS */
    if (tstype==TSwitch_CALL_INT) {
        if (new_tss.is386) {
            mem_writed(new_tss.base+offsetof(TSS_32,back),(Bit32u)cpu_tss.selector);
        } else {
            mem_writew(new_tss.base+offsetof(TSS_16,back),(Bit16u)cpu_tss.selector);
        }
        /* And make the new task's eflag have the nested task bit */
        new_eflags|=FLAG_NT;
    }
    /* Set the busy bit in the new task */
    if (tstype==TSwitch_JMP || tstype==TSwitch_CALL_INT) {
        new_tss.desc.SetBusy(true);
        new_tss.SaveSelector();
    }

//	cpu.cr0|=CR0_TASKSWITCHED;
    /* Switching to the same task: keep the current context (the selectors
     * reloaded below are the ones already in use). */
    if (new_tss_selector == cpu_tss.selector) {
        reg_eip = old_eip;
        new_cs = SegValue(cs);
        new_ss = SegValue(ss);
        new_ds = SegValue(ds);
        new_es = SegValue(es);
        new_fs = SegValue(fs);
        new_gs = SegValue(gs);
    } else {

        /* Setup the new cr3 */
        if (paging.cr3 != new_cr3)
            // if they are the same it is not flushed
            // according to the 386 manual
            PAGING_SetDirBase(new_cr3);

        /* Load new context */
        if (new_tss.is386) {
            reg_eip=new_eip;
            CPU_SetFlags(new_eflags,FMASK_ALL | FLAG_VM);
            reg_eax=new_eax;
            reg_ecx=new_ecx;
            reg_edx=new_edx;
            reg_ebx=new_ebx;
            reg_esp=new_esp;
            reg_ebp=new_ebp;
            reg_edi=new_edi;
            reg_esi=new_esi;

//			new_cs=mem_readw(new_tss.base+offsetof(TSS_32,cs));
        } else {
            E_Exit("286 task switch");
        }
    }
    /* Load the new selectors */
    if (reg_flags & FLAG_VM) {
		SegSet16(cs,new_cs);
		cpu.code.big=false;
		CPU_SetCPL(3); //We don't have segment caches so this will do
	} else {
		/* Protected mode task */
		if (new_ldt!=0) CPU_LLDT(new_ldt);
		/* Load the new CS*/
		Descriptor cs_desc;
		CPU_SetCPL(new_cs & 3);
		if (!cpu.gdt.GetDescriptor(new_cs,cs_desc))
			E_Exit("Task switch with CS beyond limits");
		if (!cs_desc.saved.seg.p)
			E_Exit("Task switch with non present code-segment");
		switch (cs_desc.Type()) {
		case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA:
		case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA:
			/* non-conforming code: privilege of CS must match exactly */
			if (cpu.cpl != cs_desc.DPL()) E_Exit("Task CS RPL != DPL");
			goto doconforming;
		case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA:
		case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA:
			if (cpu.cpl < cs_desc.DPL()) E_Exit("Task CS RPL < DPL");
doconforming:
			Segs.expanddown[cs]=cs_desc.GetExpandDown();
			Segs.limit[cs]=do_seg_limits? (PhysPt)cs_desc.GetLimit():((PhysPt)(~0UL));
			Segs.phys[cs]=cs_desc.GetBase();
			cpu.code.big=cs_desc.Big()>0;
			Segs.val[cs]=new_cs;
			break;
		default:
			E_Exit("Task switch CS Type %d",(int)cs_desc.Type());
		}
	}
	CPU_SetSegGeneral(es,new_es);
	CPU_SetSegGeneral(ss,new_ss);
	CPU_SetSegGeneral(ds,new_ds);
	CPU_SetSegGeneral(fs,new_fs);
	CPU_SetSegGeneral(gs,new_gs);
	if (!cpu_tss.SetSelector(new_tss_selector)) {
		LOG(LOG_CPU,LOG_NORMAL)("TaskSwitch: set tss selector %X failed",new_tss_selector);
	}
//	cpu_tss.desc.SetBusy(true);
//	cpu_tss.SaveSelector();
//	LOG_MSG("Task CPL %X CS:%X IP:%X SS:%X SP:%X eflags %x",cpu.cpl,SegValue(cs),reg_eip,SegValue(ss),reg_esp,reg_flags);

	dosbox_allow_nonrecursive_page_fault = old_allow;
	return true;
}

/* Check whether an I/O access to 'port', 'size' bytes wide, must fault.
 * Only relevant in protected/V86 mode when IOPL is insufficient: the TSS
 * I/O permission bitmap is then consulted (386+ TSS only).
 * Returns the result of CPU_PrepareException(#GP,0) when the access is
 * denied, false when the access is allowed. */
bool CPU_IO_Exception(Bitu port,Bitu size) {
	if (cpu.pmode && ((GETFLAG_IOPL<cpu.cpl) || GETFLAG(VM))) {
		cpu.mpl=0;
		if (!cpu_tss.is386) goto doexception;
		/* 16-bit offset of the I/O permission bitmap is stored at offset 0x66 of the TSS */
		PhysPt bwhere=cpu_tss.base+0x66;
		Bit16u ofs=mem_readw(bwhere);
		if (ofs>cpu_tss.limit) goto doexception;
		bwhere=(PhysPt)(cpu_tss.base+ofs+(port/8));
		Bit16u map=mem_readw(bwhere);
		/* one bit per port; an access spanning 'size' ports must have every bit clear */
		Bit16u mask=(0xffffu >> (16u - size)) << (port & 7u);
		if (map & mask) goto doexception;
		cpu.mpl=3;
	}
	return false;
doexception:
	cpu.mpl=3;
	LOG(LOG_CPU,LOG_NORMAL)("IO Exception port %X",port);
	return CPU_PrepareException(EXCEPTION_GP,0);
}

#include <stack>

/* Per-vector count of exceptions currently being serviced (for double/triple
 * fault detection) and a stack recording the order they were entered. */
int CPU_Exception_Level[0x20] = {0};
std::stack<int> CPU_Exception_In_Progress;

/* Reset all exception nesting tracking (e.g. when fault emulation state must be cleared). */
void CPU_Exception_Level_Reset() {
	int i;

	for (i=0;i < 0x20;i++)
		CPU_Exception_Level[i] = 0;
	while (!CPU_Exception_In_Progress.empty())
		CPU_Exception_In_Progress.pop();
}

/* one-shot log guards so the double/triple fault messages don't spam the log */
bool has_printed_double_fault = false;
bool has_printed_triple_fault = false;
bool always_report_double_fault = false;
bool always_report_triple_fault = false;

void On_Software_CPU_Reset();

/* Raise CPU exception 'which' (0..0x1F) with error code 'error'.
 * If the same vector is already being serviced, escalates to a double fault
 * (#DF); a fault while #DF is in progress triggers a triple-fault CPU reset
 * when cpu_triple_fault_reset is enabled. */
void CPU_Exception(Bitu which,Bitu error ) {
	assert(which < 0x20);
//	LOG_MSG("Exception %d error %x",which,error);
	if (CPU_Exception_Level[which] != 0) {
		if (CPU_Exception_Level[EXCEPTION_DF] != 0 && cpu_triple_fault_reset) {
			if (always_report_triple_fault || !has_printed_triple_fault) {
				LOG_MSG("CPU_Exception: Double fault already in progress == Triple Fault. Resetting CPU.");
				has_printed_triple_fault = true;
			}

			// Triple fault -> special shutdown cycle -> reset signal -> reset.
			// Sickening, I know, but that's how IBM wired things a long long time ago.
			On_Software_CPU_Reset();
			E_Exit("Triple fault reset call unexpectedly returned");
		}

		if (always_report_double_fault || !has_printed_double_fault) {
			LOG_MSG("CPU_Exception: Exception %d already in progress, triggering double fault instead",(int)which);
			has_printed_double_fault = true;
		}
		which = EXCEPTION_DF;
		error = 0;
	}

	if (cpu_double_fault_enable) {
		/* NTS: Putting some thought into it, I don't think divide by zero counts as something to throw a double fault
		 *      over. I may be wrong. The behavior of Intel processors will ultimately decide.
		 *
		 *      Until then, don't count Divide Overflow exceptions, so that the "EFP loader" can do it's disgusting
		 *      anti-debugger hackery when loading parts of a demo. --J.C. */
		if (!(which == 0/*divide by zero/overflow*/)) {
			/* CPU_Interrupt() could cause another fault during memory access. This needs to happen here */
			CPU_Exception_Level[which]++;
			CPU_Exception_In_Progress.push((int)which);
		}
	}

	cpu.exception.error=error;
	/* vectors >= 8 carry an error code on the stack (#DF,#TS,#NP,#SS,#GP,#PF...) */
	CPU_Interrupt(which,CPU_INT_EXCEPTION | ((which>=8) ? CPU_INT_HAS_ERROR : 0),reg_eip);

	/* allow recursive page faults. required for multitasking OSes like Windows 95.
	 * we set this AFTER CPU_Interrupt so that if CPU_Interrupt faults while starting
	 * a page fault we still trigger double fault. */
	if (which == EXCEPTION_PF || which == EXCEPTION_GP) {
		if (CPU_Exception_Level[which] > 0)
			CPU_Exception_Level[which]--;

		if (!CPU_Exception_In_Progress.empty()) {
			if ((Bitu)CPU_Exception_In_Progress.top() == which)
				CPU_Exception_In_Progress.pop();
			else
				LOG_MSG("Top of fault stack not the same as what I'm handling");
		}
	}
}

Bit8u lastint;

/* Deliver interrupt vector 'num'. 'type' is a CPU_INT_* flag combination
 * (software/exception/error-code/no-IOPL-check); 'oldeip' is the EIP value
 * to push as the return address. Handles real mode, V86 and protected mode
 * (interrupt/trap/task gates). */
void CPU_Interrupt(Bitu num,Bitu type,Bit32u oldeip) {
	lastint=(Bit8u)num;
	FillFlags();
#if C_DEBUG
# if C_HEAVY_DEBUG
	bool DEBUG_IntBreakpoint(Bit8u intNum);
	Bitu DEBUG_EnableDebugger(void);

	if (type != CPU_INT_SOFTWARE) { /* CPU core already takes care of SW interrupts */
		if (DEBUG_IntBreakpoint((Bit8u)num))
			DEBUG_EnableDebugger();
	}
# endif
	if (type == CPU_INT_SOFTWARE && boothax == BOOTHAX_MSDOS) {
		if (num == 0x21 && boothax == BOOTHAX_MSDOS) {
			extern bool dos_kernel_disabled;
			if (dos_kernel_disabled) {
				if ((reg_ah == 0x4A/*alloc*/ || reg_ah == 0x49/*free*/) && guest_msdos_LoL == 0) { /* needed for MS-DOS 3.3 */
					if (SegValue(cs) != CB_SEG) {
						Bit16u old_es,old_bx,old_ax;

						LOG_MSG("INT 21h AH=%02xh intercepting call to determine LoL\n",reg_ah);

						/* issue AH=52h ourselves to learn the List of Lists, then restore regs */
						old_es = SegValue(es);
						old_bx = reg_bx;
						old_ax = reg_ax;

						reg_ah = 0x52;
						CALLBACK_RunRealInt(0x21);

						/* save off ES:BX */
						guest_msdos_LoL = RealMake(SegValue(es),reg_bx);
						/* Read off the MCB chain base (WARNING: Only works with MS-DOS 3.3 or later) */
						guest_msdos_mcb_chain = real_readw(guest_msdos_LoL>>16,(guest_msdos_LoL&0xFFFF)-2);
#if 1
						LOG_MSG("List of Lists: %04x:%04x",guest_msdos_LoL>>16,guest_msdos_LoL&0xFFFF);
						LOG_MSG("MCB chain starts at: %04x",guest_msdos_mcb_chain);
#endif

						CPU_SetSegGeneral(es,old_es);
						reg_bx = old_bx;
						reg_ax = old_ax;
					}
				}
				if (reg_ah == 0x52) { /* get list of lists. MS-DOS 5.0 and higher call this surprisingly often! */
					if (SegValue(cs) != CB_SEG) {
						LOG_MSG("INT 21h AH=52h intercepting call\n");
						reg_eip = oldeip;//HACK
						CALLBACK_RunRealInt(0x21);
						/* save off ES:BX */
						guest_msdos_LoL = RealMake(SegValue(es),reg_bx);
						/* Read off the MCB chain base (WARNING: Only works with MS-DOS 3.3 or later) */
						guest_msdos_mcb_chain = real_readw(guest_msdos_LoL>>16,(guest_msdos_LoL&0xFFFF)-2);
#if 1
						LOG_MSG("List of Lists: %04x:%04x",guest_msdos_LoL>>16,guest_msdos_LoL&0xFFFF);
						LOG_MSG("MCB chain starts at: %04x",guest_msdos_mcb_chain);
#endif
						return;
					}
				}
			}
		}
	}

	switch (num) {
	case 0xcd:
#if C_HEAVY_DEBUG
		LOG(LOG_CPU,LOG_ERROR)("Call to interrupt 0xCD this is BAD");
//		DEBUG_HeavyWriteLogInstruction();
//		E_Exit("Call to interrupt 0xCD this is BAD");
#endif
		break;
	case 0x03:
		if (DEBUG_Breakpoint()) {
			CPU_Cycles=0;
			return;
		}
	}
#endif
	if (!cpu.pmode) {
		/* Save everything on a 16-bit stack */
		CPU_Push16(reg_flags & 0xffff);
		CPU_Push16(SegValue(cs));
		CPU_Push16(oldeip);
		SETFLAGBIT(IF,false);
		SETFLAGBIT(TF,false);
		/* Get the new CS:IP from vector table */
		PhysPt base=cpu.idt.GetBase();
		reg_eip=mem_readw((PhysPt)(base+(num << 2)));
		Segs.val[cs]=mem_readw((PhysPt)(base+(num << 2)+2));
		Segs.phys[cs]=(PhysPt)Segs.val[cs] << 4u;
		if (!cpu_allow_big16) cpu.code.big=false;
		return;
	} else {
		/* Protected Mode Interrupt */
		if ((reg_flags & FLAG_VM) && (type&CPU_INT_SOFTWARE) && !(type&CPU_INT_NOIOPLCHECK)) {
//			LOG_MSG("Software int in v86, AH %X IOPL %x",reg_ah,(reg_flags & FLAG_IOPL) >>12);
			/* V86 software INT with IOPL < 3 faults with #GP(0) */
			if ((reg_flags & FLAG_IOPL)!=FLAG_IOPL) {
				CPU_Exception(EXCEPTION_GP,0);
				return;
			}
		}

		/* Look up the IDT gate for this vector */
		Descriptor gate;
		if (!cpu.idt.GetDescriptor(num<<3,gate)) {
			// zone66
			CPU_Exception(EXCEPTION_GP,num*8+2+((type&CPU_INT_SOFTWARE)?0:1));
			return;
		}

		if ((type&CPU_INT_SOFTWARE) && (gate.DPL()<cpu.cpl)) {
			// zone66, win3.x e
			CPU_Exception(EXCEPTION_GP,num*8+2);
			return;
		}

		/* saved so we can roll back SS:ESP/CPL if a guest page fault interrupts delivery */
		Bit16u old_ss;
		Bit32u old_esp;
		Bitu old_cpl;

		old_esp = reg_esp;
		old_ss = SegValue(ss);
		old_cpl = cpu.cpl;

		try {
			switch (gate.Type()) {
			case DESC_286_INT_GATE: case DESC_386_INT_GATE:
			case DESC_286_TRAP_GATE: case DESC_386_TRAP_GATE:
				{
					CPU_CHECK_COND(!gate.saved.seg.p,
						"INT:Gate segment not present",
						EXCEPTION_NP,num*8+2+((type&CPU_INT_SOFTWARE)?0:1))

					Descriptor cs_desc;
					Bitu gate_sel=gate.GetSelector();
					Bitu gate_off=gate.GetOffset();
					CPU_CHECK_COND((gate_sel & 0xfffc)==0,
						"INT:Gate with CS zero selector",
						EXCEPTION_GP,(type&CPU_INT_SOFTWARE)?0:1)
					CPU_CHECK_COND(!cpu.gdt.GetDescriptor(gate_sel,cs_desc),
						"INT:Gate with CS beyond limit",
						EXCEPTION_GP,(gate_sel & 0xfffc)+((type&CPU_INT_SOFTWARE)?0:1))

					Bitu cs_dpl=cs_desc.DPL();
					CPU_CHECK_COND(cs_dpl>cpu.cpl,
						"Interrupt to higher privilege",
						EXCEPTION_GP,(gate_sel & 0xfffc)+((type&CPU_INT_SOFTWARE)?0:1))
					switch (cs_desc.Type()) {
					case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA:
					case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA:
						if (cs_dpl<cpu.cpl) {
							/* Prepare for gate to inner level */
							CPU_CHECK_COND(!cs_desc.saved.seg.p,
								"INT:Inner level:CS segment not present",
								EXCEPTION_NP,(gate_sel & 0xfffc)+((type&CPU_INT_SOFTWARE)?0:1))
							CPU_CHECK_COND((reg_flags & FLAG_VM) && (cs_dpl!=0),
								"V86 interrupt calling codesegment with DPL>0",
								EXCEPTION_GP,gate_sel & 0xfffc)

							/* new (inner) and old (outer) stack pointers */
							Bit16u n_ss;
							Bit32u n_esp;
							Bit16u o_ss;
							Bit32u o_esp;
							o_ss=SegValue(ss);
							o_esp=reg_esp;
							/* fetch the inner-level SS:ESP from the TSS */
							cpu_tss.Get_SSx_ESPx(cs_dpl,n_ss,n_esp);
							CPU_CHECK_COND((n_ss & 0xfffc)==0,
								"INT:Gate with SS zero selector",
								EXCEPTION_TS,(type&CPU_INT_SOFTWARE)?0:1)
							Descriptor n_ss_desc;
							CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_ss,n_ss_desc),
								"INT:Gate with SS beyond limit",
								EXCEPTION_TS,(n_ss & 0xfffc)+((type&CPU_INT_SOFTWARE)?0:1))
							CPU_CHECK_COND(((n_ss & 3)!=cs_dpl) || (n_ss_desc.DPL()!=cs_dpl),
								"INT:Inner level with CS_DPL!=SS_DPL and SS_RPL",
								EXCEPTION_TS,(n_ss & 0xfffc)+((type&CPU_INT_SOFTWARE)?0:1))

							// check if stack segment is a writable data segment
							switch (n_ss_desc.Type()) {
							case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
							case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:
								break;
							default:
								E_Exit("INT:Inner level:Stack segment not writable."); // or #TS(ss_sel+EXT)
							}
							CPU_CHECK_COND(!n_ss_desc.saved.seg.p,
								"INT:Inner level with nonpresent SS",
								EXCEPTION_SS,(n_ss & 0xfffc)+((type&CPU_INT_SOFTWARE)?0:1))

							// commit point
							Segs.expanddown[ss]=n_ss_desc.GetExpandDown();
							Segs.limit[ss]=do_seg_limits? (PhysPt)n_ss_desc.GetLimit():((PhysPt)(~0UL));
							Segs.phys[ss]=n_ss_desc.GetBase();
							Segs.val[ss]=n_ss;
							if (n_ss_desc.Big()) {
								cpu.stack.big=true;
								cpu.stack.mask=0xffffffff;
								cpu.stack.notmask=0;
								reg_esp=n_esp;
							} else {
								cpu.stack.big=false;
								cpu.stack.mask=0xffff;
								cpu.stack.notmask=0xffff0000;
								reg_sp=n_esp & 0xffff;
							}

							CPU_SetCPL(cs_dpl);
							if (gate.Type() & 0x8) { /* 32-bit Gate */
								if (reg_flags & FLAG_VM) {
									/* interrupt from V86: push and clear data segments */
									CPU_Push32(SegValue(gs));SegSet16(gs,0x0);
									CPU_Push32(SegValue(fs));SegSet16(fs,0x0);
									CPU_Push32(SegValue(ds));SegSet16(ds,0x0);
									CPU_Push32(SegValue(es));SegSet16(es,0x0);
								}
								CPU_Push32(o_ss);
								CPU_Push32(o_esp);
							} else { /* 16-bit Gate */
								if (reg_flags & FLAG_VM) E_Exit("V86 to 16-bit gate");
								CPU_Push16(o_ss);
								CPU_Push16((Bit16u)o_esp);
							}
//							LOG_MSG("INT:Gate to inner level SS:%X SP:%X",n_ss,n_esp);
							goto do_interrupt;
						}
						if (cs_dpl!=cpu.cpl)
							E_Exit("Non-conforming intra privilege INT with DPL!=CPL");
						/* fallthrough: same-level non-conforming is handled like conforming */
					case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA:
					case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA:
						/* Prepare stack for gate to same priviledge */
						CPU_CHECK_COND(!cs_desc.saved.seg.p,
							"INT:Same level:CS segment not present",
							EXCEPTION_NP,(gate_sel & 0xfffc)+((type&CPU_INT_SOFTWARE)?0:1))
						if ((reg_flags & FLAG_VM) && (cs_dpl<cpu.cpl))
							E_Exit("V86 interrupt doesn't change to pl0"); // or #GP(cs_sel)

						// commit point
do_interrupt:
						if (gate.Type() & 0x8) { /* 32-bit Gate */
							CPU_Push32((Bit32u)reg_flags);
							CPU_Push32(SegValue(cs));
							CPU_Push32(oldeip);
							if (type & CPU_INT_HAS_ERROR) CPU_Push32((Bit32u)cpu.exception.error);
						} else { /* 16-bit gate */
							CPU_Push16(reg_flags & 0xffff);
							CPU_Push16(SegValue(cs));
							CPU_Push16(oldeip);
							if (type & CPU_INT_HAS_ERROR) CPU_Push16((Bit16u)cpu.exception.error);
						}
						break;
					default:
						E_Exit("INT:Gate Selector points to illegal descriptor with type %x",(int)cs_desc.Type());
					}

					/* load the handler's CS and jump to the gate offset */
					Segs.val[cs]=(Bit16u)((gate_sel&0xfffc) | cpu.cpl);
					Segs.phys[cs]=cs_desc.GetBase();
					Segs.limit[cs]=do_seg_limits? (PhysPt)cs_desc.GetLimit():((PhysPt)(~0UL));
					Segs.expanddown[cs]=cs_desc.GetExpandDown();
					cpu.code.big=cs_desc.Big()>0;
					reg_eip=(Bit32u)gate_off;

					/* interrupt gates (type bit 0 clear) disable IF; trap gates leave it */
					if (!(gate.Type()&1)) {
						SETFLAGBIT(IF,false);
					}
					SETFLAGBIT(TF,false);
					SETFLAGBIT(NT,false);
					SETFLAGBIT(VM,false);
					LOG(LOG_CPU,LOG_NORMAL)("INT:Gate to %X:%X big %d %s",gate_sel,gate_off,cs_desc.Big(),gate.Type() & 0x8 ? "386" : "286");
					return;
				}
			case DESC_TASK_GATE:
				CPU_CHECK_COND(!gate.saved.seg.p,
					"INT:Gate segment not present",
					EXCEPTION_NP,num*8+2+((type&CPU_INT_SOFTWARE)?0:1))

				CPU_SwitchTask(gate.GetSelector(),TSwitch_CALL_INT,oldeip);
				if (type & CPU_INT_HAS_ERROR) {
					//TODO Be sure about this, seems somewhat unclear
					if (cpu_tss.is386) CPU_Push32((Bit32u)cpu.exception.error);
					else CPU_Push16((Bit16u)cpu.exception.error);
				}
				return;
			default:
				E_Exit("Illegal descriptor type %X for int %X",(int)gate.Type(),(int)num);
			}
		}
		catch (const GuestPageFaultException &pf) {
			(void)pf;//UNUSED
			/* roll back the partial stack switch before re-raising the page fault */
			LOG_MSG("CPU_Interrupt() interrupted");
			CPU_SetSegGeneral(ss,old_ss);
			reg_esp = old_esp;
			CPU_SetCPL(old_cpl);
			throw;
		}
	}
	assert(1); /* NOTE(review): always-true assert — presumably an "unreachable" marker; confirm intent */
	return ; // make compiler happy
}


/* Emulate IRET/IRETD ('use32' selects the 32-bit form). Covers real mode,
 * V86 mode, nested-task (NT flag) returns, and protected mode returns to
 * the same or an outer privilege level. 'oldeip' is used when the IRET
 * turns into a task switch. */
void CPU_IRET(bool use32,Bit32u oldeip) {
	Bit32u orig_esp = reg_esp;

	/* x86 CPUs consider IRET the completion of an NMI, no matter where it happens */
	/* FIXME: If the IRET causes an exception, is it still considered the end of the NMI? */
	CPU_NMI_active = false;

	/* Fault emulation: an IRET pops the most recently entered exception off
	 * the in-progress tracking stack (see CPU_Exception). */
	if (!CPU_Exception_In_Progress.empty()) {
		int which = CPU_Exception_In_Progress.top();
		CPU_Exception_In_Progress.pop();
		assert(which < 0x20);

		if (CPU_Exception_Level[which] > 0)
			CPU_Exception_Level[which]--;

//		LOG_MSG("Leaving CPU exception %d",which);
	}

	if (!cpu.pmode) { /* RealMode IRET */
		if (use32) {
			reg_eip=CPU_Pop32();
			SegSet16(cs,CPU_Pop32());
			CPU_SetFlags(CPU_Pop32(),FMASK_ALL);
		} else {
			reg_eip=CPU_Pop16();
			SegSet16(cs,CPU_Pop16());
			CPU_SetFlags(CPU_Pop16(),FMASK_ALL & 0xffff);
		}
		if (!cpu_allow_big16) cpu.code.big=false;
		DestroyConditionFlags();
		return;
	} else { /* Protected mode IRET */
		if (reg_flags & FLAG_VM) {
			/* IRET executed inside V86: requires IOPL 3, else #GP(0) */
			if ((reg_flags & FLAG_IOPL)!=FLAG_IOPL) {
				// win3.x e
				CPU_Exception(EXCEPTION_GP,0);
				return;
			} else {
				try {
					if (use32) {
						/* peek the frame first; reg_esp is only advanced as reads succeed */
						Bit32u new_eip=mem_readd(SegPhys(ss) + (reg_esp & cpu.stack.mask));
						Bit32u tempesp=(reg_esp&cpu.stack.notmask)|((reg_esp+4)&cpu.stack.mask);
						Bit32u new_cs=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask));
						tempesp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask);
						Bit32u new_flags=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask));
						reg_esp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask);

						reg_eip=new_eip;
						SegSet16(cs,(Bit16u)(new_cs&0xffff));
						/* IOPL can not be modified in v86 mode by IRET */
						CPU_SetFlags(new_flags,FMASK_NORMAL|FLAG_NT);
					} else {
						Bit16u new_eip=mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask));
						Bit32u tempesp=(reg_esp&cpu.stack.notmask)|((reg_esp+2)&cpu.stack.mask);
						Bit16u new_cs=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask));
						tempesp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask);
						Bit16u new_flags=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask));
						reg_esp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask);

						reg_eip=(Bit32u)new_eip;
						SegSet16(cs,new_cs);
						/* IOPL can not be modified in v86 mode by IRET */
						CPU_SetFlags(new_flags,FMASK_NORMAL|FLAG_NT);
					}
				}
				catch (const GuestPageFaultException &pf) {
					(void)pf;//UNUSED
					LOG_MSG("CPU_IRET() interrupted prot vm86");
					reg_esp = orig_esp;
					throw;
				}
				cpu.code.big=false;
				DestroyConditionFlags();
				return;
			}
		}
		/* Check if this is task IRET */
		if (GETFLAG(NT)) {
			if (GETFLAG(VM)) E_Exit("Pmode IRET with VM bit set");
			CPU_CHECK_COND(!cpu_tss.IsValid(),
				"TASK Iret without valid TSS",
				EXCEPTION_TS,cpu_tss.selector & 0xfffc)
			if (!cpu_tss.desc.IsBusy()) {
				LOG(LOG_CPU,LOG_ERROR)("TASK Iret:TSS not busy");
			}
			/* return to the task recorded in the back link field of the TSS */
			Bitu back_link=cpu_tss.Get_back();
			CPU_SwitchTask(back_link,TSwitch_IRET,oldeip);
			return;
		}
		Bit32u n_cs_sel,n_eip,n_flags,tempesp;
		if (use32) {
			n_eip=mem_readd(SegPhys(ss) + (reg_esp & cpu.stack.mask));
			tempesp=(reg_esp&cpu.stack.notmask)|((reg_esp+4)&cpu.stack.mask);
			n_cs_sel=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask)) & 0xffff;
			tempesp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask);
			n_flags=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask));
			tempesp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask);

			if ((n_flags & FLAG_VM) && (cpu.cpl==0)) {
				/* IRET from CPL 0 with VM set in the popped flags: return to V86 mode */
				// commit point
				try {
					reg_esp=tempesp;
					reg_eip=n_eip & 0xffff;
					Bit16u n_ss,n_es,n_ds,n_fs,n_gs;
					Bit32u n_esp;
					n_esp=CPU_Pop32();
					n_ss=CPU_Pop32() & 0xffff;
					n_es=CPU_Pop32() & 0xffff;
					n_ds=CPU_Pop32() & 0xffff;
					n_fs=CPU_Pop32() & 0xffff;
					n_gs=CPU_Pop32() & 0xffff;

					CPU_SetFlags(n_flags,FMASK_ALL | FLAG_VM);
					DestroyConditionFlags();
					CPU_SetCPL(3);

					CPU_SetSegGeneral(ss,n_ss);
					CPU_SetSegGeneral(es,n_es);
					CPU_SetSegGeneral(ds,n_ds);
					CPU_SetSegGeneral(fs,n_fs);
					CPU_SetSegGeneral(gs,n_gs);
					reg_esp=n_esp;
					cpu.code.big=false;
					SegSet16(cs,(Bit16u)n_cs_sel);
					LOG(LOG_CPU,LOG_NORMAL)("IRET:Back to V86: CS:%X IP %X SS:%X SP %X FLAGS:%X",SegValue(cs),reg_eip,SegValue(ss),reg_esp,reg_flags);
					return;
				}
				catch (const GuestPageFaultException &pf) {
					(void)pf;//UNUSED
					LOG_MSG("CPU_IRET() interrupted prot use32");
					reg_esp = orig_esp;
					throw;
				}
			}
			if (n_flags & FLAG_VM) E_Exit("IRET from pmode to v86 with CPL!=0");
		} else {
			n_eip=mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask));
			tempesp=(reg_esp&cpu.stack.notmask)|((reg_esp+2)&cpu.stack.mask);
			n_cs_sel=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask));
			tempesp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask);
			n_flags=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask));
			/* 16-bit IRET: keep the upper half of EFLAGS unchanged */
			n_flags|=(reg_flags & 0xffff0000);
			tempesp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask);

			if (n_flags & FLAG_VM) E_Exit("VM Flag in 16-bit iret");
		}
		CPU_CHECK_COND((n_cs_sel & 0xfffc)==0,
			"IRET:CS selector zero",
			EXCEPTION_GP,0)
		Bitu n_cs_rpl=n_cs_sel & 3;
		Descriptor n_cs_desc;
		CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_cs_sel,n_cs_desc),
			"IRET:CS selector beyond limits",
			EXCEPTION_GP,n_cs_sel & 0xfffc)
		CPU_CHECK_COND(n_cs_rpl<cpu.cpl,
			"IRET to lower privilege",
			EXCEPTION_GP,n_cs_sel & 0xfffc)

		switch (n_cs_desc.Type()) {
		case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA:
		case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA:
			CPU_CHECK_COND(n_cs_rpl!=n_cs_desc.DPL(),
				"IRET:NC:DPL!=RPL",
				EXCEPTION_GP,n_cs_sel & 0xfffc)
			break;
		case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA:
		case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA:
			CPU_CHECK_COND(n_cs_desc.DPL()>n_cs_rpl,
				"IRET:C:DPL>RPL",
				EXCEPTION_GP,n_cs_sel & 0xfffc)
			break;
		default:
			E_Exit("IRET:Illegal descriptor type %X",(int)n_cs_desc.Type());
		}
		CPU_CHECK_COND(!n_cs_desc.saved.seg.p,
			"IRET with nonpresent code segment",
			EXCEPTION_NP,n_cs_sel & 0xfffc)

		if (n_cs_rpl==cpu.cpl) {
			/* Return to same level */

			// commit point
			reg_esp=tempesp;
			Segs.expanddown[cs]=n_cs_desc.GetExpandDown();
			Segs.limit[cs]=do_seg_limits? (PhysPt)n_cs_desc.GetLimit():((PhysPt)(~0UL));
			Segs.phys[cs]=n_cs_desc.GetBase();
			cpu.code.big=n_cs_desc.Big()>0;
			Segs.val[cs]=(Bit16u)n_cs_sel;
			reg_eip=n_eip;

			/* non-zero CPL may not touch IOPL; insufficient IOPL may not touch IF */
			Bitu mask=cpu.cpl ? (FMASK_NORMAL | FLAG_NT) : FMASK_ALL;
			if (GETFLAG_IOPL<cpu.cpl) mask &= (~FLAG_IF);
			CPU_SetFlags(n_flags,mask);
			DestroyConditionFlags();
			LOG(LOG_CPU,LOG_NORMAL)("IRET:Same level:%X:%X big %d",n_cs_sel,n_eip,cpu.code.big);
		} else {
			/* Return to outer level */
			Bit32u n_ss,n_esp;
			if (use32) {
				n_esp=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask));
				tempesp=(tempesp&cpu.stack.notmask)|((tempesp+4)&cpu.stack.mask);
				n_ss=mem_readd(SegPhys(ss) + (tempesp & cpu.stack.mask)) & 0xffff;
			} else {
				n_esp=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask));
				tempesp=(tempesp&cpu.stack.notmask)|((tempesp+2)&cpu.stack.mask);
				n_ss=mem_readw(SegPhys(ss) + (tempesp & cpu.stack.mask));
			}
			CPU_CHECK_COND((n_ss & 0xfffc)==0,
				"IRET:Outer level:SS selector zero",
				EXCEPTION_GP,0)
			CPU_CHECK_COND((n_ss & 3)!=n_cs_rpl,
				"IRET:Outer level:SS rpl!=CS rpl",
				EXCEPTION_GP,n_ss & 0xfffc)
			Descriptor n_ss_desc;
			CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_ss,n_ss_desc),
				"IRET:Outer level:SS beyond limit",
				EXCEPTION_GP,n_ss & 0xfffc)
			CPU_CHECK_COND(n_ss_desc.DPL()!=n_cs_rpl,
				"IRET:Outer level:SS dpl!=CS rpl",
				EXCEPTION_GP,n_ss & 0xfffc)

			// check if stack segment is a writable data segment
			switch (n_ss_desc.Type()) {
			case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
			case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:
				break;
			default:
				E_Exit("IRET:Outer level:Stack segment not writable"); // or #GP(ss_sel)
			}
			CPU_CHECK_COND(!n_ss_desc.saved.seg.p,
				"IRET:Outer level:Stack segment not present",
				EXCEPTION_NP,n_ss & 0xfffc)

			// commit point

			Segs.expanddown[cs]=n_cs_desc.GetExpandDown();
			Segs.limit[cs]=do_seg_limits? (PhysPt)n_cs_desc.GetLimit():((PhysPt)(~0UL));
			Segs.phys[cs]=n_cs_desc.GetBase();
			cpu.code.big=n_cs_desc.Big()>0;
			Segs.val[cs]=n_cs_sel;

			Bitu mask=cpu.cpl ? (FMASK_NORMAL | FLAG_NT) : FMASK_ALL;
			if (GETFLAG_IOPL<cpu.cpl) mask &= (~FLAG_IF);
			CPU_SetFlags(n_flags,mask);
			DestroyConditionFlags();

			CPU_SetCPL(n_cs_rpl);
			reg_eip=n_eip;

			Segs.val[ss]=(Bit16u)n_ss;
			Segs.phys[ss]=n_ss_desc.GetBase();
			Segs.limit[ss]=do_seg_limits? (PhysPt)n_ss_desc.GetLimit():((PhysPt)(~0UL));
			Segs.expanddown[ss]=n_ss_desc.GetExpandDown();
			if (n_ss_desc.Big()) {
				cpu.stack.big=true;
				cpu.stack.mask=0xffffffff;
				cpu.stack.notmask=0;
				reg_esp=n_esp;
			} else {
				cpu.stack.big=false;
				cpu.stack.mask=0xffff;
				cpu.stack.notmask=0xffff0000;
				reg_sp=n_esp & 0xffff;
			}

			// borland extender, zrdx
			CPU_CheckSegments();

			LOG(LOG_CPU,LOG_NORMAL)("IRET:Outer level:%X:%X big %d",n_cs_sel,n_eip,cpu.code.big);
		}
		return;
	}
}


/* Emulate a far JMP to selector:offset. In real/V86 mode this is a direct
 * CS:IP load; in protected mode the target may be a code descriptor or a
 * 386 TSS (task switch). 'oldeip' is passed through to CPU_SwitchTask. */
void CPU_JMP(bool use32,Bitu selector,Bitu offset,Bit32u oldeip) {
	if (!cpu.pmode || (reg_flags & FLAG_VM)) {
		if (!use32) {
			reg_eip=offset&0xffff;
		} else {
			reg_eip=(Bit32u)offset;
		}
		SegSet16(cs,(Bit16u)selector);
		if (!cpu_allow_big16) cpu.code.big=false;
		return;
	} else {
		CPU_CHECK_COND((selector & 0xfffc)==0,
			"JMP:CS selector zero",
			EXCEPTION_GP,0)
		Bitu rpl=selector & 3;
		Descriptor desc;
		CPU_CHECK_COND(!cpu.gdt.GetDescriptor(selector,desc),
			"JMP:CS beyond limits",
			EXCEPTION_GP,selector & 0xfffc)
		switch (desc.Type()) {
		case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA:
		case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA:
			CPU_CHECK_COND(rpl>cpu.cpl,
				"JMP:NC:RPL>CPL",
				EXCEPTION_GP,selector & 0xfffc)
			CPU_CHECK_COND(cpu.cpl!=desc.DPL(),
				"JMP:NC:RPL != DPL",
				EXCEPTION_GP,selector & 0xfffc)
			LOG(LOG_CPU,LOG_NORMAL)("JMP:Code:NC to %X:%X big %d",selector,offset,desc.Big());
			goto CODE_jmp;
		case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA:
		case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA:
			LOG(LOG_CPU,LOG_NORMAL)("JMP:Code:C to %X:%X big %d",selector,offset,desc.Big());
			CPU_CHECK_COND(cpu.cpl<desc.DPL(),
				"JMP:C:CPL < DPL",
				EXCEPTION_GP,selector & 0xfffc)
CODE_jmp:
			if (!desc.saved.seg.p) {
				// win
				CPU_Exception(EXCEPTION_NP,selector & 0xfffc);
				return;
			}

			/* Normal jump to another selector:offset */
			Segs.expanddown[cs]=desc.GetExpandDown();
			Segs.limit[cs]=do_seg_limits? (PhysPt)desc.GetLimit():((PhysPt)(~0UL));
			Segs.phys[cs]=desc.GetBase();
			cpu.code.big=desc.Big()>0;
			Segs.val[cs]=(Bit16u)((selector & 0xfffc) | cpu.cpl);
			reg_eip=(Bit32u)offset;
			return;
		case DESC_386_TSS_A:
			CPU_CHECK_COND(desc.DPL()<cpu.cpl,
				"JMP:TSS:dpl<cpl",
				EXCEPTION_GP,selector & 0xfffc)
			CPU_CHECK_COND(desc.DPL()<rpl,
				"JMP:TSS:dpl<rpl",
				EXCEPTION_GP,selector & 0xfffc)
			LOG(LOG_CPU,LOG_NORMAL)("JMP:TSS to %X",selector);
			CPU_SwitchTask(selector,TSwitch_JMP,oldeip);
			break;
		default:
			E_Exit("JMP Illegal descriptor type %X",(int)desc.Type());
		}
	}
	assert(1); /* NOTE(review): always-true assert — presumably an "unreachable" marker; confirm intent */
}


/* Emulate a far CALL to selector:offset. Real/V86 mode pushes CS:IP and
 * jumps; protected mode dispatches on the descriptor type (code segment,
 * call gate, ...). 'oldeip' is the return EIP pushed on the stack; on a
 * guest page fault during the pushes the saved ESP/EIP are restored. */
void CPU_CALL(bool use32,Bitu selector,Bitu offset,Bit32u oldeip) {
	Bit32u old_esp = reg_esp;
	Bit32u old_eip = reg_eip;

	if (!cpu.pmode || (reg_flags & FLAG_VM)) {
		try {
			if (!use32) {
				CPU_Push16(SegValue(cs));
				CPU_Push16(oldeip);
				reg_eip=offset&0xffff;
			} else {
				CPU_Push32(SegValue(cs));
				CPU_Push32(oldeip);
				reg_eip=(Bit32u)offset;
			}
		}
		catch (const GuestPageFaultException &pf) {
			(void)pf;//UNUSED
			reg_esp = old_esp;
			reg_eip = old_eip;
			throw;
		}
		if (!cpu_allow_big16) cpu.code.big=false;
		SegSet16(cs,(Bit16u)selector);
		return;
	} else {
		CPU_CHECK_COND((selector & 0xfffc)==0,
			"CALL:CS selector zero",
			EXCEPTION_GP,0)
		Bitu rpl=selector & 3;
		Descriptor call;
		CPU_CHECK_COND(!cpu.gdt.GetDescriptor(selector,call),
			"CALL:CS beyond limits",
			EXCEPTION_GP,selector & 0xfffc)
		/* Check for type of far call */
		switch (call.Type()) {
		case DESC_CODE_N_NC_A:case
DESC_CODE_N_NC_NA: 01693 case DESC_CODE_R_NC_A:case DESC_CODE_R_NC_NA: 01694 CPU_CHECK_COND(rpl>cpu.cpl, 01695 "CALL:CODE:NC:RPL>CPL", 01696 EXCEPTION_GP,selector & 0xfffc) 01697 CPU_CHECK_COND(call.DPL()!=cpu.cpl, 01698 "CALL:CODE:NC:DPL!=CPL", 01699 EXCEPTION_GP,selector & 0xfffc) 01700 LOG(LOG_CPU,LOG_NORMAL)("CALL:CODE:NC to %X:%X",selector,offset); 01701 goto call_code; 01702 case DESC_CODE_N_C_A:case DESC_CODE_N_C_NA: 01703 case DESC_CODE_R_C_A:case DESC_CODE_R_C_NA: 01704 CPU_CHECK_COND(call.DPL()>cpu.cpl, 01705 "CALL:CODE:C:DPL>CPL", 01706 EXCEPTION_GP,selector & 0xfffc) 01707 LOG(LOG_CPU,LOG_NORMAL)("CALL:CODE:C to %X:%X",selector,offset); 01708 call_code: 01709 if (!call.saved.seg.p) { 01710 // borland extender (RTM) 01711 CPU_Exception(EXCEPTION_NP,selector & 0xfffc); 01712 return; 01713 } 01714 // commit point 01715 try { 01716 if (!use32) { 01717 CPU_Push16(SegValue(cs)); 01718 CPU_Push16(oldeip); 01719 reg_eip=offset & 0xffff; 01720 } else { 01721 CPU_Push32(SegValue(cs)); 01722 CPU_Push32(oldeip); 01723 reg_eip=(Bit32u)offset; 01724 } 01725 } 01726 catch (const GuestPageFaultException &pf) { 01727 (void)pf;//UNUSED 01728 reg_esp = old_esp; 01729 reg_eip = old_eip; 01730 throw; 01731 } 01732 01733 Segs.expanddown[cs]=call.GetExpandDown(); 01734 Segs.limit[cs]=do_seg_limits? 
(PhysPt)call.GetLimit():((PhysPt)(~0UL)); 01735 Segs.phys[cs]=call.GetBase(); 01736 cpu.code.big=call.Big()>0; 01737 Segs.val[cs]=(Bit16u)((selector & 0xfffc) | cpu.cpl); 01738 return; 01739 case DESC_386_CALL_GATE: 01740 case DESC_286_CALL_GATE: 01741 { 01742 CPU_CHECK_COND(call.DPL()<cpu.cpl, 01743 "CALL:Gate:Gate DPL<CPL", 01744 EXCEPTION_GP,selector & 0xfffc) 01745 CPU_CHECK_COND(call.DPL()<rpl, 01746 "CALL:Gate:Gate DPL<RPL", 01747 EXCEPTION_GP,selector & 0xfffc) 01748 CPU_CHECK_COND(!call.saved.seg.p, 01749 "CALL:Gate:Segment not present", 01750 EXCEPTION_NP,selector & 0xfffc) 01751 Descriptor n_cs_desc; 01752 Bitu n_cs_sel=call.GetSelector(); 01753 01754 CPU_CHECK_COND((n_cs_sel & 0xfffc)==0, 01755 "CALL:Gate:CS selector zero", 01756 EXCEPTION_GP,0) 01757 CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_cs_sel,n_cs_desc), 01758 "CALL:Gate:CS beyond limits", 01759 EXCEPTION_GP,n_cs_sel & 0xfffc) 01760 Bitu n_cs_dpl = n_cs_desc.DPL(); 01761 CPU_CHECK_COND(n_cs_dpl>cpu.cpl, 01762 "CALL:Gate:CS DPL>CPL", 01763 EXCEPTION_GP,n_cs_sel & 0xfffc) 01764 01765 CPU_CHECK_COND(!n_cs_desc.saved.seg.p, 01766 "CALL:Gate:CS not present", 01767 EXCEPTION_NP,n_cs_sel & 0xfffc) 01768 01769 Bitu n_eip = call.GetOffset(); 01770 switch (n_cs_desc.Type()) { 01771 case DESC_CODE_N_NC_A:case DESC_CODE_N_NC_NA: 01772 case DESC_CODE_R_NC_A:case DESC_CODE_R_NC_NA: 01773 /* Check if we goto inner priviledge */ 01774 if (n_cs_dpl < cpu.cpl) { 01775 /* Get new SS:ESP out of TSS */ 01776 Bit16u n_ss_sel; 01777 Bit32u n_esp; 01778 Descriptor n_ss_desc; 01779 cpu_tss.Get_SSx_ESPx(n_cs_dpl,n_ss_sel,n_esp); 01780 CPU_CHECK_COND((n_ss_sel & 0xfffc)==0, 01781 "CALL:Gate:NC:SS selector zero", 01782 EXCEPTION_TS,0) 01783 CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_ss_sel,n_ss_desc), 01784 "CALL:Gate:Invalid SS selector", 01785 EXCEPTION_TS,n_ss_sel & 0xfffc) 01786 CPU_CHECK_COND(((n_ss_sel & 3)!=n_cs_desc.DPL()) || (n_ss_desc.DPL()!=n_cs_desc.DPL()), 01787 "CALL:Gate:Invalid SS selector privileges", 01788 
EXCEPTION_TS,n_ss_sel & 0xfffc) 01789 01790 switch (n_ss_desc.Type()) { 01791 case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A: 01792 case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A: 01793 // writable data segment 01794 break; 01795 default: 01796 E_Exit("Call:Gate:SS no writable data segment"); // or #TS(ss_sel) 01797 } 01798 CPU_CHECK_COND(!n_ss_desc.saved.seg.p, 01799 "CALL:Gate:Stack segment not present", 01800 EXCEPTION_SS,n_ss_sel & 0xfffc) 01801 01802 /* Load the new SS:ESP and save data on it */ 01803 Bit32u o_esp = reg_esp; 01804 Bit16u o_ss = SegValue(ss); 01805 PhysPt o_stack = SegPhys(ss)+(reg_esp & cpu.stack.mask); 01806 01807 01808 // catch pagefaults 01809 if (call.saved.gate.paramcount&31) { 01810 if (call.Type()==DESC_386_CALL_GATE) { 01811 for (Bit8s i=(call.saved.gate.paramcount&31)-1;i>=0;i--) 01812 mem_readd(o_stack+(Bit8u)i*4u); 01813 } else { 01814 for (Bit8s i=(call.saved.gate.paramcount&31)-1;i>=0;i--) 01815 mem_readw(o_stack+(Bit8u)i*2u); 01816 } 01817 } 01818 01819 bool old_allow = dosbox_allow_nonrecursive_page_fault; 01820 01821 /* this code isn't very easy to make interruptible. so temporarily revert to recursive PF handling method */ 01822 dosbox_allow_nonrecursive_page_fault = false; 01823 01824 // commit point 01825 Segs.val[ss]=n_ss_sel; 01826 Segs.phys[ss]=n_ss_desc.GetBase(); 01827 Segs.limit[ss]=do_seg_limits? (PhysPt)n_ss_desc.GetLimit():((PhysPt)(~0UL)); 01828 Segs.expanddown[ss]=n_ss_desc.GetExpandDown(); 01829 if (n_ss_desc.Big()) { 01830 cpu.stack.big=true; 01831 cpu.stack.mask=0xffffffff; 01832 cpu.stack.notmask=0; 01833 reg_esp=n_esp; 01834 } else { 01835 cpu.stack.big=false; 01836 cpu.stack.mask=0xffff; 01837 cpu.stack.notmask=0xffff0000; 01838 reg_sp=n_esp & 0xffff; 01839 } 01840 01841 CPU_SetCPL(n_cs_desc.DPL()); 01842 Bit16u oldcs = SegValue(cs); 01843 /* Switch to new CS:EIP */ 01844 Segs.expanddown[cs]=n_cs_desc.GetExpandDown(); 01845 Segs.limit[cs] = do_seg_limits? 
(PhysPt)n_cs_desc.GetLimit():((PhysPt)(~0UL)); 01846 Segs.phys[cs] = n_cs_desc.GetBase(); 01847 Segs.val[cs] = (Bit16u)((n_cs_sel & 0xfffc) | cpu.cpl); 01848 cpu.code.big = n_cs_desc.Big()>0; 01849 reg_eip = (Bit32u)n_eip; 01850 if (!use32) reg_eip&=0xffff; 01851 01852 if (call.Type()==DESC_386_CALL_GATE) { 01853 CPU_Push32(o_ss); //save old stack 01854 CPU_Push32(o_esp); 01855 if (call.saved.gate.paramcount&31) 01856 for (Bit8s i=(call.saved.gate.paramcount&31)-1;i>=0;i--) 01857 CPU_Push32(mem_readd(o_stack+(Bit8u)i*4u)); 01858 CPU_Push32(oldcs); 01859 CPU_Push32(oldeip); 01860 } else { 01861 CPU_Push16(o_ss); //save old stack 01862 CPU_Push16((Bit16u)o_esp); 01863 if (call.saved.gate.paramcount&31) 01864 for (Bit8s i=(call.saved.gate.paramcount&31)-1;i>=0;i--) 01865 CPU_Push16(mem_readw(o_stack+(Bit8u)i*2u)); 01866 CPU_Push16(oldcs); 01867 CPU_Push16(oldeip); 01868 } 01869 01870 dosbox_allow_nonrecursive_page_fault = old_allow; 01871 break; 01872 } else if (n_cs_dpl > cpu.cpl) 01873 E_Exit("CALL:GATE:CS DPL>CPL"); // or #GP(sel) 01874 case DESC_CODE_N_C_A:case DESC_CODE_N_C_NA: 01875 case DESC_CODE_R_C_A:case DESC_CODE_R_C_NA: 01876 // zrdx extender 01877 01878 try { 01879 if (call.Type()==DESC_386_CALL_GATE) { 01880 CPU_Push32(SegValue(cs)); 01881 CPU_Push32(oldeip); 01882 } else { 01883 CPU_Push16(SegValue(cs)); 01884 CPU_Push16(oldeip); 01885 } 01886 } 01887 catch (const GuestPageFaultException &pf) { 01888 (void)pf;//UNUSED 01889 reg_esp = old_esp; 01890 reg_eip = old_eip; 01891 throw; 01892 } 01893 01894 /* Switch to new CS:EIP */ 01895 Segs.expanddown[cs]=n_cs_desc.GetExpandDown(); 01896 Segs.limit[cs] = do_seg_limits? 
(PhysPt)n_cs_desc.GetLimit():((PhysPt)(~0UL));
				Segs.phys[cs] = n_cs_desc.GetBase();
				Segs.val[cs] = (Bit16u)((n_cs_sel & 0xfffc) | cpu.cpl);
				cpu.code.big = n_cs_desc.Big()>0;
				reg_eip = (Bit32u)n_eip;
				if (!use32) reg_eip&=0xffff;
				break;
			default:
				E_Exit("CALL:GATE:CS no executable segment");
			}
		} /* Call Gates */
		break;
	case DESC_386_TSS_A:
		/* CALL through a 386 TSS descriptor: privilege checks then task switch. */
		CPU_CHECK_COND(call.DPL()<cpu.cpl,
			"CALL:TSS:dpl<cpl",
			EXCEPTION_GP,selector & 0xfffc)
		CPU_CHECK_COND(call.DPL()<rpl,
			"CALL:TSS:dpl<rpl",
			EXCEPTION_GP,selector & 0xfffc)

		CPU_CHECK_COND(!call.saved.seg.p,
			"CALL:TSS:Segment not present",
			EXCEPTION_NP,selector & 0xfffc)

		LOG(LOG_CPU,LOG_NORMAL)("CALL:TSS to %X",selector);
		CPU_SwitchTask(selector,TSwitch_CALL_INT,oldeip);
		break;
	case DESC_DATA_EU_RW_NA:	// vbdos
	case DESC_INVALID:		// used by some installers
		/* CALL to a non-executable/invalid descriptor: raise #GP rather than abort,
		 * some real-world programs do this (see case comments above). */
		CPU_Exception(EXCEPTION_GP,selector & 0xfffc);
		return;
	default:
		E_Exit("CALL:Descriptor type %x unsupported",(int)call.Type());
	}
	}
	assert(1);	/* NOTE(review): assert(1) is a no-op; presumably a leftover marker */
}


/* Far return (RETF). Pops IP/EIP then CS from the stack and discards `bytes`
 * extra bytes (RETF imm16). Real/vm86 mode returns directly; protected mode
 * performs the full descriptor/privilege checks, including a return to an
 * outer privilege level with SS:ESP reload.
 *
 * A GuestPageFaultException thrown before the commit point restores ESP so
 * the instruction can be restarted after the fault is serviced. */
void CPU_RET(bool use32,Bitu bytes,Bit32u oldeip) {
	(void)oldeip;//UNUSED

	Bit32u orig_esp = reg_esp;	/* for restart-after-page-fault */

	if (!cpu.pmode || (reg_flags & FLAG_VM)) {
		/* Real mode or virtual-8086: no descriptor checks. */
		try {
			Bit32u new_ip;
			Bit16u new_cs;
			if (!use32) {
				new_ip=CPU_Pop16();
				new_cs=CPU_Pop16();
			} else {
				new_ip=CPU_Pop32();
				new_cs=CPU_Pop32() & 0xffff;
			}
			reg_esp+=(Bit32u)bytes;
			SegSet16(cs,new_cs);
			reg_eip=new_ip;
			if (!cpu_allow_big16) cpu.code.big=false;
			return;
		}
		catch (const GuestPageFaultException &pf) {
			(void)pf;//UNUSED
			LOG_MSG("CPU_RET() interrupted real/vm86");
			reg_esp = orig_esp;
			throw;
		}
	} else {
		Bit32u offset,selector;
		/* Peek at the return CS on the stack (without popping) so checks can
		 * fault before anything is committed. */
		if (!use32) selector = mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask) + 2);
		else selector = mem_readd(SegPhys(ss) + (reg_esp & cpu.stack.mask) + 4) & 0xffff;

		Descriptor desc;
		Bit32u rpl=selector & 3;
		if(rpl < cpu.cpl) {
			/* RET may never return to a more privileged level */
			// win setup
			CPU_Exception(EXCEPTION_GP,selector & 0xfffc);
			return;
		}

		CPU_CHECK_COND((selector & 0xfffc)==0,
			"RET:CS selector zero",
			EXCEPTION_GP,0)
		CPU_CHECK_COND(!cpu.gdt.GetDescriptor(selector,desc),
			"RET:CS beyond limits",
			EXCEPTION_GP,selector & 0xfffc)

		if (cpu.cpl==rpl) {
			/* Return to same level */
			switch (desc.Type()) {
			case DESC_CODE_N_NC_A:case DESC_CODE_N_NC_NA:
			case DESC_CODE_R_NC_A:case DESC_CODE_R_NC_NA:
				/* non-conforming: DPL must equal CPL */
				CPU_CHECK_COND(cpu.cpl!=desc.DPL(),
					"RET to NC segment of other privilege",
					EXCEPTION_GP,selector & 0xfffc)
				goto RET_same_level;
			case DESC_CODE_N_C_A:case DESC_CODE_N_C_NA:
			case DESC_CODE_R_C_A:case DESC_CODE_R_C_NA:
				/* conforming: DPL must be <= CPL */
				CPU_CHECK_COND(desc.DPL()>cpu.cpl,
					"RET to C segment of higher privilege",
					EXCEPTION_GP,selector & 0xfffc)
				break;
			default:
				E_Exit("RET from illegal descriptor type %X",(int)desc.Type());
			}
RET_same_level:
			if (!desc.saved.seg.p) {
				// borland extender (RTM)
				CPU_Exception(EXCEPTION_NP,selector & 0xfffc);
				return;
			}

			// commit point
			try {
				if (!use32) {
					offset=CPU_Pop16();
					selector=CPU_Pop16();
				} else {
					offset=CPU_Pop32();
					selector=CPU_Pop32() & 0xffff;
				}
			}
			catch (const GuestPageFaultException &pf) {
				(void)pf;//UNUSED
				LOG_MSG("CPU_RET() interrupted prot rpl==cpl");
				reg_esp = orig_esp;
				throw;
			}

			/* Load the new CS shadow state */
			Segs.expanddown[cs]=desc.GetExpandDown();
			Segs.limit[cs]=do_seg_limits? (PhysPt)desc.GetLimit():((PhysPt)(~0UL));
			Segs.phys[cs]=desc.GetBase();
			cpu.code.big=desc.Big()>0;
			Segs.val[cs]=(Bit16u)selector;
			reg_eip=offset;
			if (cpu.stack.big) {
				reg_esp+=(Bit32u)bytes;
			} else {
				reg_sp+=(Bit16u)bytes;
			}
			LOG(LOG_CPU,LOG_NORMAL)("RET - Same level to %X:%X RPL %X DPL %X",selector,offset,rpl,desc.DPL());
			return;
		} else {
			/* Return to outer level */
			switch (desc.Type()) {
			case DESC_CODE_N_NC_A:case DESC_CODE_N_NC_NA:
			case DESC_CODE_R_NC_A:case DESC_CODE_R_NC_NA:
				CPU_CHECK_COND(desc.DPL()!=rpl,
					"RET to outer NC segment with DPL!=RPL",
					EXCEPTION_GP,selector & 0xfffc)
				break;
			case DESC_CODE_N_C_A:case DESC_CODE_N_C_NA:
			case DESC_CODE_R_C_A:case DESC_CODE_R_C_NA:
				CPU_CHECK_COND(desc.DPL()>rpl,
					"RET to outer C segment with DPL>RPL",
					EXCEPTION_GP,selector & 0xfffc)
				break;
			default:
				E_Exit("RET from illegal descriptor type %X",(int)desc.Type());		// or #GP(selector)
			}

			CPU_CHECK_COND(!desc.saved.seg.p,
				"RET:Outer level:CS not present",
				EXCEPTION_NP,selector & 0xfffc)

			// commit point
			/* Pop CS:EIP, discard the imm16 bytes, then pop the outer SS:ESP. */
			Bit32u n_esp,n_ss;
			try {
				if (use32) {
					offset=CPU_Pop32();
					selector=CPU_Pop32() & 0xffff;
					reg_esp+= (Bit32u)bytes;
					n_esp = CPU_Pop32();
					n_ss = CPU_Pop32() & 0xffff;
				} else {
					offset=CPU_Pop16();
					selector=CPU_Pop16();
					reg_esp+= (Bit32u)bytes;
					n_esp = CPU_Pop16();
					n_ss = CPU_Pop16();
				}
			}
			catch (const GuestPageFaultException &pf) {
				(void)pf;//UNUSED
				LOG_MSG("CPU_RET() interrupted prot #2");
				reg_esp = orig_esp;
				throw;
			}

			CPU_CHECK_COND((n_ss & 0xfffc)==0,
				"RET to outer level with SS selector zero",
				EXCEPTION_GP,0)

			Descriptor n_ss_desc;
			CPU_CHECK_COND(!cpu.gdt.GetDescriptor(n_ss,n_ss_desc),
				"RET:SS beyond limits",
				EXCEPTION_GP,n_ss & 0xfffc)

			/* the new SS must match the privilege level we return to */
			CPU_CHECK_COND(((n_ss & 3)!=rpl) || (n_ss_desc.DPL()!=rpl),
				"RET to outer segment with invalid SS privileges",
				EXCEPTION_GP,n_ss & 0xfffc)
			switch (n_ss_desc.Type()) {
			case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
			case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:
				/* writable data segment - ok for a stack */
				break;
			default:
				E_Exit("RET:SS selector type no writable data segment");	// or #GP(selector)
			}
			CPU_CHECK_COND(!n_ss_desc.saved.seg.p,
				"RET:Stack segment not present",
				EXCEPTION_SS,n_ss & 0xfffc)

			/* Drop to the outer privilege level, then load CS */
			CPU_SetCPL(rpl);
			Segs.expanddown[cs]=desc.GetExpandDown();
			Segs.limit[cs]=do_seg_limits? (PhysPt)desc.GetLimit():((PhysPt)(~0UL));
			Segs.phys[cs]=desc.GetBase();
			cpu.code.big=desc.Big()>0;
			Segs.val[cs]=(Bit16u)((selector&0xfffc) | cpu.cpl);
			reg_eip=offset;

			/* Load the outer SS:ESP; stack mask follows the B (big) bit */
			Segs.val[ss]=(Bit16u)n_ss;
			Segs.phys[ss]=n_ss_desc.GetBase();
			Segs.limit[ss]=do_seg_limits? (PhysPt)n_ss_desc.GetLimit():((PhysPt)(~0UL));
			Segs.expanddown[ss]=n_ss_desc.GetExpandDown();
			if (n_ss_desc.Big()) {
				cpu.stack.big=true;
				cpu.stack.mask=0xffffffff;
				cpu.stack.notmask=0;
				reg_esp=(Bit32u)(n_esp+bytes);
			} else {
				cpu.stack.big=false;
				cpu.stack.mask=0xffff;
				cpu.stack.notmask=0xffff0000;
				reg_sp=(Bit16u)((n_esp & 0xffff)+bytes);
			}

			/* invalidate DS/ES/FS/GS that are no longer accessible at the new CPL */
			CPU_CheckSegments();

//			LOG(LOG_MISC,LOG_ERROR)("RET - Higher level to %X:%X RPL %X DPL %X",selector,offset,rpl,desc.DPL());
			return;
		}
		LOG(LOG_CPU,LOG_NORMAL)("Prot ret %lX:%lX",(unsigned long)selector,(unsigned long)offset);
		return;
	}
	assert(1);	/* NOTE(review): assert(1) is a no-op; presumably a leftover marker */
}


/* SLDT: return the current LDT selector. */
Bitu CPU_SLDT(void) {
	return cpu.gdt.SLDT();
}

/* LLDT: load the LDT register. Returns true if an exception was prepared. */
bool CPU_LLDT(Bitu selector) {
	if (!cpu.gdt.LLDT(selector)) {
		LOG(LOG_CPU,LOG_ERROR)("LLDT failed, selector=%X",selector);
		return true;
	}
	LOG(LOG_CPU,LOG_NORMAL)("LDT Set to %X",selector);
	return false;
}

/* STR: return the current task register (TSS) selector. */
Bitu CPU_STR(void) {
	return cpu_tss.selector;
}

/* LTR: load the task register. A null selector is accepted and just clears it;
 * otherwise the selector must reference an available (non-busy) TSS descriptor
 * in the GDT (LDT-relative selectors, bit 2 set, are rejected with #GP). */
bool CPU_LTR(Bitu selector) {
	if ((selector & 0xfffc)==0) {
		/* null selector: allowed, marks the task register invalid */
		cpu_tss.SetSelector(selector);
		return false;
	}
	TSS_Descriptor desc;
	if ((selector & 4) || (!cpu.gdt.GetDescriptor(selector,desc))) {
		LOG(LOG_CPU,LOG_ERROR)("LTR failed, selector=%X",selector);
		return CPU_PrepareException(EXCEPTION_GP,selector);
	}

	if ((desc.Type()==DESC_286_TSS_A) || (desc.Type()==DESC_386_TSS_A)) {
		if (!desc.saved.seg.p) {
			LOG(LOG_CPU,LOG_ERROR)("LTR failed, selector=%X (not present)",selector);
			return CPU_PrepareException(EXCEPTION_NP,selector);
		}
		if (!cpu_tss.SetSelector(selector)) E_Exit("LTR failed, selector=%X",(int)selector);
		cpu_tss.desc.SetBusy(true);
		cpu_tss.SaveSelector();
	} else {
		/* Descriptor was no available TSS descriptor */
		LOG(LOG_CPU,LOG_NORMAL)("LTR failed, selector=%X (type=%X)",selector,desc.Type());
		return CPU_PrepareException(EXCEPTION_GP,selector);
	}
	return false;
}

/* LGDT: set the global descriptor table base and limit. */
void CPU_LGDT(Bitu limit,Bitu base) {
	LOG(LOG_CPU,LOG_NORMAL)("GDT Set to base:%X limit:%X",base,limit);
	cpu.gdt.SetLimit(limit);
	cpu.gdt.SetBase((PhysPt)base);
}

/* LIDT: set the interrupt descriptor table base and limit. */
void CPU_LIDT(Bitu limit,Bitu base) {
	LOG(LOG_CPU,LOG_NORMAL)("IDT Set to base:%X limit:%X",base,limit);
	cpu.idt.SetLimit(limit);
	cpu.idt.SetBase((PhysPt)base);
}

/* SGDT/SIDT accessors: report the current table base/limit. */
Bitu CPU_SGDT_base(void) {
	return cpu.gdt.GetBase();
}
Bitu CPU_SGDT_limit(void) {
	return cpu.gdt.GetLimit();
}

Bitu CPU_SIDT_base(void) {
	return cpu.idt.GetBase();
}
Bitu CPU_SIDT_limit(void) {
	return cpu.idt.GetLimit();
}

/* Saved control-register state for CPU_Snap_Back_To_Real_Mode()/Restore(). */
static bool snap_cpu_snapped=false;
static Bit32u snap_cpu_saved_cr0;
static Bit32u snap_cpu_saved_cr2;
static Bit32u snap_cpu_saved_cr3;

/* On shutdown, DOSBox needs to snap back to real mode
 * so that its shutdown code doesn't cause page faults
 * trying to clean up DOS structures when we've booted
 * a 32-bit OS. It shouldn't be cleaning up DOS structures
 * anyway in that case considering they're likely obliterated
 * by the guest OS, but that's something we'll clean up
 * later.
*/
void CPU_Snap_Back_To_Real_Mode() {
	if (snap_cpu_snapped) return;	/* idempotent: only snap once */

	SETFLAGBIT(IF,false);	/* forcibly clear interrupt flag */

	cpu.code.big = false;	/* force back to 16-bit */
	cpu.stack.big = false;
	cpu.stack.mask = 0xffff;
	cpu.stack.notmask = 0xffff0000;

	/* remember CR0/CR2/CR3 so CPU_Snap_Back_Restore() can undo this */
	snap_cpu_saved_cr0 = (Bit32u)cpu.cr0;
	snap_cpu_saved_cr2 = (Bit32u)paging.cr2;
	snap_cpu_saved_cr3 = (Bit32u)paging.cr3;

	CPU_SET_CRX(0,0);	/* force CPU to real mode */
	CPU_SET_CRX(2,0);	/* disable paging */
	CPU_SET_CRX(3,0);	/* clear the page table dir */

	cpu.idt.SetBase(0);	/* or ELSE weird things will happen when INTerrupts are run */
	cpu.idt.SetLimit(1023);

	snap_cpu_snapped = true;
}

/* Undo CPU_Snap_Back_To_Real_Mode(): restore the saved CR0/CR2/CR3. */
void CPU_Snap_Back_Restore() {
	if (!snap_cpu_snapped) return;

	CPU_SET_CRX(0,snap_cpu_saved_cr0);
	CPU_SET_CRX(2,snap_cpu_saved_cr2);
	CPU_SET_CRX(3,snap_cpu_saved_cr3);

	snap_cpu_snapped = false;
}

/* Discard the snapped state without restoring it. */
void CPU_Snap_Back_Forget() {
	snap_cpu_snapped = false;
}

/* True if the active decoder is one of the dynamic recompiler cores. */
bool CPU_IsDynamicCore(void) {
#if (C_DYNAMIC_X86)
	if (cpudecoder == &CPU_Core_Dyn_X86_Run)
		return true;
#elif (C_DYNREC)
	if (cpudecoder == &CPU_Core_Dynrec_Run)
		return true;
#endif
	return false;
}

static bool printed_cycles_auto_info = false;

/* Write control register CRn. CR0 writes handle the real<->protected mode
 * transition, paging enable/disable, and (on the first switch to protected
 * mode) the cycles=auto / core=auto determination. */
void CPU_SET_CRX(Bitu cr,Bitu value) {
	switch (cr) {
	case 0:
		{
			value|=CR0_FPUPRESENT;	/* ET bit: FPU always reported present */
			Bitu changed=cpu.cr0 ^ value;
			if (!changed) return;
			if (GCC_UNLIKELY(changed & CR0_WRITEPROTECT)) {
				/* WP bit only exists on 486 and later */
				if (CPU_ArchitectureType >= CPU_ARCHTYPE_486OLD)
					PAGING_SetWP((value&CR0_WRITEPROTECT)? true:false);
			}
			cpu.cr0=value;
			if (value & CR0_PROTECTION) {
				cpu.pmode=true;
				LOG(LOG_CPU,LOG_NORMAL)("Protected mode");
				PAGING_Enable((value&CR0_PAGING)? true:false);

				if (!(CPU_AutoDetermineMode&CPU_AUTODETERMINE_MASK)) break;

				/* first entry into protected mode with cycles=auto and/or
				 * core=auto pending: switch to max cycles / dynamic core */
				if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CYCLES) {
					CPU_CycleAutoAdjust=true;
					CPU_CycleLeft=0;
					CPU_Cycles=0;
					CPU_OldCycleMax=CPU_CycleMax;
					GFX_SetTitle((Bit32s)CPU_CyclePercUsed,-1,-1,false);
					if(!printed_cycles_auto_info) {
						printed_cycles_auto_info = true;
						LOG_MSG("DOSBox has switched to max cycles, because of the setting: cycles=auto.\nIf the game runs too fast, try a fixed cycles amount in DOSBox's options.");
					}
					menu_update_autocycle();
				} else {
					GFX_SetTitle(-1,-1,-1,false);
				}
#if (C_DYNAMIC_X86)
				if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CORE) {
					CPU_Core_Dyn_X86_Cache_Init(true);
					cpudecoder=&CPU_Core_Dyn_X86_Run;
					strcpy(core_mode, "dynamic");
				}
#elif (C_DYNREC)
				if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CORE) {
					CPU_Core_Dynrec_Cache_Init(true);
					cpudecoder=&CPU_Core_Dynrec_Run;
				}
#endif
				/* shift the auto-determine flags out so this runs only once */
				CPU_AutoDetermineMode<<=CPU_AUTODETERMINE_SHIFT;
			} else {
				cpu.pmode=false;
				if (value & CR0_PAGING) LOG_MSG("Paging requested without PE=1");
				PAGING_Enable(false);
				LOG(LOG_CPU,LOG_NORMAL)("Real mode");
			}
			break;
		}
	case 2:
		paging.cr2=value;	/* page fault linear address */
		break;
	case 3:
		PAGING_SetDirBase(value);
		break;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV CR%d,%X",cr,value);
		break;
	}
}

/* MOV CRn,reg with privilege/validity checks. Returns true if an exception
 * was prepared (caller raises it), false on success. */
bool CPU_WRITE_CRX(Bitu cr,Bitu value) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	if ((cr==1) || (cr>4)) return CPU_PrepareException(EXCEPTION_UD,0);
	if (CPU_ArchitectureType<CPU_ARCHTYPE_486OLD) {
		if (cr==4) return CPU_PrepareException(EXCEPTION_UD,0);	/* CR4 is 486+ */
	}
	CPU_SET_CRX(cr,value);
	return false;
}

/* Read control register CRn. CR0 reads are filtered by CPU model: Pentium
 * returns the full register, 486 masks reserved bits, older models report
 * the reserved upper bits as set. */
Bitu CPU_GET_CRX(Bitu cr) {
	switch (cr) {
	case 0:
		if (CPU_ArchitectureType>=CPU_ARCHTYPE_PENTIUM) return cpu.cr0;
		else if (CPU_ArchitectureType>=CPU_ARCHTYPE_486OLD) return (cpu.cr0 & 0xe005003f);
		else return (cpu.cr0 | 0x7ffffff0);
	case 2:
		return paging.cr2;	/* page fault linear address */
	case 3:
		return PAGING_GetDirBase() & 0xfffff000;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV XXX, CR%d",cr);
		break;
	}
	return 0;
}

/* MOV reg,CRn with privilege/validity checks. Returns true if an exception
 * was prepared, false on success (value in retvalue). */
bool CPU_READ_CRX(Bitu cr,Bit32u & retvalue) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	if ((cr==1) || (cr>4)) return CPU_PrepareException(EXCEPTION_UD,0);
	if (CPU_ArchitectureType<CPU_ARCHTYPE_486OLD) {
		if (cr==4) return CPU_PrepareException(EXCEPTION_UD,0);	/* CR4 is 486+ */
	}
	retvalue=(Bit32u)CPU_GET_CRX(cr);
	return false;
}


/* MOV DRn,reg. DR4/DR5 act as aliases of DR6/DR7 here; fixed bits of
 * DR6/DR7 are forced on write. */
bool CPU_WRITE_DRX(Bitu dr,Bitu value) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	switch (dr) {
	case 0:
	case 1:
	case 2:
	case 3:
		cpu.drx[dr]=(Bit32u)value;	/* breakpoint linear addresses */
		break;
	case 4:
	case 6:
		cpu.drx[6]=(value|0xffff0ff0) & 0xffffefff;
		break;
	case 5:
	case 7:
		if (CPU_ArchitectureType<CPU_ARCHTYPE_PENTIUM) {
			cpu.drx[7]=(value|0x400) & 0xffff2fff;
		} else {
			cpu.drx[7]=(Bit32u)(value|0x400);
		}
		break;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV DR%d,%X",dr,value);
		break;
	}
	return false;
}

/* MOV reg,DRn. DR4/DR5 read back as DR6/DR7. */
bool CPU_READ_DRX(Bitu dr,Bit32u & retvalue) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	switch (dr) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 6:
	case 7:
		retvalue=cpu.drx[dr];
		break;
	case 4:
		retvalue=cpu.drx[6];
		break;
	case 5:
		retvalue=cpu.drx[7];
		break;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV XXX, DR%d",dr);
		retvalue=0;
		break;
	}
	return false;
}

/* MOV TRn,reg (386/486 test registers). Only TR6/TR7 are accepted. */
bool CPU_WRITE_TRX(Bitu tr,Bitu value) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	switch (tr) {
//	case 3:
	case 6:
	case 7:
		cpu.trx[tr]=(Bit32u)value;
		return false;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV TR%d,%X",tr,value);
		break;
	}
	return CPU_PrepareException(EXCEPTION_UD,0);
}

/* MOV reg,TRn. Only TR6/TR7 are accepted. */
bool CPU_READ_TRX(Bitu tr,Bit32u & retvalue) {
	/* Check if privileged to access control registers */
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	switch (tr) {
//	case 3:
	case 6:
	case 7:
		retvalue=cpu.trx[tr];
		return false;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled MOV XXX, TR%d",tr);
		break;
	}
	return CPU_PrepareException(EXCEPTION_UD,0);
}


/* SMSW: return the machine status word (low word of CR0). */
Bitu CPU_SMSW(void) {
	return cpu.cr0;
}

/* LMSW: write the low 4 bits of CR0. PE is sticky - once set it cannot be
 * cleared via LMSW (only via MOV CR0). */
bool CPU_LMSW(Bitu word) {
	if (cpu.pmode && (cpu.cpl>0)) return CPU_PrepareException(EXCEPTION_GP,0);
	word&=0xf;
	if (cpu.cr0 & 1) word|=1;	/* PE cannot be cleared by LMSW */
	word|=(cpu.cr0&0xfffffff0);
	CPU_SET_CRX(0,word);
	return false;
}

/* ARPL: raise dest selector's RPL to at least src's RPL; ZF set if adjusted. */
void CPU_ARPL(Bitu & dest_sel,Bitu src_sel) {
	FillFlags();
	if ((dest_sel & 3) < (src_sel & 3)) {
		dest_sel=(dest_sel & 0xfffc) + (src_sel & 3);
//		dest_sel|=0xff3f0000;
		SETFLAGBIT(ZF,true);
	} else {
		SETFLAGBIT(ZF,false);
	}
}

/* LAR: load access rights byte of a descriptor if it is visible at the
 * current privilege level; ZF reports success. */
void CPU_LAR(Bitu selector,Bitu & ar) {
	FillFlags();
	if (selector == 0) {
		SETFLAGBIT(ZF,false);
		return;
	}
	Descriptor desc;Bitu rpl=selector & 3;
	if (!cpu.gdt.GetDescriptor(selector,desc)){
		SETFLAGBIT(ZF,false);
		return;
	}
	switch (desc.Type()){
	case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA:
	case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA:
		/* conforming code: readable regardless of CPL/RPL */
		break;

	case DESC_286_INT_GATE: case DESC_286_TRAP_GATE: {
	case DESC_386_INT_GATE: case DESC_386_TRAP_GATE:
		/* NOTE(review): stray { } around these cases is harmless but odd */
		SETFLAGBIT(ZF,false);
		return;
	}

	case DESC_LDT:
	case DESC_TASK_GATE:

	case DESC_286_TSS_A: case DESC_286_TSS_B:
	case DESC_286_CALL_GATE:

	case DESC_386_TSS_A: case DESC_386_TSS_B:
	case DESC_386_CALL_GATE:


	case DESC_DATA_EU_RO_NA: case DESC_DATA_EU_RO_A:
	case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
	case DESC_DATA_ED_RO_NA: case DESC_DATA_ED_RO_A:
	case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:
	case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA:
	case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA:
		/* visible only if DPL >= max(CPL, RPL) */
		if (desc.DPL()<cpu.cpl || desc.DPL() < rpl) {
			SETFLAGBIT(ZF,false);
			return;
		}
		break;
	default:
		SETFLAGBIT(ZF,false);
		return;
	}
	/* Valid descriptor */
	ar=desc.saved.fill[1] & 0x00ffff00;
	SETFLAGBIT(ZF,true);
}

/* LSL: load segment limit of a descriptor if it is visible at the current
 * privilege level; ZF reports success. */
void CPU_LSL(Bitu selector,Bitu & limit) {
	FillFlags();
	if (selector == 0) {
		SETFLAGBIT(ZF,false);
		return;
	}
	Descriptor desc;Bitu rpl=selector & 3;
	if (!cpu.gdt.GetDescriptor(selector,desc)){
		SETFLAGBIT(ZF,false);
		return;
	}
	switch (desc.Type()){
	case DESC_CODE_N_C_A: case DESC_CODE_N_C_NA:
	case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA:
		/* conforming code: no privilege check */
		break;

	case DESC_LDT:
	case DESC_286_TSS_A:
	case DESC_286_TSS_B:

	case DESC_386_TSS_A:
	case DESC_386_TSS_B:

	case DESC_DATA_EU_RO_NA: case DESC_DATA_EU_RO_A:
	case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
	case DESC_DATA_ED_RO_NA: case DESC_DATA_ED_RO_A:
	case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:

	case DESC_CODE_N_NC_A: case DESC_CODE_N_NC_NA:
	case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA:
		/* visible only if DPL >= max(CPL, RPL) */
		if (desc.DPL()<cpu.cpl || desc.DPL() < rpl) {
			SETFLAGBIT(ZF,false);
			return;
		}
		break;
	default:
		SETFLAGBIT(ZF,false);
		return;
	}
	limit=desc.GetLimit();
	SETFLAGBIT(ZF,true);
}

/* VERR: set ZF if the selector is readable at the current privilege level. */
void CPU_VERR(Bitu selector) {
	FillFlags();
	if (selector == 0) {
		SETFLAGBIT(ZF,false);
		return;
	}
	Descriptor desc;Bitu rpl=selector & 3;
	if (!cpu.gdt.GetDescriptor(selector,desc)){
		SETFLAGBIT(ZF,false);
		return;
	}
	switch (desc.Type()){
	case DESC_CODE_R_C_A: case DESC_CODE_R_C_NA:
		//Conforming readable code segments can be always read
		break;
	case DESC_DATA_EU_RO_NA: case DESC_DATA_EU_RO_A:
	case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
	case DESC_DATA_ED_RO_NA: case DESC_DATA_ED_RO_A:
	case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:

	case DESC_CODE_R_NC_A: case DESC_CODE_R_NC_NA:
		if (desc.DPL()<cpu.cpl || desc.DPL() < rpl) {
			SETFLAGBIT(ZF,false);
			return;
		}
		break;
	default:
		SETFLAGBIT(ZF,false);
		return;
	}
	SETFLAGBIT(ZF,true);
}

/* VERW: set ZF if the selector is writable (writable data segment only)
 * at the current privilege level. */
void CPU_VERW(Bitu selector) {
	FillFlags();
	if (selector == 0) {
		SETFLAGBIT(ZF,false);
		return;
	}
	Descriptor desc;Bitu rpl=selector & 3;
	if (!cpu.gdt.GetDescriptor(selector,desc)){
		SETFLAGBIT(ZF,false);
		return;
	}
	switch (desc.Type()){
	case DESC_DATA_EU_RW_NA: case DESC_DATA_EU_RW_A:
	case DESC_DATA_ED_RW_NA: case DESC_DATA_ED_RW_A:
		if (desc.DPL()<cpu.cpl || desc.DPL() < rpl) {
			SETFLAGBIT(ZF,false);
			return;
		}
		break;
	default:
		SETFLAGBIT(ZF,false);
		return;
	}
	SETFLAGBIT(ZF,true);
}

/* Load a segment register (DS/ES/FS/GS/SS) with `value`. Real/vm86 mode sets
 * base = value<<4 with no checks; protected mode performs the full descriptor
 * checks. SS gets the stricter stack-segment rules and also updates the
 * stack big/mask state. Returns true if an exception was prepared. */
bool CPU_SetSegGeneral(SegNames seg,Bit16u value) {
	if (!cpu.pmode || (reg_flags & FLAG_VM)) {
		Segs.val[seg]=value;
		Segs.phys[seg]=(PhysPt)value << 4u;
		if (seg==ss) {
			cpu.stack.big=false;
			cpu.stack.mask=0xffff;
			cpu.stack.notmask=0xffff0000;
		}

		/* real mode: loads do not change the limit. "Flat real mode" would not be possible otherwise.
		 * vm86: loads are fixed at 64KB (right?) */
		if (reg_flags & FLAG_VM)
			Segs.limit[seg] = 0xFFFF;

		return false;
	} else {
		if (seg==ss) {
			// Stack needs to be non-zero
			if ((value & 0xfffc)==0) {
//				E_Exit("CPU_SetSegGeneral: Stack segment zero");
				return CPU_PrepareException(EXCEPTION_GP,0);
			}
			Descriptor desc;
			if (!cpu.gdt.GetDescriptor(value,desc)) {
//				E_Exit("CPU_SetSegGeneral: Stack segment beyond limits");
				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}
			/* SS RPL and DPL must both equal CPL */
			if (((value & 3)!=cpu.cpl) || (desc.DPL()!=cpu.cpl)) {
//				E_Exit("CPU_SetSegGeneral: Stack segment with invalid privileges");
				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}

			switch (desc.Type()) {
			case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
			case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
				/* writable data segment - ok for a stack */
				break;
			default:
				//Earth Siege 1
				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}

			if (!desc.saved.seg.p) {
//				E_Exit("CPU_SetSegGeneral: Stack segment not present"); // or #SS(sel)
				return CPU_PrepareException(EXCEPTION_SS,value & 0xfffc);
			}

			Segs.val[seg]=value;
			Segs.phys[seg]=desc.GetBase();
			Segs.limit[seg]=do_seg_limits? (PhysPt)desc.GetLimit():((PhysPt)(~0UL));
			Segs.expanddown[seg]=desc.GetExpandDown();
			/* stack addressing width follows the B bit of the new SS */
			if (desc.Big()) {
				cpu.stack.big=true;
				cpu.stack.mask=0xffffffff;
				cpu.stack.notmask=0;
			} else {
				cpu.stack.big=false;
				cpu.stack.mask=0xffff;
				cpu.stack.notmask=0xffff0000;
			}
		} else {
			/* data segment: a null selector is legal to load (faults on use) */
			if ((value & 0xfffc)==0) {
				Segs.val[seg]=value;
				Segs.phys[seg]=0;	// ??
				return false;
			}
			Descriptor desc;
			if (!cpu.gdt.GetDescriptor(value,desc)) {
				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
			}
			switch (desc.Type()) {
			case DESC_DATA_EU_RO_NA:	case DESC_DATA_EU_RO_A:
			case DESC_DATA_EU_RW_NA:	case DESC_DATA_EU_RW_A:
			case DESC_DATA_ED_RO_NA:	case DESC_DATA_ED_RO_A:
			case DESC_DATA_ED_RW_NA:	case DESC_DATA_ED_RW_A:
			case DESC_CODE_R_NC_A:		case DESC_CODE_R_NC_NA:
				/* non-conforming: DPL must be >= max(RPL, CPL) */
				if (((value & 3u)>desc.DPL()) || (cpu.cpl>desc.DPL())) {
					// extreme pinball
					return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);
				}
				break;
			case DESC_CODE_R_C_A:		case DESC_CODE_R_C_NA:
				/* conforming readable code: no privilege check */
				break;
			default:
				// gabriel knight
				return CPU_PrepareException(EXCEPTION_GP,value & 0xfffc);

			}
			if (!desc.saved.seg.p) {
				// win
				return CPU_PrepareException(EXCEPTION_NP,value & 0xfffc);
			}

			Segs.val[seg]=value;
			Segs.phys[seg]=desc.GetBase();
			Segs.limit[seg]=do_seg_limits?(PhysPt)desc.GetLimit():((PhysPt)(~0UL));
			Segs.expanddown[seg]=desc.GetExpandDown();
		}

		return false;
	}
}

/* POP seg: read the selector off the stack, load the segment register, then
 * commit the new ESP only if the load succeeded. */
bool CPU_PopSeg(SegNames seg,bool use32) {
	Bitu val=mem_readw(SegPhys(ss) + (reg_esp & cpu.stack.mask));
	Bitu addsp = use32 ?
0x04 : 0x02;
	//Calculate this beforehand since the stack mask might change
	Bit32u new_esp = (reg_esp&cpu.stack.notmask) | ((reg_esp + addsp)&cpu.stack.mask);
	if (CPU_SetSegGeneral(seg,(Bit16u)val)) return true;
	reg_esp = new_esp;
	return false;
}

extern bool enable_fpu;

/* CPUID. Supported from 486 (new) onward; reports a "GenuineIntel" vendor
 * string and family/model/stepping plus feature flags matching the emulated
 * CPU type. Returns false if CPUID is not available on the selected model. */
bool CPU_CPUID(void) {
	if (CPU_ArchitectureType < CPU_ARCHTYPE_486NEW) return false;
	switch (reg_eax) {
	case 0:	/* Vendor ID String and maximum level? */
		reg_eax=1;  /* Maximum level */
		reg_ebx='G' | ('e' << 8) | ('n' << 16) | ('u'<< 24);
		reg_edx='i' | ('n' << 8) | ('e' << 16) | ('I'<< 24);
		reg_ecx='n' | ('t' << 8) | ('e' << 16) | ('l'<< 24);
		break;
	case 1:	/* get processor type/family/model/stepping and feature flags */
		if ((CPU_ArchitectureType == CPU_ARCHTYPE_486NEW) ||
			(CPU_ArchitectureType == CPU_ARCHTYPE_MIXED)) {
			reg_eax=0x402;		/* intel 486dx */
			reg_ebx=0;			/* Not Supported */
			reg_ecx=0;			/* No features */
			reg_edx=enable_fpu?1:0;	/* FPU */
		} else if (CPU_ArchitectureType == CPU_ARCHTYPE_PENTIUM) {
			reg_eax=0x513;		/* intel pentium */
			reg_ebx=0;			/* Not Supported */
			reg_ecx=0;			/* No features */
			reg_edx=0x00000010|(enable_fpu?1:0);	/* FPU+TimeStamp/RDTSC */
			if (enable_msr) reg_edx |= 0x20;		/* ModelSpecific/MSR */
			if (enable_cmpxchg8b) reg_edx |= 0x100;	/* CMPXCHG8B */
		} else if (CPU_ArchitectureType == CPU_ARCHTYPE_PMMXSLOW) {
			reg_eax=0x543;		/* intel pentium mmx (PMMX) */
			reg_ebx=0;			/* Not Supported */
			reg_ecx=0;			/* No features */
			reg_edx=0x00800010|(enable_fpu?1:0);	/* FPU+TimeStamp/RDTSC+MMX+ModelSpecific/MSR */
			if (enable_msr) reg_edx |= 0x20;		/* ModelSpecific/MSR */
			if (enable_cmpxchg8b) reg_edx |= 0x100;	/* CMPXCHG8B */
		} else if (CPU_ArchitectureType == CPU_ARCHTYPE_PPROSLOW) {
			reg_eax=0x612;		/* intel pentium pro */
			reg_ebx=0;			/* Not Supported */
			reg_ecx=0;			/* No features */
			reg_edx=0x00008011;	/* FPU+TimeStamp/RDTSC */
		} else {
			return false;
		}
		break;
	default:
		LOG(LOG_CPU,LOG_ERROR)("Unhandled CPUID Function %x",reg_eax);
		reg_eax=0;
		reg_ebx=0;
		reg_ecx=0;
		reg_edx=0;
		break;
	}
	return true;
}

/* Decoder installed while the CPU is halted: burns cycles at the HLT address
 * until CS:EIP changes (i.e. an interrupt was serviced), then restores the
 * previous decoder. */
Bits HLT_Decode(void) {
	/* Once an interrupt occurs, it should change cpu core */
	if (reg_eip!=cpu.hlt.eip || SegValue(cs) != cpu.hlt.cs) {
		cpudecoder=cpu.hlt.old_decoder;
	} else {
		CPU_IODelayRemoved += CPU_Cycles;
		CPU_Cycles=0;
	}
	return 0;
}

/* HLT: park the CPU at oldeip with the HLT decoder until an interrupt moves
 * CS:EIP. The current decoder is saved for HLT_Decode to restore. */
void CPU_HLT(Bit32u oldeip) {
	/* Since cpu.hlt.old_decoder assigns the current decoder to old, and relies on restoring
	 * it back when finished, setting cpudecoder to HLT_Decode while already HLT_Decode effectively
	 * hangs DOSBox and makes it complete unresponsive. Don't want that! */
	if (cpudecoder == &HLT_Decode) E_Exit("CPU_HLT attempted to set HLT_Decode while CPU decoder already HLT_Decode");

	reg_eip=oldeip;
	CPU_IODelayRemoved += CPU_Cycles;
	CPU_Cycles=0;
	cpu.hlt.cs=SegValue(cs);
	cpu.hlt.eip=reg_eip;
	cpu.hlt.old_decoder=cpudecoder;
	cpudecoder=&HLT_Decode;
}

/* ENTER: build a stack frame. Pushes BP/EBP, copies `level-1` enclosing frame
 * pointers (nesting level is masked to 0..31 as on hardware), pushes the new
 * frame pointer, then reserves `bytes` of locals. */
void CPU_ENTER(bool use32,Bitu bytes,Bitu level) {
	level&=0x1f;
	Bit32u sp_index=reg_esp&cpu.stack.mask;
	Bit32u bp_index=reg_ebp&cpu.stack.mask;
	if (!use32) {
		sp_index-=2;
		mem_writew(SegPhys(ss)+sp_index,reg_bp);
		reg_bp=(Bit16u)(reg_esp-2);
		if (level) {
			for (Bitu i=1;i<level;i++) {
				sp_index-=2;bp_index-=2;
				mem_writew(SegPhys(ss)+sp_index,mem_readw(SegPhys(ss)+bp_index));
			}
			sp_index-=2;
			mem_writew(SegPhys(ss)+sp_index,reg_bp);
		}
	} else {
		sp_index-=4;
		mem_writed(SegPhys(ss)+sp_index,reg_ebp);
		reg_ebp=(reg_esp-4);
		if (level) {
			for (Bitu
i=1;i<level;i++) { 02861 sp_index-=4;bp_index-=4; 02862 mem_writed(SegPhys(ss)+sp_index,mem_readd(SegPhys(ss)+bp_index)); 02863 } 02864 sp_index-=4; 02865 mem_writed(SegPhys(ss)+sp_index,reg_ebp); 02866 } 02867 } 02868 sp_index-=(Bit32u)bytes; 02869 reg_esp=(reg_esp&cpu.stack.notmask)|((sp_index)&cpu.stack.mask); 02870 } 02871 02872 void CPU_SyncCycleMaxToProp(void) { 02873 char tmp[64]; 02874 02875 Section* sec=control->GetSection("cpu"); 02876 const Section_prop * secprop = static_cast<Section_prop *>(sec); 02877 Prop_multival* p = secprop->Get_multival("cycles"); 02878 Property* prop = p->GetSection()->Get_prop("type"); 02879 sprintf(tmp,"%llu",(unsigned long long)CPU_CycleMax); 02880 prop->SetValue(tmp); 02881 } 02882 02883 void CPU_CycleIncrease(bool pressed) { 02884 if (!pressed) return; 02885 02886 if (CPU_CycleAutoAdjust) { 02887 CPU_CyclePercUsed+=5; 02888 if (CPU_CyclePercUsed>105) CPU_CyclePercUsed=105; 02889 LOG_MSG("CPU speed: max %ld percent.",(unsigned long)CPU_CyclePercUsed); 02890 GFX_SetTitle((Bit32s)CPU_CyclePercUsed,-1,-1,false); 02891 } else { 02892 Bit32s old_cycles= (Bit32s)CPU_CycleMax; 02893 if (CPU_CycleUp < 100) { 02894 CPU_CycleMax = (Bit32s)(CPU_CycleMax * (1 + (float)CPU_CycleUp / 100.0)); 02895 } else { 02896 CPU_CycleMax = (Bit32s)(CPU_CycleMax + CPU_CycleUp); 02897 } 02898 02899 CPU_CycleLeft=0;CPU_Cycles=0; 02900 if (CPU_CycleMax==old_cycles) CPU_CycleMax++; 02901 if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CYCLES) { 02902 LOG_MSG("CPU:%ld cycles (auto)",(unsigned long)CPU_CycleMax); 02903 } else { 02904 CPU_CyclesSet=CPU_CycleMax; 02905 #if (C_DYNAMIC_X86) 02906 if (CPU_CycleMax > 15000 && cpudecoder != &CPU_Core_Dyn_X86_Run) 02907 LOG_MSG("CPU speed: fixed %ld cycles. 
If you need more than 20000, try core=dynamic in DOSBox's options.",(unsigned long)CPU_CycleMax); 02908 else 02909 // TODO: Add C_DYNREC version 02910 #endif 02911 LOG_MSG("CPU speed: fixed %ld cycles.",(unsigned long)CPU_CycleMax); 02912 } 02913 GFX_SetTitle((Bit32s)CPU_CycleMax,-1,-1,false); 02914 CPU_SyncCycleMaxToProp(); 02915 } 02916 } 02917 02918 void CPU_CycleDecrease(bool pressed) { 02919 if (!pressed) return; 02920 if (CPU_CycleAutoAdjust) { 02921 CPU_CyclePercUsed-=5; 02922 if (CPU_CyclePercUsed<=0) CPU_CyclePercUsed=1; 02923 if(CPU_CyclePercUsed <=70) 02924 LOG_MSG("CPU speed: max %ld percent. If the game runs too fast, try a fixed cycles amount in DOSBox's options.",(unsigned long)CPU_CyclePercUsed); 02925 else 02926 LOG_MSG("CPU speed: max %ld percent.",(unsigned long)CPU_CyclePercUsed); 02927 GFX_SetTitle((Bit32s)CPU_CyclePercUsed,-1,-1,false); 02928 } else { 02929 if (CPU_CycleDown < 100) { 02930 CPU_CycleMax = (Bit32s)(CPU_CycleMax / (1 + (float)CPU_CycleDown / 100.0)); 02931 } else { 02932 CPU_CycleMax = (Bit32s)(CPU_CycleMax - CPU_CycleDown); 02933 } 02934 CPU_CycleLeft=0;CPU_Cycles=0; 02935 if (CPU_CycleMax <= 0) CPU_CycleMax=1; 02936 if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CYCLES) { 02937 LOG_MSG("CPU:%ld cycles (auto)",(unsigned long)CPU_CycleMax); 02938 } else { 02939 CPU_CyclesSet=CPU_CycleMax; 02940 LOG_MSG("CPU speed: fixed %ld cycles.",(unsigned long)CPU_CycleMax); 02941 } 02942 GFX_SetTitle((Bit32s)CPU_CycleMax,-1,-1,false); 02943 CPU_SyncCycleMaxToProp(); 02944 } 02945 } 02946 02947 static void CPU_ToggleAutoCycles(bool pressed) { 02948 if (!pressed) 02949 return; 02950 02951 Section* sec=control->GetSection("cpu"); 02952 if (sec) { 02953 std::string tmp("cycles="); 02954 if (CPU_CycleAutoAdjust) { 02955 std::ostringstream str; 02956 str << "fixed " << CPU_CyclesSet; 02957 tmp.append(str.str()); 02958 } else if (CPU_AutoDetermineMode&CPU_AUTODETERMINE_CYCLES) { 02959 tmp.append("max"); 02960 } else { 02961 tmp.append("auto"); 02962 } 
02963 02964 sec->HandleInputline(tmp); 02965 } 02966 } 02967 02968 #if !defined(C_EMSCRIPTEN) 02969 static void CPU_ToggleFullCore(bool pressed) { 02970 if (!pressed) 02971 return; 02972 Section* sec=control->GetSection("cpu"); 02973 if(sec) { 02974 std::string tmp="core=full"; 02975 sec->HandleInputline(tmp); 02976 } 02977 } 02978 #endif 02979 02980 static void CPU_ToggleNormalCore(bool pressed) { 02981 if (!pressed) 02982 return; 02983 Section* sec=control->GetSection("cpu"); 02984 if(sec) { 02985 std::string tmp="core=normal"; 02986 sec->HandleInputline(tmp); 02987 } 02988 } 02989 02990 #if (C_DYNAMIC_X86) || (C_DYNREC) 02991 static void CPU_ToggleDynamicCore(bool pressed) { 02992 if (!pressed) 02993 return; 02994 Section* sec=control->GetSection("cpu"); 02995 if(sec) { 02996 std::string tmp="core=dynamic"; 02997 sec->HandleInputline(tmp); 02998 } 02999 } 03000 #endif 03001 03002 #if !defined(C_EMSCRIPTEN) 03003 static void CPU_ToggleSimpleCore(bool pressed) { 03004 if (!pressed) 03005 return; 03006 Section* sec=control->GetSection("cpu"); 03007 std::string tmp="core=simple"; 03008 if(sec) { 03009 sec->HandleInputline(tmp); 03010 } 03011 } 03012 #endif 03013 03014 void CPU_Enable_SkipAutoAdjust(void) { 03015 if (CPU_CycleAutoAdjust) { 03016 CPU_CycleMax /= 2; 03017 if (CPU_CycleMax < CPU_CYCLES_LOWER_LIMIT) 03018 CPU_CycleMax = CPU_CYCLES_LOWER_LIMIT; 03019 } 03020 CPU_SkipCycleAutoAdjust=true; 03021 } 03022 03023 void CPU_Disable_SkipAutoAdjust(void) { 03024 CPU_SkipCycleAutoAdjust=false; 03025 } 03026 03027 03028 extern Bit32s ticksDone; 03029 extern Bit32u ticksScheduled; 03030 extern int dynamic_core_cache_block_size; 03031 03032 void CPU_Reset_AutoAdjust(void) { 03033 CPU_IODelayRemoved = 0; 03034 ticksDone = 0; 03035 ticksScheduled = 0; 03036 } 03037 03038 class Weitek_PageHandler : public PageHandler { 03039 public: 03040 Weitek_PageHandler(HostPt /*addr*/){ 03041 flags=PFLAG_NOCODE; 03042 } 03043 03044 ~Weitek_PageHandler() { 03045 } 03046 03047 Bit8u 
readb(PhysPt addr); 03048 void writeb(PhysPt addr,Bit8u val); 03049 Bit16u readw(PhysPt addr); 03050 void writew(PhysPt addr,Bit16u val); 03051 Bit32u readd(PhysPt addr); 03052 void writed(PhysPt addr,Bit32u val); 03053 }; 03054 03055 Bit8u Weitek_PageHandler::readb(PhysPt addr) { 03056 LOG_MSG("Weitek stub: readb at 0x%lx",(unsigned long)addr); 03057 return (Bit8u)-1; 03058 } 03059 void Weitek_PageHandler::writeb(PhysPt addr,Bit8u val) { 03060 LOG_MSG("Weitek stub: writeb at 0x%lx val=0x%lx",(unsigned long)addr,(unsigned long)val); 03061 } 03062 03063 Bit16u Weitek_PageHandler::readw(PhysPt addr) { 03064 LOG_MSG("Weitek stub: readw at 0x%lx",(unsigned long)addr); 03065 return (Bit16u)-1; 03066 } 03067 03068 void Weitek_PageHandler::writew(PhysPt addr,Bit16u val) { 03069 LOG_MSG("Weitek stub: writew at 0x%lx val=0x%lx",(unsigned long)addr,(unsigned long)val); 03070 } 03071 03072 Bit32u Weitek_PageHandler::readd(PhysPt addr) { 03073 LOG_MSG("Weitek stub: readd at 0x%lx",(unsigned long)addr); 03074 return (Bit32u)-1; 03075 } 03076 03077 void Weitek_PageHandler::writed(PhysPt addr,Bit32u val) { 03078 LOG_MSG("Weitek stub: writed at 0x%lx val=0x%lx",(unsigned long)addr,(unsigned long)val); 03079 } 03080 03081 Weitek_PageHandler weitek_pagehandler(0); 03082 03083 PageHandler* weitek_memio_cb(MEM_CalloutObject &co,Bitu phys_page) { 03084 (void)co; // UNUSED 03085 (void)phys_page; // UNUSED 03086 return &weitek_pagehandler; 03087 } 03088 03089 bool CpuType_Auto(DOSBoxMenu * const menu,DOSBoxMenu::item * const menuitem) { 03090 (void)menu;//UNUSED 03091 (void)menuitem;//UNUSED 03092 Section* sec=control->GetSection("cpu"); 03093 if (sec) sec->HandleInputline("cputype=auto"); 03094 return true; 03095 } 03096 03097 bool CpuType_ByName(DOSBoxMenu * const menu,DOSBoxMenu::item * const menuitem) { 03098 (void)menu;//UNUSED 03099 03100 const char *name = menuitem->get_name().c_str(); 03101 03102 /* name should be cputype_... 
*/ 03103 if (!strncmp(name,"cputype_",8)) name += 8; 03104 else abort(); 03105 03106 Section* sec=control->GetSection("cpu"); 03107 if (sec) sec->HandleInputline(std::string("cputype=")+name); 03108 return true; 03109 } 03110 03111 static int pcpu_type = -1; 03112 03113 class CPU: public Module_base { 03114 private: 03115 static bool inited; 03116 public: 03117 CPU(Section* configuration):Module_base(configuration) { 03118 const Section_prop * section=static_cast<Section_prop *>(configuration); 03119 DOSBoxMenu::item *item; 03120 03121 if(inited) { 03122 CPU::Change_Config(configuration); 03123 return; 03124 } 03125 // Section_prop * section=static_cast<Section_prop *>(configuration); 03126 inited=true; 03127 reg_eax=0; 03128 reg_ebx=0; 03129 reg_ecx=0; 03130 reg_edx=0; 03131 reg_edi=0; 03132 reg_esi=0; 03133 reg_ebp=0; 03134 reg_esp=0; 03135 03136 do_seg_limits = section->Get_bool("segment limits"); 03137 03138 SegSet16(cs,0); Segs.limit[cs] = do_seg_limits ? 0xFFFF : ((PhysPt)(~0UL)); Segs.expanddown[cs] = false; 03139 SegSet16(ds,0); Segs.limit[ds] = do_seg_limits ? 0xFFFF : ((PhysPt)(~0UL)); Segs.expanddown[ds] = false; 03140 SegSet16(es,0); Segs.limit[es] = do_seg_limits ? 0xFFFF : ((PhysPt)(~0UL)); Segs.expanddown[es] = false; 03141 SegSet16(fs,0); Segs.limit[fs] = do_seg_limits ? 0xFFFF : ((PhysPt)(~0UL)); Segs.expanddown[fs] = false; 03142 SegSet16(gs,0); Segs.limit[gs] = do_seg_limits ? 0xFFFF : ((PhysPt)(~0UL)); Segs.expanddown[gs] = false; 03143 SegSet16(ss,0); Segs.limit[ss] = do_seg_limits ? 
0xFFFF : ((PhysPt)(~0UL)); Segs.expanddown[ss] = false; 03144 03145 CPU_SetFlags(FLAG_IF,FMASK_ALL); //Enable interrupts 03146 cpu.cr0=0xffffffff; 03147 CPU_SET_CRX(0,0); //Initialize 03148 cpu.code.big=false; 03149 cpu.stack.mask=0xffff; 03150 cpu.stack.notmask=0xffff0000; 03151 cpu.stack.big=false; 03152 cpu.trap_skip=false; 03153 cpu.idt.SetBase(0); 03154 cpu.idt.SetLimit(1023); 03155 03156 for (Bitu i=0; i<7; i++) { 03157 cpu.drx[i]=0; 03158 cpu.trx[i]=0; 03159 } 03160 if (CPU_ArchitectureType>=CPU_ARCHTYPE_PENTIUM) { 03161 cpu.drx[6]=0xffff0ff0; 03162 } else { 03163 cpu.drx[6]=0xffff1ff0; 03164 } 03165 cpu.drx[7]=0x00000400; 03166 03167 /* Init the cpu cores */ 03168 CPU_Core_Normal_Init(); 03169 #if !defined(C_EMSCRIPTEN) 03170 CPU_Core_Simple_Init(); 03171 CPU_Core_Full_Init(); 03172 #endif 03173 #if (C_DYNAMIC_X86) 03174 CPU_Core_Dyn_X86_Init(); 03175 #elif (C_DYNREC) 03176 CPU_Core_Dynrec_Init(); 03177 #endif 03178 MAPPER_AddHandler(CPU_CycleDecrease,MK_minus,MMODHOST,"cycledown","Dec Cycles",&item); 03179 item->set_text("Decrement cycles"); 03180 03181 MAPPER_AddHandler(CPU_CycleIncrease,MK_equals,MMODHOST,"cycleup" ,"Inc Cycles",&item); 03182 item->set_text("Increment cycles"); 03183 03184 MAPPER_AddHandler(CPU_ToggleAutoCycles,MK_nothing,0,"cycauto","AutoCycles",&item); 03185 item->set_text("Auto cycles"); 03186 item->set_description("Enable automatic cycle count"); 03187 03188 MAPPER_AddHandler(CPU_ToggleNormalCore,MK_nothing,0,"normal" ,"NormalCore", &item); 03189 item->set_text("Normal core"); 03190 03191 #if !defined(C_EMSCRIPTEN) 03192 MAPPER_AddHandler(CPU_ToggleFullCore,MK_nothing,0,"full","Full Core", &item); 03193 item->set_text("Full core"); 03194 #endif 03195 #if !defined(C_EMSCRIPTEN) 03196 MAPPER_AddHandler(CPU_ToggleSimpleCore,MK_nothing,0,"simple","SimpleCore", &item); 03197 item->set_text("Simple core"); 03198 #endif 03199 #if (C_DYNAMIC_X86) || (C_DYNREC) 03200 
MAPPER_AddHandler(CPU_ToggleDynamicCore,MK_nothing,0,"dynamic","DynCore",&item); 03201 item->set_text("Dynamic core"); 03202 #endif 03203 03204 /* these are not mapper shortcuts, and probably should not be mapper shortcuts */ 03205 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_auto"). 03206 set_text("Auto").set_callback_function(CpuType_Auto); 03207 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_8086"). 03208 set_text("8086").set_callback_function(CpuType_ByName); 03209 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_8086_prefetch"). 03210 set_text("8086 with prefetch").set_callback_function(CpuType_ByName); 03211 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_80186"). 03212 set_text("80186").set_callback_function(CpuType_ByName); 03213 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_80186_prefetch"). 03214 set_text("80186 with prefetch").set_callback_function(CpuType_ByName); 03215 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_286"). 03216 set_text("286").set_callback_function(CpuType_ByName); 03217 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_286_prefetch"). 03218 set_text("286 with prefetch").set_callback_function(CpuType_ByName); 03219 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_386"). 03220 set_text("386").set_callback_function(CpuType_ByName); 03221 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_386_prefetch"). 03222 set_text("386 with prefetch").set_callback_function(CpuType_ByName); 03223 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_486old"). 03224 set_text("486 (old)").set_callback_function(CpuType_ByName); 03225 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_486old_prefetch"). 03226 set_text("486 (old) with prefetch").set_callback_function(CpuType_ByName); 03227 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_486"). 03228 set_text("486").set_callback_function(CpuType_ByName); 03229 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_486_prefetch"). 
03230 set_text("486 with prefetch").set_callback_function(CpuType_ByName); 03231 03232 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_pentium"). 03233 set_text("Pentium").set_callback_function(CpuType_ByName); 03234 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_pentium_mmx"). 03235 set_text("Pentium MMX").set_callback_function(CpuType_ByName); 03236 mainMenu.alloc_item(DOSBoxMenu::item_type_id,"cputype_ppro_slow"). 03237 set_text("Pentium Pro").set_callback_function(CpuType_ByName); 03238 03239 CPU::Change_Config(configuration); 03240 CPU_JMP(false,0,0,0); //Setup the first cpu core 03241 } 03242 bool Change_Config(Section* newconfig){ 03243 const Section_prop * section=static_cast<Section_prop *>(newconfig); 03244 CPU_AutoDetermineMode=CPU_AUTODETERMINE_NONE; 03245 //CPU_CycleLeft=0;//needed ? 03246 CPU_Cycles=0; 03247 CPU_SkipCycleAutoAdjust=false; 03248 03249 ignore_opcode_63 = section->Get_bool("ignore opcode 63"); 03250 use_dynamic_core_with_paging = section->Get_bool("use dynamic core with paging on"); 03251 cpu_double_fault_enable = section->Get_bool("double fault"); 03252 cpu_triple_fault_reset = section->Get_bool("reset on triple fault"); 03253 cpu_allow_big16 = section->Get_bool("realbig16"); 03254 03255 if (cpu_allow_big16) { 03256 /* FIXME: GCC 4.8: How is this an empty body? Explain. 
*/ 03257 LOG(LOG_CPU,LOG_DEBUG)("Emulation of the B (big) bit in real mode enabled\n"); 03258 } 03259 03260 always_report_double_fault = section->Get_bool("always report double fault"); 03261 always_report_triple_fault = section->Get_bool("always report triple fault"); 03262 03263 dynamic_core_cache_block_size = section->Get_int("dynamic core cache block size"); 03264 if (dynamic_core_cache_block_size < 1 || dynamic_core_cache_block_size > 65536) dynamic_core_cache_block_size = 32; 03265 03266 Prop_multival* p = section->Get_multival("cycles"); 03267 std::string type = p->GetSection()->Get_string("type"); 03268 std::string str ; 03269 CommandLine cmd(0,p->GetSection()->Get_string("parameters")); 03270 if (type=="max") { 03271 CPU_CycleMax=0; 03272 CPU_CyclePercUsed=100; 03273 CPU_CycleAutoAdjust=true; 03274 CPU_CycleLimit=-1; 03275 for (Bitu cmdnum=1; cmdnum<=cmd.GetCount(); cmdnum++) { 03276 if (cmd.FindCommand((unsigned int)cmdnum,str)) { 03277 if (str.find('%')==str.length()-1) { 03278 str.erase(str.find('%')); 03279 int percval=0; 03280 std::istringstream stream(str); 03281 stream >> percval; 03282 if ((percval>0) && (percval<=105)) CPU_CyclePercUsed=(Bit32s)percval; 03283 } else if (str=="limit") { 03284 cmdnum++; 03285 if (cmd.FindCommand((unsigned int)cmdnum,str)) { 03286 int cyclimit=0; 03287 std::istringstream stream(str); 03288 stream >> cyclimit; 03289 if (cyclimit>0) CPU_CycleLimit=cyclimit; 03290 } 03291 } 03292 } 03293 } 03294 } else { 03295 if (type=="auto") { 03296 CPU_AutoDetermineMode|=CPU_AUTODETERMINE_CYCLES; 03297 CPU_CycleMax=3000; 03298 CPU_OldCycleMax=3000; 03299 CPU_CyclePercUsed=100; 03300 for (Bitu cmdnum=0; cmdnum<=cmd.GetCount(); cmdnum++) { 03301 if (cmd.FindCommand((unsigned int)cmdnum,str)) { 03302 if (str.find('%')==str.length()-1) { 03303 str.erase(str.find('%')); 03304 int percval=0; 03305 std::istringstream stream(str); 03306 stream >> percval; 03307 if ((percval>0) && (percval<=105)) CPU_CyclePercUsed=(Bit32s)percval; 03308 } 
else if (str=="limit") { 03309 cmdnum++; 03310 if (cmd.FindCommand((unsigned int)cmdnum,str)) { 03311 int cyclimit=0; 03312 std::istringstream stream(str); 03313 stream >> cyclimit; 03314 if (cyclimit>0) CPU_CycleLimit=cyclimit; 03315 } 03316 } else { 03317 int rmdval=0; 03318 std::istringstream stream(str); 03319 stream >> rmdval; 03320 if (rmdval>0) { 03321 CPU_CycleMax=(Bit32s)rmdval; 03322 CPU_OldCycleMax=(Bit32s)rmdval; 03323 } 03324 } 03325 } 03326 } 03327 } else if(type =="fixed") { 03328 cmd.FindCommand(1,str); 03329 int rmdval=0; 03330 std::istringstream stream(str); 03331 stream >> rmdval; 03332 CPU_CycleMax=(Bit32s)rmdval; 03333 } else { 03334 std::istringstream stream(type); 03335 int rmdval=0; 03336 stream >> rmdval; 03337 if(rmdval) { 03338 CPU_CycleMax=(Bit32s)rmdval; 03339 CPU_CyclesSet=(Bit32s)rmdval; 03340 } 03341 } 03342 CPU_CycleAutoAdjust=false; 03343 } 03344 03345 menu_update_autocycle(); 03346 03347 enable_fpu=section->Get_bool("fpu"); 03348 cpu_rep_max=section->Get_int("interruptible rep string op"); 03349 ignore_undefined_msr=section->Get_bool("ignore undefined msr"); 03350 enable_msr=section->Get_bool("enable msr"); 03351 enable_cmpxchg8b=section->Get_bool("enable cmpxchg8b"); 03352 CPU_CycleUp=section->Get_int("cycleup"); 03353 CPU_CycleDown=section->Get_int("cycledown"); 03354 std::string core(section->Get_string("core")); 03355 cpudecoder=&CPU_Core_Normal_Run; 03356 safe_strncpy(core_mode,core.c_str(),15); 03357 core_mode[15] = '\0'; 03358 if (core == "normal") { 03359 cpudecoder=&CPU_Core_Normal_Run; 03360 } else if (core =="simple") { 03361 #if defined(C_EMSCRIPTEN) 03362 cpudecoder=&CPU_Core_Normal_Run; 03363 #else 03364 cpudecoder=&CPU_Core_Simple_Run; 03365 #endif 03366 } else if (core == "full") { 03367 #if defined(C_EMSCRIPTEN) 03368 cpudecoder=&CPU_Core_Normal_Run; 03369 #else 03370 cpudecoder=&CPU_Core_Full_Run; 03371 #endif 03372 } else if (core == "auto") { 03373 cpudecoder=&CPU_Core_Normal_Run; 03374 
CPU_AutoDetermineMode|=CPU_AUTODETERMINE_CORE; 03375 #if (C_DYNAMIC_X86) 03376 } else if (core == "dynamic") { 03377 cpudecoder=&CPU_Core_Dyn_X86_Run; 03378 CPU_Core_Dyn_X86_SetFPUMode(true); 03379 } else if (core == "dynamic_nodhfpu") { 03380 cpudecoder=&CPU_Core_Dyn_X86_Run; 03381 CPU_Core_Dyn_X86_SetFPUMode(false); 03382 #elif (C_DYNREC) 03383 } else if (core == "dynamic") { 03384 cpudecoder=&CPU_Core_Dynrec_Run; 03385 #endif 03386 } else { 03387 strcpy(core_mode,"normal"); 03388 cpudecoder=&CPU_Core_Normal_Run; 03389 LOG_MSG("CPU:Unknown core type %s, switching back to normal.",core.c_str()); 03390 } 03391 03392 #if (C_DYNAMIC_X86) 03393 CPU_Core_Dyn_X86_Cache_Init((core == "dynamic") || (core == "dynamic_nodhfpu")); 03394 #elif (C_DYNREC) 03395 CPU_Core_Dynrec_Cache_Init( core == "dynamic" ); 03396 #endif 03397 03398 CPU_ArchitectureType = CPU_ARCHTYPE_MIXED; 03399 std::string cputype(section->Get_string("cputype")); 03400 if (cputype == "auto") { 03401 CPU_ArchitectureType = CPU_ARCHTYPE_MIXED; 03402 } else if (cputype == "8086") { 03403 CPU_ArchitectureType = CPU_ARCHTYPE_8086; 03404 cpudecoder=&CPU_Core8086_Normal_Run; 03405 } else if (cputype == "8086_prefetch") { /* 6-byte prefetch queue ref [http://www.phatcode.net/res/224/files/html/ch11/11-02.html] */ 03406 CPU_ArchitectureType = CPU_ARCHTYPE_8086; 03407 if (core == "normal") { 03408 cpudecoder=&CPU_Core8086_Prefetch_Run; 03409 CPU_PrefetchQueueSize = 4; /* Emulate the 8088, which was more common in home PCs than having an 8086 */ 03410 } else if (core == "auto") { 03411 cpudecoder=&CPU_Core8086_Prefetch_Run; 03412 CPU_PrefetchQueueSize = 4; /* Emulate the 8088, which was more common in home PCs than having an 8086 */ 03413 CPU_AutoDetermineMode&=(~CPU_AUTODETERMINE_CORE); 03414 } else { 03415 E_Exit("prefetch queue emulation requires the normal core setting."); 03416 } 03417 } else if (cputype == "80186") { 03418 CPU_ArchitectureType = CPU_ARCHTYPE_80186; 03419 cpudecoder=&CPU_Core286_Normal_Run; 
03420 } else if (cputype == "80186_prefetch") { /* 6-byte prefetch queue ref [http://www.phatcode.net/res/224/files/html/ch11/11-02.html] */ 03421 CPU_ArchitectureType = CPU_ARCHTYPE_80186; 03422 if (core == "normal") { 03423 cpudecoder=&CPU_Core286_Prefetch_Run; /* TODO: Alternate 16-bit only decoder for 286 that does NOT include 386+ instructions */ 03424 CPU_PrefetchQueueSize = 6; 03425 } else if (core == "auto") { 03426 cpudecoder=&CPU_Core286_Prefetch_Run; /* TODO: Alternate 16-bit only decoder for 286 that does NOT include 386+ instructions */ 03427 CPU_PrefetchQueueSize = 6; 03428 CPU_AutoDetermineMode&=(~CPU_AUTODETERMINE_CORE); 03429 } else { 03430 E_Exit("prefetch queue emulation requires the normal core setting."); 03431 } 03432 } else if (cputype == "286") { 03433 CPU_ArchitectureType = CPU_ARCHTYPE_286; 03434 cpudecoder=&CPU_Core286_Normal_Run; 03435 } else if (cputype == "286_prefetch") { /* 6-byte prefetch queue ref [http://www.phatcode.net/res/224/files/html/ch11/11-02.html] */ 03436 CPU_ArchitectureType = CPU_ARCHTYPE_286; 03437 if (core == "normal") { 03438 cpudecoder=&CPU_Core286_Prefetch_Run; /* TODO: Alternate 16-bit only decoder for 286 that does NOT include 386+ instructions */ 03439 CPU_PrefetchQueueSize = 6; 03440 } else if (core == "auto") { 03441 cpudecoder=&CPU_Core286_Prefetch_Run; /* TODO: Alternate 16-bit only decoder for 286 that does NOT include 386+ instructions */ 03442 CPU_PrefetchQueueSize = 6; 03443 CPU_AutoDetermineMode&=(~CPU_AUTODETERMINE_CORE); 03444 } else { 03445 E_Exit("prefetch queue emulation requires the normal core setting."); 03446 } 03447 } else if (cputype == "386") { 03448 CPU_ArchitectureType = CPU_ARCHTYPE_386; 03449 } else if (cputype == "386_prefetch") { 03450 CPU_ArchitectureType = CPU_ARCHTYPE_386; 03451 if (core == "normal") { 03452 cpudecoder=&CPU_Core_Prefetch_Run; 03453 CPU_PrefetchQueueSize = 16; 03454 } else if (core == "auto") { 03455 cpudecoder=&CPU_Core_Prefetch_Run; 03456 CPU_PrefetchQueueSize = 
16; 03457 CPU_AutoDetermineMode&=(~CPU_AUTODETERMINE_CORE); 03458 } else { 03459 E_Exit("prefetch queue emulation requires the normal core setting."); 03460 } 03461 } else if (cputype == "486") { 03462 CPU_ArchitectureType = CPU_ARCHTYPE_486NEW; 03463 } else if (cputype == "486_prefetch") { 03464 CPU_ArchitectureType = CPU_ARCHTYPE_486NEW; 03465 if (core == "normal") { 03466 cpudecoder=&CPU_Core_Prefetch_Run; 03467 CPU_PrefetchQueueSize = 32; 03468 } else if (core == "auto") { 03469 cpudecoder=&CPU_Core_Prefetch_Run; 03470 CPU_PrefetchQueueSize = 32; 03471 CPU_AutoDetermineMode&=(~CPU_AUTODETERMINE_CORE); 03472 } else { 03473 E_Exit("prefetch queue emulation requires the normal core setting."); 03474 } 03475 } else if (cputype == "486old") { 03476 CPU_ArchitectureType = CPU_ARCHTYPE_486OLD; 03477 } else if (cputype == "486old_prefetch") { 03478 CPU_ArchitectureType = CPU_ARCHTYPE_486OLD; 03479 if (core == "normal") { 03480 cpudecoder=&CPU_Core_Prefetch_Run; 03481 CPU_PrefetchQueueSize = 16; 03482 } else if (core == "auto") { 03483 cpudecoder=&CPU_Core_Prefetch_Run; 03484 CPU_PrefetchQueueSize = 16; 03485 CPU_AutoDetermineMode&=(~CPU_AUTODETERMINE_CORE); 03486 } else { 03487 E_Exit("prefetch queue emulation requires the normal core setting."); 03488 } 03489 } else if (cputype == "pentium") { 03490 CPU_ArchitectureType = CPU_ARCHTYPE_PENTIUM; 03491 } else if (cputype == "pentium_mmx") { 03492 #if C_FPU 03493 CPU_ArchitectureType = CPU_ARCHTYPE_PMMXSLOW; 03494 #else 03495 E_Exit("Pentium MMX emulation requires FPU emulation, which was not compiled into this binary"); 03496 #endif 03497 } else if (cputype == "ppro_slow") { 03498 CPU_ArchitectureType = CPU_ARCHTYPE_PPROSLOW; 03499 } 03500 03501 /* WARNING */ 03502 if (CPU_ArchitectureType == CPU_ARCHTYPE_8086) { 03503 LOG_MSG("CPU warning: 8086 cpu type is experimental at this time"); 03504 } 03505 else if (CPU_ArchitectureType == CPU_ARCHTYPE_80186) { 03506 LOG_MSG("CPU warning: 80186 cpu type is experimental at this 
time"); 03507 } 03508 03509 /* because of the way the BIOS writes certain entry points, a reboot is required 03510 * if changing between specific levels of CPU. These entry points will fault the 03511 * CPU otherwise. */ 03512 bool reboot_now = false; 03513 03514 if (pcpu_type >= 0 && pcpu_type != CPU_ArchitectureType) { 03515 if (CPU_ArchitectureType >= CPU_ARCHTYPE_386) { 03516 if (pcpu_type < CPU_ARCHTYPE_386) /* from 8086/286, to 386+ */ 03517 reboot_now = true; 03518 } 03519 else if (CPU_ArchitectureType >= CPU_ARCHTYPE_286) { 03520 if (pcpu_type >= CPU_ARCHTYPE_386) /* from 386, to 286 */ 03521 reboot_now = true; 03522 else if (pcpu_type < CPU_ARCHTYPE_286) /* from 8086, to 286 */ 03523 reboot_now = true; 03524 } 03525 else if (CPU_ArchitectureType >= CPU_ARCHTYPE_80186) { 03526 if (pcpu_type >= CPU_ARCHTYPE_286) /* from 286, to 80186 */ 03527 reboot_now = true; 03528 else if (pcpu_type < CPU_ARCHTYPE_80186) /* from 8086, to 80186 */ 03529 reboot_now = true; 03530 } 03531 else if (CPU_ArchitectureType >= CPU_ARCHTYPE_8086) { 03532 if (pcpu_type >= CPU_ARCHTYPE_80186) /* from 186, to 8086 */ 03533 reboot_now = true; 03534 } 03535 } 03536 03537 pcpu_type = CPU_ArchitectureType; 03538 03539 if (CPU_ArchitectureType>=CPU_ARCHTYPE_486NEW) CPU_extflags_toggle=(FLAG_ID|FLAG_AC); 03540 else if (CPU_ArchitectureType>=CPU_ARCHTYPE_486OLD) CPU_extflags_toggle=(FLAG_AC); 03541 else CPU_extflags_toggle=0; 03542 03543 // weitek coprocessor emulation? 
03544 if (CPU_ArchitectureType == CPU_ARCHTYPE_386 || CPU_ArchitectureType == CPU_ARCHTYPE_486OLD || CPU_ArchitectureType == CPU_ARCHTYPE_486NEW) { 03545 const Section_prop *dsection = static_cast<Section_prop *>(control->GetSection("dosbox")); 03546 03547 enable_weitek = dsection->Get_bool("weitek"); 03548 if (enable_weitek) { 03549 LOG_MSG("Weitek coprocessor emulation enabled"); 03550 03551 static MEM_Callout_t weitek_lfb_cb = MEM_Callout_t_none; 03552 03553 if (weitek_lfb_cb == MEM_Callout_t_none) { 03554 weitek_lfb_cb = MEM_AllocateCallout(MEM_TYPE_MB); 03555 if (weitek_lfb_cb == MEM_Callout_t_none) E_Exit("Unable to allocate weitek cb for LFB"); 03556 } 03557 03558 { 03559 MEM_CalloutObject *cb = MEM_GetCallout(weitek_lfb_cb); 03560 03561 assert(cb != NULL); 03562 03563 cb->Uninstall(); 03564 03565 static Bitu weitek_lfb = 0xC0000000UL; 03566 static Bitu weitek_lfb_pages = 0x2000000UL >> 12UL; /* "The coprocessor will respond to memory addresses 0xC0000000-0xC1FFFFFF" */ 03567 03568 cb->Install(weitek_lfb>>12UL,MEMMASK_Combine(MEMMASK_FULL,MEMMASK_Range(weitek_lfb_pages)),weitek_memio_cb); 03569 03570 MEM_PutCallout(cb); 03571 } 03572 } 03573 } 03574 else { 03575 enable_weitek = false; 03576 } 03577 03578 if (cpu_rep_max < 0) cpu_rep_max = 4; /* compromise to help emulation speed without too much loss of accuracy */ 03579 03580 if(CPU_CycleMax <= 0) CPU_CycleMax = 3000; 03581 if(CPU_CycleUp <= 0) CPU_CycleUp = 500; 03582 if(CPU_CycleDown <= 0) CPU_CycleDown = 20; 03583 03584 if (enable_cmpxchg8b && CPU_ArchitectureType >= CPU_ARCHTYPE_PENTIUM) LOG_MSG("Pentium CMPXCHG8B emulation is enabled"); 03585 03586 menu_update_core(); 03587 menu_update_cputype(); 03588 03589 void CPU_Core_Prefetch_reset(void); 03590 CPU_Core_Prefetch_reset(); 03591 void CPU_Core286_Prefetch_reset(void); 03592 CPU_Core286_Prefetch_reset(); 03593 void CPU_Core8086_Prefetch_reset(void); 03594 CPU_Core8086_Prefetch_reset(); 03595 03596 if (reboot_now) { 03597 LOG_MSG("CPU change requires 
guest system reboot"); 03598 throw int(3); 03599 } 03600 03601 if (CPU_CycleAutoAdjust) GFX_SetTitle((Bit32s)CPU_CyclePercUsed,-1,-1,false); 03602 else GFX_SetTitle((Bit32s)CPU_CycleMax,-1,-1,false); 03603 // savestate support 03604 cpu.hlt.old_decoder=cpudecoder; 03605 return true; 03606 } 03607 ~CPU(){ /* empty */}; 03608 }; 03609 03610 static CPU * test; 03611 03612 void CPU_ShutDown(Section* sec) { 03613 (void)sec;//UNUSED 03614 03615 #if (C_DYNAMIC_X86) 03616 CPU_Core_Dyn_X86_Cache_Close(); 03617 #elif (C_DYNREC) 03618 CPU_Core_Dynrec_Cache_Close(); 03619 #endif 03620 delete test; 03621 } 03622 03623 void CPU_OnReset(Section* sec) { 03624 (void)sec;//UNUSED 03625 03626 LOG(LOG_CPU,LOG_DEBUG)("CPU reset"); 03627 03628 CPU_Snap_Back_To_Real_Mode(); 03629 CPU_Snap_Back_Forget(); 03630 CPU_SetFlags(0,~0UL); 03631 03632 Segs.limit[cs]=0xFFFF; 03633 Segs.expanddown[cs]=false; 03634 if (CPU_ArchitectureType >= CPU_ARCHTYPE_386) { 03635 /* 386 and later start at F000:FFF0 with CS base set to FFFF0000 (really?) 
*/ 03636 SegSet16(cs,0xF000); 03637 reg_eip=0xFFF0; 03638 Segs.phys[cs]=0xFFFF0000; 03639 } 03640 else if (CPU_ArchitectureType >= CPU_ARCHTYPE_286) { 03641 /* 286 start at F000:FFF0 (FFFF0) */ 03642 SegSet16(cs,0xF000); 03643 reg_eip=0xFFF0; 03644 } 03645 else { 03646 /* 8086 start at FFFF:0000 (FFFF0) */ 03647 SegSet16(cs,0xFFFF); 03648 reg_eip=0x0000; 03649 } 03650 } 03651 03652 void CPU_OnSectionPropChange(Section *x) { 03653 if (test != NULL) test->Change_Config(x); 03654 } 03655 03656 void CPU_Init() { 03657 LOG(LOG_MISC,LOG_DEBUG)("Initializing CPU"); 03658 03659 control->GetSection("cpu")->onpropchange.push_back(&CPU_OnSectionPropChange); 03660 03661 test = new CPU(control->GetSection("cpu")); 03662 AddExitFunction(AddExitFunctionFuncPair(CPU_ShutDown),true); 03663 AddVMEventFunction(VM_EVENT_RESET,AddVMEventFunctionFuncPair(CPU_OnReset)); 03664 } 03665 //initialize static members 03666 bool CPU::inited=false; 03667 03668 //save state support 03669 void DescriptorTable::SaveState( std::ostream& stream ) 03670 { 03671 WRITE_POD( &table_base, table_base ); 03672 WRITE_POD( &table_limit, table_limit ); 03673 } 03674 03675 03676 void DescriptorTable::LoadState( std::istream& stream ) 03677 { 03678 READ_POD( &table_base, table_base ); 03679 READ_POD( &table_limit, table_limit ); 03680 } 03681 03682 03683 void GDTDescriptorTable::SaveState(std::ostream& stream) 03684 { 03685 this->DescriptorTable::SaveState(stream); 03686 03687 03688 WRITE_POD( &ldt_base, ldt_base ); 03689 WRITE_POD( &ldt_limit, ldt_limit ); 03690 WRITE_POD( &ldt_value, ldt_value ); 03691 } 03692 03693 03694 void GDTDescriptorTable::LoadState(std::istream& stream) 03695 { 03696 this->DescriptorTable::LoadState(stream); 03697 03698 03699 READ_POD( &ldt_base, ldt_base ); 03700 READ_POD( &ldt_limit, ldt_limit ); 03701 READ_POD( &ldt_value, ldt_value ); 03702 } 03703 03704 03705 void TaskStateSegment::SaveState( std::ostream& stream ) 03706 { 03707 WRITE_POD( &desc.saved, desc.saved ); 03708 
WRITE_POD( &selector, selector ); 03709 WRITE_POD( &base, base ); 03710 WRITE_POD( &limit, limit ); 03711 WRITE_POD( &is386, is386 ); 03712 WRITE_POD( &valid, valid ); 03713 } 03714 03715 03716 void TaskStateSegment::LoadState( std::istream& stream ) 03717 { 03718 READ_POD( &desc.saved, desc.saved ); 03719 READ_POD( &selector, selector ); 03720 READ_POD( &base, base ); 03721 READ_POD( &limit, limit ); 03722 READ_POD( &is386, is386 ); 03723 READ_POD( &valid, valid ); 03724 } 03725 03726 // TODO: This looks to be unused 03727 Bit16u CPU_FindDecoderType( CPU_Decoder *decoder ) 03728 { 03729 (void)decoder;//UNUSED 03730 03731 Bit16u decoder_idx; 03732 03733 decoder_idx = 0xffff; 03734 03735 03736 if(0) {} 03737 else if( cpudecoder == &CPU_Core_Normal_Run ) decoder_idx = 0; 03738 else if( cpudecoder == &CPU_Core_Prefetch_Run ) decoder_idx = 1; 03739 #if !defined(C_EMSCRIPTEN) 03740 else if( cpudecoder == &CPU_Core_Simple_Run ) decoder_idx = 2; 03741 else if( cpudecoder == &CPU_Core_Full_Run ) decoder_idx = 3; 03742 else if( cpudecoder == &CPU_Core_Normal_Trap_Run ) decoder_idx = 100; 03743 #endif 03744 #if C_DYNAMIC_X86 03745 else if( cpudecoder == &CPU_Core_Dyn_X86_Run ) decoder_idx = 4; 03746 #endif 03747 #if (C_DYNREC) 03748 else if( cpudecoder == &CPU_Core_Dynrec_Run ) decoder_idx = 5; 03749 #endif 03750 else if( cpudecoder == &CPU_Core_Normal_Trap_Run ) decoder_idx = 100; 03751 #if C_DYNAMIC_X86 03752 else if( cpudecoder == &CPU_Core_Dyn_X86_Trap_Run ) decoder_idx = 101; 03753 #endif 03754 #if(C_DYNREC) 03755 else if( cpudecoder == &CPU_Core_Dynrec_Trap_Run ) decoder_idx = 102; 03756 #endif 03757 else if( cpudecoder == &HLT_Decode ) decoder_idx = 200; 03758 03759 03760 return decoder_idx; 03761 } 03762 03763 // TODO: This looks to be unused 03764 CPU_Decoder* CPU_IndexDecoderType(Bit16u decoder_idx) 03765 { 03766 switch (decoder_idx) { 03767 case 0: return &CPU_Core_Normal_Run; 03768 case 1: return &CPU_Core_Prefetch_Run; 03769 #if !defined(C_EMSCRIPTEN) 03770 case 
2: return &CPU_Core_Simple_Run; 03771 case 3: return &CPU_Core_Full_Run; 03772 #endif 03773 #if C_DYNAMIC_X86 03774 case 4: return &CPU_Core_Dyn_X86_Run; 03775 #endif 03776 #if (C_DYNREC) 03777 case 5: return &CPU_Core_Dynrec_Run; 03778 #endif 03779 case 100: return &CPU_Core_Normal_Trap_Run; 03780 #if C_DYNAMIC_X86 03781 case 101: return &CPU_Core_Dyn_X86_Trap_Run; 03782 #endif 03783 #if(C_DYNREC) 03784 case 102: return &CPU_Core_Dynrec_Trap_Run; 03785 #endif 03786 case 200: return &HLT_Decode; 03787 default: return 0; 03788 } 03789 } 03790 03791 extern void POD_Save_CPU_Flags( std::ostream& stream ); 03792 extern void POD_Save_CPU_Paging( std::ostream& stream ); 03793 extern void POD_Load_CPU_Flags( std::istream& stream ); 03794 extern void POD_Load_CPU_Paging( std::istream& stream ); 03795 03796 #if (C_DYNAMIC_X86) 03797 extern void CPU_Core_Dyn_X86_Cache_Reset(void); 03798 #endif 03799 03800 Bitu vm86_fake_io_seg = 0xF000; /* unused area in BIOS for IO instruction */ 03801 Bitu vm86_fake_io_off = 0x0700; 03802 Bitu vm86_fake_io_offs[3*2]={0}; /* offsets from base off because of dynamic core cache */ 03803 03804 void init_vm86_fake_io() { 03805 Bitu phys = (vm86_fake_io_seg << 4) + vm86_fake_io_off; 03806 Bitu wo = 0; 03807 03808 if (vm86_fake_io_offs[0] != 0) 03809 return; 03810 03811 /* read */ 03812 vm86_fake_io_offs[0] = vm86_fake_io_off + wo; 03813 phys_writeb((PhysPt)(phys+wo+0x00),(Bit8u)0xEC); /* IN AL,DX */ 03814 phys_writeb((PhysPt)(phys+wo+0x01),(Bit8u)0xCB); /* RETF */ 03815 wo += 2; 03816 03817 vm86_fake_io_offs[1] = vm86_fake_io_off + wo; 03818 phys_writeb((PhysPt)(phys+wo+0x00),(Bit8u)0xED); /* IN AX,DX */ 03819 phys_writeb((PhysPt)(phys+wo+0x01),(Bit8u)0xCB); /* RETF */ 03820 wo += 2; 03821 03822 vm86_fake_io_offs[2] = vm86_fake_io_off + wo; 03823 phys_writeb((PhysPt)(phys+wo+0x00),(Bit8u)0x66); /* IN EAX,DX */ 03824 phys_writeb((PhysPt)(phys+wo+0x01),(Bit8u)0xED); 03825 phys_writeb((PhysPt)(phys+wo+0x02),(Bit8u)0xCB); /* RETF */ 03826 wo += 3; 
03827 03828 /* write */ 03829 vm86_fake_io_offs[3] = vm86_fake_io_off + wo; 03830 phys_writeb((PhysPt)(phys+wo+0x00),(Bit8u)0xEE); /* OUT DX,AL */ 03831 phys_writeb((PhysPt)(phys+wo+0x01),(Bit8u)0xCB); /* RETF */ 03832 wo += 2; 03833 03834 vm86_fake_io_offs[4] = vm86_fake_io_off + wo; 03835 phys_writeb((PhysPt)(phys+wo+0x00),(Bit8u)0xEF); /* OUT DX,AX */ 03836 phys_writeb((PhysPt)(phys+wo+0x01),(Bit8u)0xCB); /* RETF */ 03837 wo += 2; 03838 03839 vm86_fake_io_offs[5] = vm86_fake_io_off + wo; 03840 phys_writeb((PhysPt)(phys+wo+0x00),(Bit8u)0x66); /* OUT DX,EAX */ 03841 phys_writeb((PhysPt)(phys+wo+0x01),(Bit8u)0xEF); 03842 phys_writeb((PhysPt)(phys+wo+0x02),(Bit8u)0xCB); /* RETF */ 03843 } 03844 03845 Bitu CPU_ForceV86FakeIO_In(Bitu port,Bitu len) { 03846 Bit32u old_ax,old_dx,ret; 03847 03848 /* save EAX:EDX and setup DX for IN instruction */ 03849 old_ax = reg_eax; 03850 old_dx = reg_edx; 03851 03852 reg_edx = (Bit32u)port; 03853 03854 /* make the CPU execute that instruction */ 03855 CALLBACK_RunRealFar((Bit16u)vm86_fake_io_seg, (Bit16u)vm86_fake_io_offs[(len==4?2:(len-1))+0]); 03856 03857 /* take whatever the CPU or OS v86 trap left in EAX and return it */ 03858 ret = reg_eax; 03859 if (len == 1) ret &= 0xFF; 03860 else if (len == 2) ret &= 0xFFFF; 03861 03862 /* then restore EAX:EDX */ 03863 reg_eax = old_ax; 03864 reg_edx = old_dx; 03865 03866 return ret; 03867 } 03868 03869 void CPU_ForceV86FakeIO_Out(Bitu port,Bitu val,Bitu len) { 03870 Bit32u old_eax,old_edx; 03871 03872 /* save EAX:EDX and setup DX/AX for OUT instruction */ 03873 old_eax = reg_eax; 03874 old_edx = reg_edx; 03875 03876 reg_edx = (Bit32u)port; 03877 reg_eax = (Bit32u)val; 03878 03879 /* make the CPU execute that instruction */ 03880 CALLBACK_RunRealFar((Bit16u)vm86_fake_io_seg, (Bit16u)vm86_fake_io_offs[(len==4?2:(len-1))+3]); 03881 03882 /* then restore EAX:EDX */ 03883 reg_eax = old_eax; 03884 reg_edx = old_edx; 03885 } 03886 03887 /* pentium machine-specific registers */ 03888 bool 
CPU_RDMSR() { 03889 if (!enable_msr) return false; 03890 03891 switch (reg_ecx) { 03892 default: 03893 LOG(LOG_CPU,LOG_NORMAL)("RDMSR: Unknown register 0x%08lx",(unsigned long)reg_ecx); 03894 break; 03895 } 03896 03897 if (ignore_undefined_msr) { 03898 /* wing it and hope nobody notices */ 03899 reg_edx = reg_eax = 0; 03900 return true; 03901 } 03902 03903 return false; /* unknown reg, signal illegal opcode */ 03904 } 03905 03906 bool CPU_WRMSR() { 03907 if (!enable_msr) return false; 03908 03909 switch (reg_ecx) { 03910 default: 03911 LOG(LOG_CPU,LOG_NORMAL)("WRMSR: Unknown register 0x%08lx (write 0x%08lx:0x%08lx)",(unsigned long)reg_ecx,(unsigned long)reg_edx,(unsigned long)reg_eax); 03912 break; 03913 } 03914 03915 if (ignore_undefined_msr) return true; /* ignore */ 03916 return false; /* unknown reg, signal illegal opcode */ 03917 } 03918 03919 /* NTS: Hopefully by implementing this Windows ME can stop randomly crashing when cputype=pentium */ 03920 void CPU_CMPXCHG8B(PhysPt eaa) { 03921 uint32_t hi,lo; 03922 03923 /* NTS: We assume that, if reading doesn't cause a page fault, writing won't either */ 03924 hi = (uint32_t)mem_readd(eaa+(PhysPt)4); 03925 lo = (uint32_t)mem_readd(eaa); 03926 03927 LOG_MSG("Experimental CMPXCHG8B implementation executed. EDX:EAX=0x%08lx%08lx ECX:EBX=0x%08lx%08lx EA=0x%08lx MEM64=0x%08lx%08lx", 03928 (unsigned long)reg_edx, 03929 (unsigned long)reg_eax, 03930 (unsigned long)reg_ecx, 03931 (unsigned long)reg_ebx, 03932 (unsigned long)eaa, 03933 (unsigned long)hi, 03934 (unsigned long)lo); 03935 03936 /* Compare EDX:EAX with 64-bit DWORD at memaddr 'eaa'. 03937 * if they match, ZF=1 and write ECX:EBX to memaddr 'eaa'. 
03938 * else, ZF=0 and load memaddr 'eaa' into EDX:EAX */ 03939 if (reg_edx == hi && reg_eax == lo) { 03940 mem_writed(eaa+(PhysPt)4,reg_ecx); 03941 mem_writed(eaa, reg_ebx); 03942 SETFLAGBIT(ZF,true); 03943 } 03944 else { 03945 SETFLAGBIT(ZF,false); 03946 reg_edx = hi; 03947 reg_eax = lo; 03948 } 03949 } 03950 03951 void CPU_Core_Dyn_X86_SaveDHFPUState(void) { 03952 } 03953 03954 void CPU_Core_Dyn_X86_RestoreDHFPUState(void) { 03955 } 03956 03957 namespace 03958 { 03959 class SerializeCPU : public SerializeGlobalPOD 03960 { 03961 public: 03962 SerializeCPU() : SerializeGlobalPOD("CPU") 03963 {} 03964 03965 private: 03966 virtual void getBytes(std::ostream& stream) 03967 { 03968 Bit16u decoder_idx; 03969 03970 // UNUSED 03971 // extern Bits PageFaultCore(void); 03972 // extern Bits IOFaultCore(void); 03973 03974 03975 03976 decoder_idx = CPU_FindDecoderType( cpudecoder ); 03977 03978 //******************************************** 03979 //******************************************** 03980 //******************************************** 03981 03982 SerializeGlobalPOD::getBytes(stream); 03983 03984 03985 // - pure data 03986 WRITE_POD( &cpu_regs, cpu_regs ); 03987 03988 WRITE_POD( &cpu.cpl, cpu.cpl ); 03989 WRITE_POD( &cpu.mpl, cpu.mpl ); 03990 WRITE_POD( &cpu.cr0, cpu.cr0 ); 03991 WRITE_POD( &cpu.pmode, cpu.pmode ); 03992 cpu.gdt.SaveState(stream); 03993 cpu.idt.SaveState(stream); 03994 WRITE_POD( &cpu.stack, cpu.stack ); 03995 WRITE_POD( &cpu.code, cpu.code ); 03996 WRITE_POD( &cpu.hlt.cs, cpu.hlt.cs ); 03997 WRITE_POD( &cpu.hlt.eip, cpu.hlt.eip ); 03998 WRITE_POD( &cpu.exception, cpu.exception ); 03999 WRITE_POD( &cpu.direction, cpu.direction ); 04000 WRITE_POD( &cpu.trap_skip, cpu.trap_skip ); 04001 WRITE_POD( &cpu.drx, cpu.drx ); 04002 WRITE_POD( &cpu.trx, cpu.trx ); 04003 04004 WRITE_POD( &Segs, Segs ); 04005 WRITE_POD( &CPU_Cycles, CPU_Cycles ); 04006 WRITE_POD( &CPU_CycleLeft, CPU_CycleLeft ); 04007 WRITE_POD( &CPU_IODelayRemoved, CPU_IODelayRemoved ); 04008 
cpu_tss.SaveState(stream); 04009 WRITE_POD( &lastint, lastint ); 04010 04011 //******************************************** 04012 //******************************************** 04013 //******************************************** 04014 04015 // - reloc func ptr 04016 WRITE_POD( &decoder_idx, decoder_idx ); 04017 04018 POD_Save_CPU_Flags(stream); 04019 POD_Save_CPU_Paging(stream); 04020 } 04021 04022 virtual void setBytes(std::istream& stream) 04023 { 04024 Bit16u decoder_idx; 04025 Bit16u decoder_old; 04026 04027 04028 04029 04030 04031 decoder_old = CPU_FindDecoderType( cpudecoder ); 04032 04033 //******************************************** 04034 //******************************************** 04035 //******************************************** 04036 04037 SerializeGlobalPOD::setBytes(stream); 04038 04039 04040 // - pure data 04041 READ_POD( &cpu_regs, cpu_regs ); 04042 04043 READ_POD( &cpu.cpl, cpu.cpl ); 04044 READ_POD( &cpu.mpl, cpu.mpl ); 04045 READ_POD( &cpu.cr0, cpu.cr0 ); 04046 READ_POD( &cpu.pmode, cpu.pmode ); 04047 cpu.gdt.LoadState(stream); 04048 cpu.idt.LoadState(stream); 04049 READ_POD( &cpu.stack, cpu.stack ); 04050 READ_POD( &cpu.code, cpu.code ); 04051 READ_POD( &cpu.hlt.cs, cpu.hlt.cs ); 04052 READ_POD( &cpu.hlt.eip, cpu.hlt.eip ); 04053 READ_POD( &cpu.exception, cpu.exception ); 04054 READ_POD( &cpu.direction, cpu.direction ); 04055 READ_POD( &cpu.trap_skip, cpu.trap_skip ); 04056 READ_POD( &cpu.drx, cpu.drx ); 04057 READ_POD( &cpu.trx, cpu.trx ); 04058 04059 READ_POD( &Segs, Segs ); 04060 READ_POD( &CPU_Cycles, CPU_Cycles ); 04061 READ_POD( &CPU_CycleLeft, CPU_CycleLeft ); 04062 READ_POD( &CPU_IODelayRemoved, CPU_IODelayRemoved ); 04063 cpu_tss.LoadState(stream); 04064 READ_POD( &lastint, lastint ); 04065 04066 //******************************************** 04067 //******************************************** 04068 //******************************************** 04069 04070 // - reloc func ptr 04071 READ_POD( &decoder_idx, decoder_idx ); 04072 
04073 04074 04075 POD_Load_CPU_Flags(stream); 04076 POD_Load_CPU_Paging(stream); 04077 04078 //******************************************* 04079 //******************************************* 04080 //******************************************* 04081 04082 // switch to running core 04083 if( decoder_idx < 100 ) { 04084 switch( decoder_old ) { 04085 // run -> run (0-99) 04086 04087 // trap -> run 04088 case 100: cpudecoder = CPU_IndexDecoderType(0); break; 04089 case 101: cpudecoder = CPU_IndexDecoderType(4); break; 04090 case 102: cpudecoder = CPU_IndexDecoderType(5); break; 04091 04092 // hlt -> run 04093 case 200: cpudecoder = cpu.hlt.old_decoder; break; 04094 } 04095 } 04096 04097 // switch to trap core 04098 else if( decoder_idx < 200 ) { 04099 switch( decoder_old ) { 04100 // run -> trap 04101 case 0: 04102 case 1: 04103 case 2: 04104 case 3: cpudecoder = CPU_IndexDecoderType(100); break; 04105 case 4: cpudecoder = CPU_IndexDecoderType(101); break; 04106 case 5: cpudecoder = CPU_IndexDecoderType(102); break; 04107 04108 // trap -> trap (100-199) 04109 04110 // hlt -> trap 04111 case 200: { 04112 switch( CPU_FindDecoderType(cpu.hlt.old_decoder) ) { 04113 case 0: 04114 case 1: 04115 case 2: 04116 case 3: cpudecoder = CPU_IndexDecoderType(100); break; 04117 case 4: cpudecoder = CPU_IndexDecoderType(101); break; 04118 case 5: cpudecoder = CPU_IndexDecoderType(102); break; 04119 } 04120 } 04121 } 04122 } 04123 04124 // switch to hlt core 04125 else if( decoder_idx < 300 ) { 04126 cpudecoder = CPU_IndexDecoderType(200); 04127 } 04128 #if (C_DYNAMIC_X86) 04129 CPU_Core_Dyn_X86_Cache_Reset(); 04130 #elif (C_DYNREC) 04131 CPU_Core_Dynrec_Cache_Reset(); 04132 #endif 04133 } 04134 } dummy; 04135 }