/[pcsx2_0.9.7]/trunk/pcsx2/x86/microVU_Compile.inl
ViewVC logotype

Annotation of /trunk/pcsx2/x86/microVU_Compile.inl

Parent Directory Parent Directory | Revision Log Revision Log


Revision 273 - (hide annotations) (download)
Fri Nov 12 01:10:22 2010 UTC (9 years, 7 months ago) by william
File size: 19414 byte(s)
Auto Commited Import of: pcsx2-0.9.7-DEBUG (upstream: v0.9.7.4013 local: v0.9.7.197-latest) in ./trunk
1 william 31 /* PCSX2 - PS2 Emulator for PCs
2     * Copyright (C) 2002-2010 PCSX2 Dev Team
3     *
4     * PCSX2 is free software: you can redistribute it and/or modify it under the terms
5     * of the GNU Lesser General Public License as published by the Free Software Found-
6     * ation, either version 3 of the License, or (at your option) any later version.
7     *
8     * PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
9     * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
10     * PURPOSE. See the GNU General Public License for more details.
11     *
12     * You should have received a copy of the GNU General Public License along with PCSX2.
13     * If not, see <http://www.gnu.org/licenses/>.
14     */
15    
16     #pragma once
17    
18     //------------------------------------------------------------------
19 william 62 // Messages Called at Execution Time...
20 william 31 //------------------------------------------------------------------
21    
22 william 62 static void __fastcall mVUbadOp0(u32 prog, u32 pc) { Console.Error("microVU0 Warning: Exiting... Block started with illegal opcode. [%04x] [%x]", pc, prog); }
23     static void __fastcall mVUbadOp1(u32 prog, u32 pc) { Console.Error("microVU1 Warning: Exiting... Block started with illegal opcode. [%04x] [%x]", pc, prog); }
24     static void __fastcall mVUwarning0(u32 prog) { Console.Error("microVU0 Warning: Exiting from Possible Infinite Loop [%04x] [%x]", prog); }
25     static void __fastcall mVUwarning1(u32 prog) { Console.Error("microVU1 Warning: Exiting from Possible Infinite Loop [%04x] [%x]", prog); }
26     static void __fastcall mVUprintPC1(u32 pc) { Console.WriteLn("Block Start PC = 0x%04x", pc); }
27     static void __fastcall mVUprintPC2(u32 pc) { Console.WriteLn("Block End PC = 0x%04x", pc); }
28 william 31
29     //------------------------------------------------------------------
30 william 62 // Program Range Checking and Setting up Ranges
31 william 31 //------------------------------------------------------------------
32    
33     // Used by mVUsetupRange
34 william 62 static __fi void mVUcheckIsSame(mV) {
35 william 31 if (mVU->prog.isSame == -1) {
36 william 62 mVU->prog.isSame = !memcmp_mmx((u8*)mVUcurProg.data, mVU->regs().Micro, mVU->microMemSize);
37 william 31 }
38     if (mVU->prog.isSame == 0) {
39     if (!isVU1) mVUcacheProg<0>(*mVU->prog.cur);
40     else mVUcacheProg<1>(*mVU->prog.cur);
41     mVU->prog.isSame = 1;
42     }
43     }
44    
// Sets up microProgram PC ranges based on whats been recompiled
// isStartPC == true : record the start of a newly compiled block (opens a
//                     range with end == -1 at the front of the deque).
// isStartPC == false: close the currently open front range (mVUrange) at pc
//                     and merge it with any older overlapping ranges.
static void mVUsetupRange(microVU* mVU, s32 pc, bool isStartPC) {
	deque<microRange>*& ranges = mVUcurProg.ranges;
	pc &= mVU->microMemSize - 8; // wrap pc into micro-memory bounds

	if (isStartPC) { // Check if startPC is already within a block we've recompiled
		deque<microRange>::const_iterator it(ranges->begin());
		for ( ; it != ranges->end(); ++it) {
			if ((pc >= it[0].start) && (pc <= it[0].end)) {
				if (it[0].start != it[0].end)
					return; // Last case makes sure its not a 1-opcode EvilBlock
			}
		}
	}
	elif (mVUrange.end != -1) return; // Above case was true

	mVUcheckIsSame(mVU);

	if (isStartPC) {
		// Open a new range at the front; end == -1 marks it as in-progress
		microRange mRange = {pc, -1};
		ranges->push_front(mRange);
		return;
	}
	if (mVUrange.start <= pc) {
		mVUrange.end = pc;
		bool mergedRange = 0;
		s32 rStart = mVUrange.start;
		s32 rEnd = mVUrange.end;
		// Fold the freshly closed front range into any older overlapping range
		// (skip the front entry itself, hence the pre-increment)
		deque<microRange>::iterator it(ranges->begin());
		for (++it; it != ranges->end(); ++it) {
			if((it[0].start >= rStart) && (it[0].start <= rEnd)) {
				it[0].end = aMax(it[0].end, rEnd);
				mergedRange = 1;
			}
			elif((it[0].end >= rStart) && (it[0].end <= rEnd)) {
				it[0].start = aMin(it[0].start, rStart);
				mergedRange = 1;
			}
		}
		if (mergedRange) {
			//DevCon.WriteLn(Color_Green, "microVU%d: Prog Range Merging", mVU->index);
			ranges->erase(ranges->begin()); // front range's span now lives in the merged entry
		}
	}
	else {
		// end pc wrapped around micro memory: split into [start, memEnd] plus [0, pc]
		DevCon.WriteLn(Color_Green, "microVU%d: Prog Range Wrap [%04x] [%d]", mVU->index, mVUrange.start, mVUrange.end);
		mVUrange.end = mVU->microMemSize;
		microRange mRange = {0, pc};
		ranges->push_front(mRange);
	}
}
96    
97 william 62 //------------------------------------------------------------------
98     // Execute VU Opcode/Instruction (Upper and Lower)
99     //------------------------------------------------------------------
100 william 31
__ri void doUpperOp(mV) { mVUopU(mVU, 1); mVUdivSet(mVU); }            // recompile upper op (pass 2) then update DIV/SQRT flag state
__ri void doLowerOp(mV) { incPC(-1); mVUopL(mVU, 1); incPC(1); }       // iPC points at the upper op here, so step back one word first
__ri void flushRegs(mV) { if (!doRegAlloc) mVU->regAlloc->flushAll(); } // with reg-alloc disabled, flush after every instruction
104    
// If this pair has the I-bit set, store the lower instruction word (the I
// immediate) into the VU's I register, clamping it if overflow checks are on.
static void doIbit(mV) {
	if (mVUup.iBit) {
		incPC(-1); // step to the lower word, which holds the raw I value
		u32 tempI;
		mVU->regAlloc->clearRegVF(33); // invalidate any cached copy of I (slot 33 in the VF allocator)

		if (CHECK_VU_OVERFLOW && ((curI & 0x7fffffff) >= 0x7f800000)) {
			DevCon.WriteLn(Color_Green,"microVU%d: Clamping I Reg", mVU->index);
			tempI = (0x80000000 & curI) | 0x7f7fffff; // Clamp I Reg to max normal float, preserving sign
		}
		else tempI = curI;

		xMOV(ptr32[&mVU->getVI(REG_I)], tempI); // emit the store of the (possibly clamped) immediate
		incPC(1);
	}
}
121    
// Handles a pair where the lower op must execute first but both ops touch the
// same VF register: the register's old value is backed up so the upper op
// still sees its pre-lower-op contents, then the lower op's result is
// written back afterwards.
static void doSwapOp(mV) {
	if (mVUinfo.backupVF && !mVUlow.noWriteVF) {
		DevCon.WriteLn(Color_Green, "microVU%d: Backing Up VF Reg [%04x]", getIndex, xPC);

		// Allocate t1 first for better chance of reg-alloc
		const xmm& t1 = mVU->regAlloc->allocReg(mVUlow.VF_write.reg);
		const xmm& t2 = mVU->regAlloc->allocReg();
		xMOVAPS(t2, t1); // Backup VF reg
		mVU->regAlloc->clearNeeded(t1);

		mVUopL(mVU, 1); // recompile the lower op (writes the VF reg)

		// Exchange the lower op's result (in the reg) with the old value (in t2)
		const xmm& t3 = mVU->regAlloc->allocReg(mVUlow.VF_write.reg, mVUlow.VF_write.reg, 0xf, 0);
		xXOR.PS(t2, t3); // Swap new and old values of the register
		xXOR.PS(t3, t2); // Uses xor swap trick...
		xXOR.PS(t2, t3);
		mVU->regAlloc->clearNeeded(t3);

		incPC(1);
		doUpperOp(mVU); // upper op reads the restored (old) register value

		// Finally commit the lower op's result (held in t2) to the VF reg
		const xmm& t4 = mVU->regAlloc->allocReg(-1, mVUlow.VF_write.reg, 0xf);
		xMOVAPS(t4, t2);
		mVU->regAlloc->clearNeeded(t4);
		mVU->regAlloc->clearNeeded(t2);
	}
	else { mVUopL(mVU, 1); incPC(1); flushRegs(mVU); doUpperOp(mVU); } // no backup needed: just run lower then upper
}
150    
// Recompiles one upper/lower instruction pair (second pass).
static void mVUexecuteInstruction(mV) {
	if (mVUlow.isNOP) { incPC(1); doUpperOp(mVU); flushRegs(mVU); doIbit(mVU); }             // lower is NOP (or I-bit immediate): upper only
	elif(!mVUinfo.swapOps) { incPC(1); doUpperOp(mVU); flushRegs(mVU); doLowerOp(mVU); }     // normal order: upper first, then lower
	else doSwapOp(mVU);                                                                      // ops conflict on a VF reg: lower runs first with backup
	flushRegs(mVU);
}
157    
158     //------------------------------------------------------------------
159     // Warnings / Errors / Illegal Instructions
160     //------------------------------------------------------------------
161    
162     // If 1st op in block is a bad opcode, then don't compile rest of block (Dawn of Mana Level 2)
163     static __fi void mVUcheckBadOp(mV) {
164     if (mVUinfo.isBadOp && mVUcount == 0) {
165     mVUinfo.isEOB = true;
166     Console.Warning("microVU Warning: First Instruction of block contains illegal opcode...");
167     }
168     }
169    
// Prints msg when exiting block early if 1st op was a bad opcode (Dawn of Mana Level 2)
// Emits a runtime call to mVUbadOp0/1; count == 0 means this is the block's first op.
static __fi void handleBadOp(mV, int count) {
	if (mVUinfo.isBadOp && count == 0) {
		mVUbackupRegs(mVU, true);        // preserve host regs around the C call
		xMOV(gprT2, mVU->prog.cur->idx); // fastcall arg 1: program index
		xMOV(gprT3, xPC);                // fastcall arg 2: VU pc
		if (!isVU1) xCALL(mVUbadOp0);
		else xCALL(mVUbadOp1);
		mVUrestoreRegs(mVU, true);
	}
}
181    
// Warns about (and neutralizes) a branch sitting in an E-bit delay slot, and
// flags VI writes in a branch delay slot so the old value can be backed up.
static __ri void branchWarning(mV) {
	incPC(-2); // inspect the previous instruction pair
	if (mVUup.eBit && mVUbranch) {
		incPC(2);
		Console.Error("microVU%d Warning: Branch in E-bit delay slot! [%04x]", mVU->index, xPC);
		mVUlow.isNOP = 1; // the branch is ignored
	}
	else incPC(2);
	if (mVUinfo.isBdelay) { // Check if VI Reg Written to on Branch Delay Slot Instruction
		if (mVUlow.VI_write.reg && mVUlow.VI_write.used && !mVUlow.readFlags) {
			mVUlow.backupVI = 1;
			mVUregs.viBackUp = mVUlow.VI_write.reg; // remember which VI reg needs backup
		}
	}
}
197    
198 william 62 static __fi void eBitPass1(mV, int& branch) {
199 william 31 if (mVUregs.blockType != 1) {
200     branch = 1;
201     mVUup.eBit = 1;
202     }
203     }
204    
// Warns about pathological branch chains and checks for an E-bit sitting in
// the branch delay slot (which forces the next block to be a 1-op stub).
static __ri void eBitWarning(mV) {
	if (mVUpBlock->pState.blockType == 1) Console.Error("microVU%d Warning: Branch, E-bit, Branch! [%04x]", mVU->index, xPC);
	if (mVUpBlock->pState.blockType == 2) Console.Error("microVU%d Warning: Branch, Branch, Branch! [%04x]", mVU->index, xPC);
	incPC(2); // look at the delay-slot instruction
	if (curI & _Ebit_) {
		DevCon.Warning("microVU%d: E-bit in Branch delay slot! [%04x]", mVU->index, xPC);
		mVUregs.blockType = 1; // next block becomes a 1-op E-bit stub
	}
	incPC(-2);
}
215    
216 william 62 //------------------------------------------------------------------
217     // Cycles / Pipeline State / Early Exit from Execution
218     //------------------------------------------------------------------
219     __fi void optimizeReg(u8& rState) { rState = (rState==1) ? 0 : rState; }
220     __fi void calcCycles(u8& reg, u8 x) { reg = ((reg > x) ? (reg - x) : 0); }
221     __fi void tCycles(u8& dest, u8& src) { dest = aMax(dest, src); }
__fi void incP(mV) { mVU->p ^= 1; } // toggle which P-reg instance is current
__fi void incQ(mV) { mVU->q ^= 1; } // toggle which Q-reg instance is current
224    
225 william 31 // Optimizes the End Pipeline State Removing Unnecessary Info
226 william 62 // If the cycles remaining is just '1', we don't have to transfer it to the next block
227     // because mVU automatically decrements this number at the start of its loop,
228     // so essentially '1' will be the same as '0'...
229     static void mVUoptimizePipeState(mV) {
230 william 31 for (int i = 0; i < 32; i++) {
231     optimizeReg(mVUregs.VF[i].x);
232     optimizeReg(mVUregs.VF[i].y);
233     optimizeReg(mVUregs.VF[i].z);
234     optimizeReg(mVUregs.VF[i].w);
235     }
236     for (int i = 0; i < 16; i++) {
237     optimizeReg(mVUregs.VI[i]);
238     }
239 william 62 if (mVUregs.q) { optimizeReg(mVUregs.q); if (!mVUregs.q) { incQ(mVU); } }
240     if (mVUregs.p) { optimizeReg(mVUregs.p); if (!mVUregs.p) { incP(mVU); } }
241 william 31 mVUregs.r = 0; // There are no stalls on the R-reg, so its Safe to discard info
242     }
243    
244 william 62 void mVUincCycles(mV, int x) {
245 william 31 mVUcycles += x;
246     for (int z = 31; z > 0; z--) {
247     calcCycles(mVUregs.VF[z].x, x);
248     calcCycles(mVUregs.VF[z].y, x);
249     calcCycles(mVUregs.VF[z].z, x);
250     calcCycles(mVUregs.VF[z].w, x);
251     }
252     for (int z = 16; z > 0; z--) {
253     calcCycles(mVUregs.VI[z], x);
254     }
255     if (mVUregs.q) {
256     if (mVUregs.q > 4) { calcCycles(mVUregs.q, x); if (mVUregs.q <= 4) { mVUinfo.doDivFlag = 1; } }
257     else { calcCycles(mVUregs.q, x); }
258 william 62 if (!mVUregs.q) { incQ(mVU); }
259 william 31 }
260     if (mVUregs.p) {
261     calcCycles(mVUregs.p, x);
262 william 62 if (!mVUregs.p || mVUregsTemp.p) { incP(mVU); }
263 william 31 }
264     if (mVUregs.xgkick) {
265     calcCycles(mVUregs.xgkick, x);
266     if (!mVUregs.xgkick) { mVUinfo.doXGKICK = 1; }
267     }
268     calcCycles(mVUregs.r, x);
269     }
270    
271 william 62 // Helps check if upper/lower ops read/write to same regs...
272     void cmpVFregs(microVFreg& VFreg1, microVFreg& VFreg2, bool& xVar) {
273     if (VFreg1.reg == VFreg2.reg) {
274     if ((VFreg1.x && VFreg2.x) || (VFreg1.y && VFreg2.y)
275     || (VFreg1.z && VFreg2.z) || (VFreg1.w && VFreg2.w))
276     { xVar = 1; }
277     }
278 william 31 }
279    
// First-pass stall accounting for one instruction pair: resolves upper/lower
// register conflicts, then records when each written register's result will
// be ready (mVUregsTemp index 0 = upper op, index 1 = lower op).
void mVUsetCycles(mV) {
	mVUincCycles(mVU, mVUstall);
	// If upper Op && lower Op write to same VF reg:
	if ((mVUregsTemp.VFreg[0] == mVUregsTemp.VFreg[1]) && mVUregsTemp.VFreg[0]) {
		if (mVUregsTemp.r || mVUregsTemp.VI) mVUlow.noWriteVF = 1; // lower still has other side-effects: keep it but drop its VF write
		else mVUlow.isNOP = 1; // If lower Op doesn't modify anything else, then make it a NOP
	}
	// If lower op reads a VF reg that upper Op writes to:
	if ((mVUlow.VF_read[0].reg || mVUlow.VF_read[1].reg) && mVUup.VF_write.reg) {
		cmpVFregs(mVUup.VF_write, mVUlow.VF_read[0], mVUinfo.swapOps);
		cmpVFregs(mVUup.VF_write, mVUlow.VF_read[1], mVUinfo.swapOps);
	}
	// If above case is true, and upper op reads a VF reg that lower Op Writes to:
	if (mVUinfo.swapOps && ((mVUup.VF_read[0].reg || mVUup.VF_read[1].reg) && mVUlow.VF_write.reg)) {
		cmpVFregs(mVUlow.VF_write, mVUup.VF_read[0], mVUinfo.backupVF);
		cmpVFregs(mVUlow.VF_write, mVUup.VF_read[1], mVUinfo.backupVF);
	}

	// Merge this pair's write-latencies into the pipeline state (max of
	// existing stall and the new one): upper op's VF write...
	tCycles(mVUregs.VF[mVUregsTemp.VFreg[0]].x, mVUregsTemp.VF[0].x);
	tCycles(mVUregs.VF[mVUregsTemp.VFreg[0]].y, mVUregsTemp.VF[0].y);
	tCycles(mVUregs.VF[mVUregsTemp.VFreg[0]].z, mVUregsTemp.VF[0].z);
	tCycles(mVUregs.VF[mVUregsTemp.VFreg[0]].w, mVUregsTemp.VF[0].w);

	// ...lower op's VF write...
	tCycles(mVUregs.VF[mVUregsTemp.VFreg[1]].x, mVUregsTemp.VF[1].x);
	tCycles(mVUregs.VF[mVUregsTemp.VFreg[1]].y, mVUregsTemp.VF[1].y);
	tCycles(mVUregs.VF[mVUregsTemp.VFreg[1]].z, mVUregsTemp.VF[1].z);
	tCycles(mVUregs.VF[mVUregsTemp.VFreg[1]].w, mVUregsTemp.VF[1].w);

	// ...and VI / Q / P / R / XGKICK latencies
	tCycles(mVUregs.VI[mVUregsTemp.VIreg], mVUregsTemp.VI);
	tCycles(mVUregs.q, mVUregsTemp.q);
	tCycles(mVUregs.p, mVUregsTemp.p);
	tCycles(mVUregs.r, mVUregsTemp.r);
	tCycles(mVUregs.xgkick, mVUregsTemp.xgkick);
}
314    
// Prints Start/End PC of blocks executed, for debugging...
// Emits a runtime call to mVUprintPC1/2 when block debugging is enabled.
static void mVUdebugPrintBlocks(microVU* mVU, bool isEndPC) {
	if (mVUdebugNow) {
		mVUbackupRegs(mVU, true); // preserve host regs around the C call
		xMOV(gprT2, xPC);         // fastcall arg: current VU pc
		if (isEndPC) xCALL(mVUprintPC2);
		else xCALL(mVUprintPC1);
		mVUrestoreRegs(mVU, true);
	}
}
325 william 31
326     // vu0 is allowed to exit early, so are dev builds (for inf loops)
327 william 62 __fi bool doEarlyExit(microVU* mVU) {
328 william 31 return IsDevBuild || !isVU1;
329     }
330    
// Saves Pipeline State for resuming from early exits
// Emits MOVs that store the *compile-time* pipeline state values as
// immediates into lpState, so resuming execution sees this block's state.
static __fi void mVUsavePipelineState(microVU* mVU) {
	u32* lpS = (u32*)&mVU->prog.lpState.vi15; // NOTE(review): assumes vi15 is the first word of microRegInfo — confirm against the struct layout
	for (int i = 0; i < (sizeof(microRegInfo)-4)/4; i++, lpS++) { // last 4 bytes excluded — presumably padding; verify
		xMOV(ptr32[lpS], lpS[0]); // store each state word's current value as an immediate
	}
}
338    
339 william 62 // Test cycles to see if we need to exit-early...
340     static void mVUtestCycles(microVU* mVU) {
341 william 31 iPC = mVUstartPC;
342     if (doEarlyExit(mVU)) {
343 william 62 xCMP(ptr32[&mVU->cycles], 0);
344     xForwardJG32 skip;
345     if (isVU0) {
346     // TEST32ItoM((uptr)&mVU->regs().flags, VUFLAG_MFLAGSET);
347     // xFowardJZ32 vu0jmp;
348     // mVUbackupRegs(mVU, true);
349     // xMOV(gprT2, mVU->prog.cur->idx);
350     // xCALL(mVUwarning0); // VU0 is allowed early exit for COP2 Interlock Simulation
351     // mVUbackupRegs(mVU, true);
352 william 31 mVUsavePipelineState(mVU);
353     mVUendProgram(mVU, NULL, 0);
354 william 62 // vu0jmp.SetTarget();
355     }
356     else {
357     mVUbackupRegs(mVU, true);
358     xMOV(gprT2, mVU->prog.cur->idx);
359     xCALL(mVUwarning1);
360     mVUbackupRegs(mVU, true);
361     mVUsavePipelineState(mVU);
362     mVUendProgram(mVU, NULL, 0);
363     }
364     skip.SetTarget();
365 william 31 }
366 william 62 xSUB(ptr32[&mVU->cycles], mVUcycles);
367 william 31 }
368    
369 william 62 //------------------------------------------------------------------
370     // Initializing
371     //------------------------------------------------------------------
372    
// This gets run at the start of every loop of mVU's first pass
// Logs notable instruction bits and clears the per-instruction bookkeeping.
static __fi void startLoop(mV) {
	if (curI & _Mbit_) { DevCon.WriteLn (Color_Green, "microVU%d: M-bit set!", getIndex); }
	if (curI & _Dbit_) { DevCon.WriteLn (Color_Green, "microVU%d: D-bit set!", getIndex); }
	if (curI & _Tbit_) { DevCon.WriteLn (Color_Green, "microVU%d: T-bit set!", getIndex); }
	memzero(mVUinfo);     // clear per-instruction analysis info
	memzero(mVUregsTemp); // clear per-instruction stall bookkeeping
}
381    
382 william 31 // Initialize VI Constants (vi15 propagates through blocks)
383 william 62 static __fi void mVUinitConstValues(microVU* mVU) {
384 william 31 for (int i = 0; i < 16; i++) {
385     mVUconstReg[i].isValid = 0;
386     mVUconstReg[i].regValue = 0;
387     }
388     mVUconstReg[15].isValid = mVUregs.vi15 >> 31;
389     mVUconstReg[15].regValue = mVUconstReg[15].isValid ? (mVUregs.vi15&0xffff) : 0;
390     }
391    
// Initialize Variables
// Resets all per-block compile state, loads the incoming pipeline state, and
// registers this block with the block manager before the first pass starts.
static __fi void mVUinitFirstPass(microVU* mVU, uptr pState, u8* thisPtr) {
	mVUstartPC = iPC; // Block Start PC
	mVUbranch = 0; // Branch Type
	mVUcount = 0; // Number of instructions ran
	mVUcycles = 0; // Skips "M" phase, and starts counting cycles at "T" stage
	mVU->p = 0; // All blocks start at p index #0
	mVU->q = 0; // All blocks start at q index #0
	if ((uptr)&mVUregs != pState) { // Loads up Pipeline State Info
		memcpy_const((u8*)&mVUregs, (u8*)pState, sizeof(microRegInfo));
	}
	if (doEarlyExit(mVU) && ((uptr)&mVU->prog.lpState != pState)) {
		// Keep the "last pipeline state" copy in sync for early-exit resume
		memcpy_const((u8*)&mVU->prog.lpState, (u8*)pState, sizeof(microRegInfo));
	}
	mVUblock.x86ptrStart = thisPtr; // host-code entry point of this block
	mVUpBlock = mVUblocks[mVUstartPC/2]->add(&mVUblock); // Add this block to block manager
	mVUregs.needExactMatch =(mVUregs.blockType || noFlagOpts) ? 7 : 0; // 1-Op blocks should just always set exactMatch (Sly Cooper)
	mVUregs.blockType = 0;
	mVUregs.viBackUp = 0;
	mVUregs.flags = 0;
	mVUsFlagHack = CHECK_VU_FLAGHACK;
	mVUinitConstValues(mVU);
}
415    
416     //------------------------------------------------------------------
417     // Recompiler
418     //------------------------------------------------------------------
419    
// Recompiles one block of VU micro code starting at startPC with the given
// incoming pipeline state. Pass 1 analyzes instructions and stalls; pass 2
// emits x86 code. Returns the host-code entry point of the block.
void* mVUcompile(microVU* mVU, u32 startPC, uptr pState) {

	microFlagCycles mFC;
	u8* thisPtr = x86Ptr;
	// blockType != 0 marks a 1-op stub block (E-bit in a branch delay slot)
	const u32 endCount = (((microRegInfo*)pState)->blockType) ? 1 : (mVU->microMemSize / 8);

	// First Pass
	iPC = startPC / 4;
	mVUsetupRange(mVU, startPC, 1); // Setup Program Bounds/Range
	mVU->regAlloc->reset(); // Reset regAlloc
	mVUinitFirstPass(mVU, pState, thisPtr);
	// branch state machine: 0 = none, 1 = E-bit seen (one delay-slot op left),
	// 2 = end after this op, 3 = branch delay slot, 4 = D/T-bit stop
	for (int branch = 0; mVUcount < endCount; mVUcount++) {
		incPC(1);
		startLoop(mVU);
		mVUincCycles(mVU, 1);
		mVUopU(mVU, 0); // analyze upper op
		mVUcheckBadOp(mVU);
		if (curI & _Ebit_) { eBitPass1(mVU, branch); }
		if (curI & _DTbit_) { branch = 4; }
		if (curI & _Mbit_) { mVUup.mBit = 1; }
		if (curI & _Ibit_) { mVUlow.isNOP = 1; mVUup.iBit = 1; } // lower word is an I immediate, not an op
		else { incPC(-1); mVUopL(mVU, 0); incPC(1); } // analyze lower op
		mVUsetCycles(mVU);
		// Record which Q/P instance this instruction reads vs. writes
		mVUinfo.readQ = mVU->q;
		mVUinfo.writeQ = !mVU->q;
		mVUinfo.readP = mVU->p;
		mVUinfo.writeP = !mVU->p;
		if (branch >= 2) { mVUinfo.isEOB = 1; if (branch == 3) { mVUinfo.isBdelay = 1; } mVUcount++; branchWarning(mVU); break; }
		else if (branch == 1) { branch = 2; }
		if (mVUbranch) { mVUsetFlagInfo(mVU); eBitWarning(mVU); branch = 3; mVUbranch = 0; }
		incPC(1);
	}

	// Fix up vi15 const info for propagation through blocks
	mVUregs.vi15 = (mVUconstReg[15].isValid && doConstProp) ? ((1<<31) | (mVUconstReg[15].regValue&0xffff)) : 0;

	mVUsetFlags(mVU, mFC); // Sets Up Flag instances
	mVUoptimizePipeState(mVU); // Optimize the End Pipeline State for nicer Block Linking
	mVUdebugPrintBlocks(mVU,0);// Prints Start/End PC of blocks executed, for debugging...
	mVUtestCycles(mVU); // Update VU Cycles and Exit Early if Necessary

	// Second Pass
	iPC = mVUstartPC;
	setCode();
	mVUbranch = 0;
	u32 x = 0;
	for (; x < endCount; x++) {
		if (mVUinfo.isEOB) { handleBadOp(mVU, x); x = 0xffff; } // force loop exit after this iteration
		if (mVUup.mBit) { xOR(ptr32[&mVU->regs().flags], VUFLAG_MFLAGSET); }
		mVUexecuteInstruction(mVU);
		if (mVUinfo.doXGKICK) { mVU_XGKICK_DELAY(mVU, 1); }
		if (isEvilBlock) { mVUsetupRange(mVU, xPC, 0); normJumpCompile(mVU, mFC, 1); return thisPtr; }
		else if (!mVUinfo.isBdelay) { incPC(1); }
		else {
			// Branch delay slot just compiled: close the range and emit the branch
			mVUsetupRange(mVU, xPC, 0);
			mVUdebugPrintBlocks(mVU,1);
			incPC(-3); // Go back to branch opcode
			switch (mVUlow.branch) {
				case 1: case 2: normBranch(mVU, mFC); return thisPtr; // B/BAL
				case 9: case 10: normJump (mVU, mFC); return thisPtr; // JR/JALR
				case 3: condBranch(mVU, mFC, Jcc_Equal); return thisPtr; // IBEQ
				case 4: condBranch(mVU, mFC, Jcc_GreaterOrEqual); return thisPtr; // IBGEZ
				case 5: condBranch(mVU, mFC, Jcc_Greater); return thisPtr; // IBGTZ
				case 6: condBranch(mVU, mFC, Jcc_LessOrEqual); return thisPtr; // IBLEQ
				case 7: condBranch(mVU, mFC, Jcc_Less); return thisPtr; // IBLTZ
				case 8: condBranch(mVU, mFC, Jcc_NotEqual); return thisPtr; // IBNEQ
			}
		}
	}
	if ((x == endCount) && (x!=1)) { Console.Error("microVU%d: Possible infinite compiling loop!", mVU->index); }

	// E-bit End
	mVUsetupRange(mVU, xPC-8, 0);
	mVUendProgram(mVU, &mFC, 1);
	return thisPtr;
}
496    
497     // Returns the entry point of the block (compiles it if not found)
498 william 62 static __fi void* mVUentryGet(microVU* mVU, microBlockManager* block, u32 startPC, uptr pState) {
499 william 31 microBlock* pBlock = block->search((microRegInfo*)pState);
500     if (pBlock) return pBlock->x86ptrStart;
501     else return mVUcompile(mVU, startPC, pState);
502     }
503    
// Search for Existing Compiled Block (if found, return x86ptr; else, compile and return x86ptr)
static __fi void* mVUblockFetch(microVU* mVU, u32 startPC, uptr pState) {

	pxAssumeDev( (startPC & 7) == 0, pxsFmt("microVU%d: unaligned startPC=0x%04x", mVU->index, startPC) );
	pxAssumeDev( startPC < mVU->microMemSize-8, pxsFmt("microVU%d: invalid startPC=0x%04x", mVU->index, startPC) );
	startPC &= mVU->microMemSize-8; // mask into micro-memory range (enforced even when asserts are off)

	blockCreate(startPC/8); // ensure a block manager exists for this pc slot
	return mVUentryGet(mVU, mVUblocks[startPC/8], startPC, pState);
}
514    
// mVUcompileJIT() - Called By JR/JALR during execution
// Locates (and selects) the correct micro program for the jump target.
_mVUt void* __fastcall mVUcompileJIT(u32 startPC, uptr pState) {
	//return mVUblockFetch(mVUx, startPC, pState);
	return mVUsearchProg<vuIndex>(startPC, pState); // Find and set correct program
}

  ViewVC Help
Powered by ViewVC 1.1.22