/[pcsx2_0.9.7]/trunk/pcsx2/x86/sVU_Lower.cpp
ViewVC logotype

Contents of /trunk/pcsx2/x86/sVU_Lower.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 280 - (show annotations) (download)
Thu Dec 23 12:02:12 2010 UTC (9 years, 1 month ago) by william
File size: 63500 byte(s)
re-commit (had local access denied errors when committing)
1 /* PCSX2 - PS2 Emulator for PCs
2 * Copyright (C) 2002-2010 PCSX2 Dev Team
3 *
4 * PCSX2 is free software: you can redistribute it and/or modify it under the terms
5 * of the GNU Lesser General Public License as published by the Free Software Found-
6 * ation, either version 3 of the License, or (at your option) any later version.
7 *
8 * PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
9 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
10 * PURPOSE. See the GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License along with PCSX2.
13 * If not, see <http://www.gnu.org/licenses/>.
14 */
15
16 #include "PrecompiledHeader.h"
17
18 #include "Common.h"
19 #include "GS.h"
20 #include "R5900OpcodeTables.h"
21 #include "iR5900.h"
22 #include "iMMI.h"
23 #include "iFPU.h"
24 #include "iCOP0.h"
25 #include "VUmicro.h"
26 #include "sVU_Micro.h"
27 #include "sVU_Debug.h"
28 #include "sVU_zerorec.h"
29 #include "Gif.h"
30
31 using namespace x86Emitter;
32 //------------------------------------------------------------------
33
34 //------------------------------------------------------------------
35 // Helper Macros
36 //------------------------------------------------------------------
37 #define _Ft_ (( VU->code >> 16) & 0x1F) // The rt part of the instruction register
38 #define _Fs_ (( VU->code >> 11) & 0x1F) // The rd part of the instruction register
39 #define _Fd_ (( VU->code >> 6) & 0x1F) // The sa part of the instruction register
40 #define _It_ (_Ft_ & 15)
41 #define _Is_ (_Fs_ & 15)
42 #define _Id_ (_Fd_ & 15)
43
44 #define _X (( VU->code>>24) & 0x1)
45 #define _Y (( VU->code>>23) & 0x1)
46 #define _Z (( VU->code>>22) & 0x1)
47 #define _W (( VU->code>>21) & 0x1)
48
49 #define _XYZW_SS (_X+_Y+_Z+_W==1)
50
51 #define _Fsf_ (( VU->code >> 21) & 0x03)
52 #define _Ftf_ (( VU->code >> 23) & 0x03)
53
54 #define _Imm11_ (s32)(VU->code & 0x400 ? 0xfffffc00 | (VU->code & 0x3ff) : VU->code & 0x3ff)
55 #define _UImm11_ (s32)(VU->code & 0x7ff)
56
57 #define VU_VFx_ADDR(x) (uptr)&VU->VF[x].UL[0]
58 #define VU_VFy_ADDR(x) (uptr)&VU->VF[x].UL[1]
59 #define VU_VFz_ADDR(x) (uptr)&VU->VF[x].UL[2]
60 #define VU_VFw_ADDR(x) (uptr)&VU->VF[x].UL[3]
61
62 #define VU_REGR_ADDR (uptr)&VU->VI[REG_R]
63 #define VU_REGQ_ADDR (uptr)&VU->VI[REG_Q]
64 #define VU_REGMAC_ADDR (uptr)&VU->VI[REG_MAC_FLAG]
65
66 #define VU_VI_ADDR(x, read) GetVIAddr(VU, x, read, info)
67
68 #define VU_ACCx_ADDR (uptr)&VU->ACC.UL[0]
69 #define VU_ACCy_ADDR (uptr)&VU->ACC.UL[1]
70 #define VU_ACCz_ADDR (uptr)&VU->ACC.UL[2]
71 #define VU_ACCw_ADDR (uptr)&VU->ACC.UL[3]
72
73 #define _X_Y_Z_W ((( VU->code >> 21 ) & 0xF ) )
74
75
76 static const __aligned16 u32 VU_ONE[4] = {0x3f800000, 0xffffffff, 0xffffffff, 0xffffffff};
77 //------------------------------------------------------------------
78
79
80 //------------------------------------------------------------------
81 // *VU Lower Instructions!*
82 //
83 // Note: * = Checked for errors by cottonvibes
84 //------------------------------------------------------------------
85
86
87 //------------------------------------------------------------------
88 // DIV*
89 //------------------------------------------------------------------
// DIV* - Computes VF[fs].fsf / VF[ft].ftf into the Q register.
// Emulates VU status-flag behavior: 0/0 sets the invalid (I) flag (0x410),
// x/0 sets the divide-by-zero (D) flag (0x820), and in both cases Q is forced
// to +/- fmax (sign taken from fs ^ ft). Otherwise a normal clamped divide
// is emitted.
void recVUMI_DIV(VURegs *VU, int info)
{
	u8 *pjmp, *pjmp1;
	u32 *ajmp32, *bjmp32;

	//Console.WriteLn("recVUMI_DIV()");
	AND32ItoM(VU_VI_ADDR(REG_STATUS_FLAG, 2), 0xFCF); // Clear D/I flags

	// FT can be zero here! so we need to check if its zero and set the correct flag.
	SSE_XORPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP); // Clear EEREC_TEMP
	SSE_CMPEQPS_XMM_to_XMM(EEREC_TEMP, EEREC_T); // Set all F's if each vector is zero

	SSE_MOVMSKPS_XMM_to_R32( EAX, EEREC_TEMP); // Move the sign bits of the previous calculation

	AND32ItoR( EAX, (1<<_Ftf_) ); // Grab "Is Zero" bits from the previous calculation
	ajmp32 = JZ32(0); // Skip if none are

	// --- Divisor is zero: decide between invalid (0/0) and plain zero-divide ---
	SSE_XORPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP); // Clear EEREC_TEMP
	SSE_CMPEQPS_XMM_to_XMM(EEREC_TEMP, EEREC_S); // Set all F's if each vector is zero
	SSE_MOVMSKPS_XMM_to_R32(EAX, EEREC_TEMP); // Move the sign bits of the previous calculation

	AND32ItoR( EAX, (1<<_Fsf_) ); // Grab "Is Zero" bits from the previous calculation
	pjmp = JZ8(0);
	OR32ItoM( VU_VI_ADDR(REG_STATUS_FLAG, 2), 0x410 ); // Set invalid flag (0/0)
	pjmp1 = JMP8(0);
	x86SetJ8(pjmp);
	OR32ItoM( VU_VI_ADDR(REG_STATUS_FLAG, 2), 0x820 ); // Zero divide (only when not 0/0)
	x86SetJ8(pjmp1);

	// Build the +/- fmax result: xor fs with ft to get the quotient's sign...
	_unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);

	_vuFlipRegSS_xyzw(EEREC_T, _Ftf_);
	SSE_XORPS_XMM_to_XMM(EEREC_TEMP, EEREC_T);
	_vuFlipRegSS_xyzw(EEREC_T, _Ftf_);

	// ...mask down to the sign bit (const_clip[4..] — presumably a sign-bit mask,
	// matching the comment below) and OR in fmax's magnitude.
	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)&const_clip[4]);
	SSE_ORPS_M128_to_XMM(EEREC_TEMP, (uptr)&g_maxvals[0]); // If division by zero, then EEREC_TEMP = +/- fmax

	bjmp32 = JMP32(0);

	x86SetJ32(ajmp32);

	// --- Normal divide path ---
	if (CHECK_VU_EXTRA_OVERFLOW) {
		// Pre-clamp the single field of each operand that participates.
		vuFloat5_useEAX(EEREC_S, EEREC_TEMP, (1 << (3-_Fsf_)));
		vuFloat5_useEAX(EEREC_T, EEREC_TEMP, (1 << (3-_Ftf_)));
	}

	_unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);

	// Temporarily move ft's selected field into the low slot for the scalar divide.
	_vuFlipRegSS_xyzw(EEREC_T, _Ftf_);
	SSE_DIVSS_XMM_to_XMM(EEREC_TEMP, EEREC_T);
	_vuFlipRegSS_xyzw(EEREC_T, _Ftf_);

	vuFloat_useEAX(info, EEREC_TEMP, 0x8); // clamp the quotient

	x86SetJ32(bjmp32);

	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_Q, 0), EEREC_TEMP); // write result to Q
}
149 //------------------------------------------------------------------
150
151
152 //------------------------------------------------------------------
153 // SQRT*
154 //------------------------------------------------------------------
// SQRT* - Computes sqrt(VF[ft].ftf) into the Q register.
// A negative operand sets the invalid (I) flag, and the sqrt is then taken on
// the absolute value ("cardinal sqrt"), per VU hardware behavior.
void recVUMI_SQRT( VURegs *VU, int info )
{
	u8* pjmp;
	//Console.WriteLn("recVUMI_SQRT()");

	_unpackVFSS_xyzw(EEREC_TEMP, EEREC_T, _Ftf_); // selected field -> TEMP low slot
	AND32ItoM(VU_VI_ADDR(REG_STATUS_FLAG, 2), 0xFCF); // Clear D/I flags

	/* Check for negative sqrt */
	SSE_MOVMSKPS_XMM_to_R32(EAX, EEREC_TEMP);
	AND32ItoR(EAX, 1); //Check sign
	pjmp = JZ8(0); //Skip if none are
	OR32ItoM(VU_VI_ADDR(REG_STATUS_FLAG, 2), 0x410); // Invalid Flag - Negative number sqrt
	x86SetJ8(pjmp);

	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)const_clip); // Do a cardinal sqrt (strip the sign bit)
	if (CHECK_VU_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals); // Clamp infinities (only need to do positive clamp since EEREC_TEMP is positive)
	SSE_SQRTSS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_Q, 0), EEREC_TEMP); // write result to Q
}
175 //------------------------------------------------------------------
176
177
178 //------------------------------------------------------------------
179 // RSQRT*
180 //------------------------------------------------------------------
// Scratch storage used to preserve a borrowed XMM register across RSQRT
// when no free temp register is available.
__aligned16 u64 RSQRT_TEMP_XMM[2];

// RSQRT* - Computes VF[fs].fsf / sqrt(VF[ft].ftf) into the Q register.
// A negative ft sets the invalid flag (cardinal sqrt on |ft|); a zero divisor
// after the sqrt sets the invalid (0/0) or divide-by-zero flag and forces Q
// to +/- fmax, mirroring recVUMI_DIV.
void recVUMI_RSQRT(VURegs *VU, int info)
{
	u8 *ajmp8, *bjmp8;
	u8 *qjmp1, *qjmp2;
	int t1reg, t1boolean;
	//Console.WriteLn("recVUMI_RSQRT()");

	_unpackVFSS_xyzw(EEREC_TEMP, EEREC_T, _Ftf_);
	AND32ItoM(VU_VI_ADDR(REG_STATUS_FLAG, 2), 0xFCF); // Clear D/I flags

	/* Check for negative divide */
	SSE_MOVMSKPS_XMM_to_R32(EAX, EEREC_TEMP);
	AND32ItoR(EAX, 1); //Check sign
	ajmp8 = JZ8(0); //Skip if none are
	OR32ItoM(VU_VI_ADDR(REG_STATUS_FLAG, 2), 0x410); // Invalid Flag - Negative number sqrt
	x86SetJ8(ajmp8);

	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)const_clip); // Do a cardinal sqrt
	if (CHECK_VU_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals); // Clamp Infinities to Fmax
	SSE_SQRTSS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);

	// Grab a scratch XMM reg; if none is free, borrow the first reg that is
	// neither EEREC_TEMP nor EEREC_S and spill its contents for the duration.
	t1reg = _vuGetTempXMMreg(info);
	if( t1reg < 0 ) {
		for (t1reg = 0; ( (t1reg == EEREC_TEMP) || (t1reg == EEREC_S) ); t1reg++)
			; // Makes t1reg not be EEREC_TEMP or EEREC_S.
		SSE_MOVAPS_XMM_to_M128( (uptr)&RSQRT_TEMP_XMM[0], t1reg ); // backup data in t1reg to a temp address
		t1boolean = 1;
	}
	else t1boolean = 0;

	// Ft can still be zero here! so we need to check if its zero and set the correct flag.
	SSE_XORPS_XMM_to_XMM(t1reg, t1reg); // Clear t1reg
	SSE_CMPEQSS_XMM_to_XMM(t1reg, EEREC_TEMP); // Set all F's if each vector is zero

	SSE_MOVMSKPS_XMM_to_R32(EAX, t1reg); // Move the sign bits of the previous calculation

	AND32ItoR( EAX, 0x01 ); // Grab "Is Zero" bits from the previous calculation
	ajmp8 = JZ8(0); // Skip if none are

	//check for 0/0
	_unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);

	SSE_XORPS_XMM_to_XMM(t1reg, t1reg); // Clear t1reg
	SSE_CMPEQPS_XMM_to_XMM(t1reg, EEREC_TEMP); // Set all F's if each vector is zero
	SSE_MOVMSKPS_XMM_to_R32(EAX, t1reg); // Move the sign bits of the previous calculation

	AND32ItoR( EAX, 0x01 ); // Grab "Is Zero" bits from the previous calculation
	qjmp1 = JZ8(0);
	OR32ItoM( VU_VI_ADDR(REG_STATUS_FLAG, 2), 0x410 ); // Set invalid flag (0/0)
	qjmp2 = JMP8(0);
	x86SetJ8(qjmp1);
	OR32ItoM( VU_VI_ADDR(REG_STATUS_FLAG, 2), 0x820 ); // Zero divide (only when not 0/0)
	x86SetJ8(qjmp2);

	// Zero divisor: Q = +/- fmax with the numerator's sign.
	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)&const_clip[4]);
	SSE_ORPS_M128_to_XMM(EEREC_TEMP, (uptr)&g_maxvals[0]); // If division by zero, then EEREC_TEMP = +/- fmax
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_Q, 0), EEREC_TEMP);
	bjmp8 = JMP8(0);
	x86SetJ8(ajmp8);

	// Normal path: Q = fs / sqrt(ft), clamped.
	_unpackVFSS_xyzw(t1reg, EEREC_S, _Fsf_);
	if (CHECK_VU_EXTRA_OVERFLOW) vuFloat_useEAX(info, t1reg, 0x8); // Clamp Infinities
	SSE_DIVSS_XMM_to_XMM(t1reg, EEREC_TEMP);
	vuFloat_useEAX(info, t1reg, 0x8);
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_Q, 0), t1reg);

	x86SetJ8(bjmp8);

	if (t1boolean) SSE_MOVAPS_M128_to_XMM( t1reg, (uptr)&RSQRT_TEMP_XMM[0] ); // restore t1reg data
	else _freeXMMreg(t1reg); // free t1reg
}
253 //------------------------------------------------------------------
254
255
256 //------------------------------------------------------------------
257 // _addISIMMtoIT() - Used in IADDI, IADDIU, and ISUBIU instructions
258 //------------------------------------------------------------------
// Emits "vi[it] = vi[is] + imm" with 16-bit wraparound semantics.
// Shared by IADDI, IADDIU and ISUBIU (the latter passes a negated immediate).
// Writing vi00 is a no-op; adding to vi00 degenerates to loading the immediate.
void _addISIMMtoIT(VURegs *VU, s16 imm, int info)
{
	int isreg = -1, itreg;
	if (_It_ == 0) return;

	if( _Is_ == 0 ) {
		// Source is the hardwired zero register: just load the immediate.
		itreg = ALLOCVI(_It_, MODE_WRITE);
		MOV32ItoR(itreg, imm&0xffff);
		return;
	}

	ADD_VI_NEEDED(_It_);
	isreg = ALLOCVI(_Is_, MODE_READ);
	itreg = ALLOCVI(_It_, MODE_WRITE);

	if ( _It_ == _Is_ ) {
		// In-place: a 16-bit add gives the required wraparound directly.
		if (imm != 0 ) ADD16ItoR(itreg, imm);
	}
	else {
		if( imm ) {
			// LEA computes isreg+imm in 32 bits; re-truncate to 16 bits after.
			LEA32RtoR(itreg, isreg, imm);
			MOVZX32R16toR(itreg, itreg);
		}
		else MOV32RtoR(itreg, isreg);
	}
}
285 //------------------------------------------------------------------
286
287
288 //------------------------------------------------------------------
289 // IADDI
290 //------------------------------------------------------------------
291 void recVUMI_IADDI(VURegs *VU, int info)
292 {
293 s16 imm;
294
295 if ( _It_ == 0 ) return;
296 //Console.WriteLn("recVUMI_IADDI");
297 imm = ( VU->code >> 6 ) & 0x1f;
298 imm = ( imm & 0x10 ? 0xfff0 : 0) | ( imm & 0xf );
299 _addISIMMtoIT(VU, imm, info);
300 }
301 //------------------------------------------------------------------
302
303
304 //------------------------------------------------------------------
305 // IADDIU
306 //------------------------------------------------------------------
307 void recVUMI_IADDIU(VURegs *VU, int info)
308 {
309 s16 imm;
310
311 if ( _It_ == 0 ) return;
312 //Console.WriteLn("recVUMI_IADDIU");
313 imm = ( ( VU->code >> 10 ) & 0x7800 ) | ( VU->code & 0x7ff );
314 _addISIMMtoIT(VU, imm, info);
315 }
316 //------------------------------------------------------------------
317
318
319 //------------------------------------------------------------------
320 // IADD
321 //------------------------------------------------------------------
// IADD - vi[id] = vi[is] + vi[it] (16-bit result). Writing vi00 is a no-op.
// vi00 sources degenerate to a register copy.
void recVUMI_IADD( VURegs *VU, int info )
{
	int idreg, isreg = -1, itreg = -1;
	if ( _Id_ == 0 ) return;
	//Console.WriteLn("recVUMI_IADD");
	if ( ( _It_ == 0 ) && ( _Is_ == 0 ) ) {
		// 0 + 0: just zero the destination.
		idreg = ALLOCVI(_Id_, MODE_WRITE);
		XOR32RtoR(idreg, idreg);
		return;
	}

	ADD_VI_NEEDED(_Is_);
	ADD_VI_NEEDED(_It_);
	idreg = ALLOCVI(_Id_, MODE_WRITE);

	if ( _Is_ == 0 )
	{
		// vi[id] = vi[it]: copy from the cached x86 reg if one exists,
		// otherwise zero-extend-load the 16-bit value from memory.
		if( (itreg = _checkX86reg(X86TYPE_VI|((VU==&VU1)?X86TYPE_VU1:0), _It_, MODE_READ)) >= 0 ) {
			if( idreg != itreg ) MOV32RtoR(idreg, itreg);
		}
		else MOVZX32M16toR(idreg, VU_VI_ADDR(_It_, 1));
	}
	else if ( _It_ == 0 )
	{
		// vi[id] = vi[is], same strategy as above.
		if( (isreg = _checkX86reg(X86TYPE_VI|((VU==&VU1)?X86TYPE_VU1:0), _Is_, MODE_READ)) >= 0 ) {
			if( idreg != isreg ) MOV32RtoR(idreg, isreg);
		}
		else MOVZX32M16toR(idreg, VU_VI_ADDR(_Is_, 1));
	}
	else {
		//ADD_VI_NEEDED(_It_);
		isreg = ALLOCVI(_Is_, MODE_READ);
		itreg = ALLOCVI(_It_, MODE_READ);

		// Plain ADD when the destination aliases a source; 3-operand LEA otherwise.
		if( idreg == isreg ) ADD32RtoR(idreg, itreg);
		else if( idreg == itreg ) ADD32RtoR(idreg, isreg);
		else LEA32RRtoR(idreg, isreg, itreg);
		MOVZX32R16toR(idreg, idreg); // needed since don't know if idreg's upper bits are 0
	}
}
362 //------------------------------------------------------------------
363
364
365 //------------------------------------------------------------------
366 // IAND
367 //------------------------------------------------------------------
368 void recVUMI_IAND( VURegs *VU, int info )
369 {
370 int idreg, isreg = -1, itreg = -1;
371 if ( _Id_ == 0 ) return;
372 //Console.WriteLn("recVUMI_IAND");
373 if ( ( _Is_ == 0 ) || ( _It_ == 0 ) ) {
374 idreg = ALLOCVI(_Id_, MODE_WRITE);
375 XOR32RtoR(idreg, idreg);
376 return;
377 }
378
379 ADD_VI_NEEDED(_Is_);
380 ADD_VI_NEEDED(_It_);
381 idreg = ALLOCVI(_Id_, MODE_WRITE);
382
383 isreg = ALLOCVI(_Is_, MODE_READ);
384 itreg = ALLOCVI(_It_, MODE_READ);
385
386 if( idreg == isreg ) AND16RtoR(idreg, itreg);
387 else if( idreg == itreg ) AND16RtoR(idreg, isreg);
388 else {
389 MOV32RtoR(idreg, itreg);
390 AND32RtoR(idreg, isreg);
391 }
392 }
393 //------------------------------------------------------------------
394
395
396 //------------------------------------------------------------------
397 // IOR
398 //------------------------------------------------------------------
// IOR - vi[id] = vi[is] | vi[it]. Writing vi00 is a no-op; OR with vi00
// degenerates to a register copy.
void recVUMI_IOR( VURegs *VU, int info )
{
	int idreg, isreg = -1, itreg = -1;
	if ( _Id_ == 0 ) return;
	//Console.WriteLn("recVUMI_IOR");
	if ( ( _It_ == 0 ) && ( _Is_ == 0 ) ) {
		// 0 | 0 == 0: just zero the destination.
		idreg = ALLOCVI(_Id_, MODE_WRITE);
		XOR32RtoR(idreg, idreg);
		return;
	}

	ADD_VI_NEEDED(_Is_);
	ADD_VI_NEEDED(_It_);
	idreg = ALLOCVI(_Id_, MODE_WRITE);

	if ( _Is_ == 0 )
	{
		// OR with zero: plain copy of vi[it], from its cached x86 reg if present.
		if( (itreg = _checkX86reg(X86TYPE_VI|((VU==&VU1)?X86TYPE_VU1:0), _It_, MODE_READ)) >= 0 ) {
			if( idreg != itreg ) MOV32RtoR(idreg, itreg);
		}
		else MOVZX32M16toR(idreg, VU_VI_ADDR(_It_, 1));
	}
	else if ( _It_ == 0 )
	{
		// OR with zero: plain copy of vi[is].
		if( (isreg = _checkX86reg(X86TYPE_VI|((VU==&VU1)?X86TYPE_VU1:0), _Is_, MODE_READ)) >= 0 ) {
			if( idreg != isreg ) MOV32RtoR(idreg, isreg);
		}
		else MOVZX32M16toR(idreg, VU_VI_ADDR(_Is_, 1));
	}
	else
	{
		isreg = ALLOCVI(_Is_, MODE_READ);
		itreg = ALLOCVI(_It_, MODE_READ);

		// 16-bit OR when the destination aliases a source; otherwise copy one
		// source and OR in the other.
		if( idreg == isreg ) OR16RtoR(idreg, itreg);
		else if( idreg == itreg ) OR16RtoR(idreg, isreg);
		else {
			MOV32RtoR(idreg, isreg);
			OR32RtoR(idreg, itreg);
		}
	}
}
441 //------------------------------------------------------------------
442
443
444 //------------------------------------------------------------------
445 // ISUB
446 //------------------------------------------------------------------
// ISUB - vi[id] = vi[is] - vi[it] (16-bit wraparound). Writing vi00 is a no-op.
void recVUMI_ISUB( VURegs *VU, int info )
{
	int idreg, isreg = -1, itreg = -1;
	if ( _Id_ == 0 ) return;
	//Console.WriteLn("recVUMI_ISUB");
	if ( ( _It_ == 0 ) && ( _Is_ == 0 ) ) {
		// 0 - 0 == 0.
		idreg = ALLOCVI(_Id_, MODE_WRITE);
		XOR32RtoR(idreg, idreg);
		return;
	}

	ADD_VI_NEEDED(_Is_);
	ADD_VI_NEEDED(_It_);
	idreg = ALLOCVI(_Id_, MODE_WRITE);

	if ( _Is_ == 0 )
	{
		// 0 - vi[it]: copy vi[it], then negate it with a 16-bit NEG.
		if( (itreg = _checkX86reg(X86TYPE_VI|((VU==&VU1)?X86TYPE_VU1:0), _It_, MODE_READ)) >= 0 ) {
			if( idreg != itreg ) MOV32RtoR(idreg, itreg);
		}
		else MOVZX32M16toR(idreg, VU_VI_ADDR(_It_, 1));
		NEG16R(idreg);
	}
	else if ( _It_ == 0 )
	{
		// vi[is] - 0: plain copy.
		if( (isreg = _checkX86reg(X86TYPE_VI|((VU==&VU1)?X86TYPE_VU1:0), _Is_, MODE_READ)) >= 0 ) {
			if( idreg != isreg ) MOV32RtoR(idreg, isreg);
		}
		else MOVZX32M16toR(idreg, VU_VI_ADDR(_Is_, 1));
	}
	else
	{
		isreg = ALLOCVI(_Is_, MODE_READ);
		itreg = ALLOCVI(_It_, MODE_READ);

		if( idreg == isreg ) SUB16RtoR(idreg, itreg);
		else if( idreg == itreg ) {
			// idreg holds vi[it]: compute -(vi[it] - vi[is]) == vi[is] - vi[it].
			SUB16RtoR(idreg, isreg);
			NEG16R(idreg);
		}
		else {
			MOV32RtoR(idreg, isreg);
			SUB16RtoR(idreg, itreg);
		}
	}
}
493 //------------------------------------------------------------------
494
495 //------------------------------------------------------------------
496 // ISUBIU
497 //------------------------------------------------------------------
498 void recVUMI_ISUBIU( VURegs *VU, int info )
499 {
500 s16 imm;
501
502 if ( _It_ == 0 ) return;
503 //Console.WriteLn("recVUMI_ISUBIU");
504 imm = ( ( VU->code >> 10 ) & 0x7800 ) | ( VU->code & 0x7ff );
505 imm = -imm;
506 _addISIMMtoIT(VU, imm, info);
507 }
508 //------------------------------------------------------------------
509
510
511 //------------------------------------------------------------------
512 // MOVE*
513 //------------------------------------------------------------------
514 void recVUMI_MOVE( VURegs *VU, int info )
515 {
516 if ( (_Ft_ == 0) || (_X_Y_Z_W == 0) ) return;
517 //Console.WriteLn("recVUMI_MOVE");
518 if (_X_Y_Z_W == 0x8) SSE_MOVSS_XMM_to_XMM(EEREC_T, EEREC_S);
519 else if (_X_Y_Z_W == 0xf) SSE_MOVAPS_XMM_to_XMM(EEREC_T, EEREC_S);
520 else {
521 SSE_MOVAPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
522 VU_MERGE_REGS(EEREC_T, EEREC_TEMP);
523 }
524 }
525 //------------------------------------------------------------------
526
527
528 //------------------------------------------------------------------
529 // MFIR*
530 //------------------------------------------------------------------
// MFIR* - Broadcasts vi[is], sign-extended from 16 to 32 bits, into the
// write-masked fields of VF[ft] (raw integer bits, via PSRAD).
void recVUMI_MFIR( VURegs *VU, int info )
{
	if ( (_Ft_ == 0) || (_X_Y_Z_W == 0) ) return;
	//Console.WriteLn("recVUMI_MFIR");
	// Flush any cached x86 copy of vi[is] so the memory reads below are current.
	_deleteX86reg(X86TYPE_VI|((VU==&VU1)?X86TYPE_VU1:0), _Is_, 1);

	// Sign-extension trick used throughout: the "-2" places the 16-bit VI value
	// in the UPPER half of the loaded dword, and PSRAD by 16 arithmetic-shifts
	// it back down, yielding a sign-extended 32-bit integer.
	if( _XYZW_SS ) {
		SSE2_MOVD_M32_to_XMM(EEREC_TEMP, VU_VI_ADDR(_Is_, 1)-2);
		_vuFlipRegSS(VU, EEREC_T); // rotate the masked field into the low slot
		SSE2_PSRAD_I8_to_XMM(EEREC_TEMP, 16);
		SSE_MOVSS_XMM_to_XMM(EEREC_T, EEREC_TEMP);
		_vuFlipRegSS(VU, EEREC_T); // rotate back
	}
	else if (_X_Y_Z_W != 0xf) {
		SSE2_MOVD_M32_to_XMM(EEREC_TEMP, VU_VI_ADDR(_Is_, 1)-2);
		SSE2_PSRAD_I8_to_XMM(EEREC_TEMP, 16);
		SSE_SHUFPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP, 0); // broadcast to all fields
		VU_MERGE_REGS(EEREC_T, EEREC_TEMP); // write only the masked fields
	}
	else {
		// Full mask: build the broadcast directly in the destination.
		SSE2_MOVD_M32_to_XMM(EEREC_T, VU_VI_ADDR(_Is_, 1)-2);
		SSE2_PSRAD_I8_to_XMM(EEREC_T, 16);
		SSE_SHUFPS_XMM_to_XMM(EEREC_T, EEREC_T, 0);
	}
}
556 //------------------------------------------------------------------
557
558
559 //------------------------------------------------------------------
560 // MTIR*
561 //------------------------------------------------------------------
562 void recVUMI_MTIR( VURegs *VU, int info )
563 {
564 if ( _It_ == 0 ) return;
565 //Console.WriteLn("recVUMI_MTIR");
566 _deleteX86reg(X86TYPE_VI|((VU==&VU1)?X86TYPE_VU1:0), _It_, 2);
567
568 if( _Fsf_ == 0 ) {
569 SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(_It_, 0), EEREC_S);
570 }
571 else {
572 _unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);
573 SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(_It_, 0), EEREC_TEMP);
574 }
575
576 AND32ItoM(VU_VI_ADDR(_It_, 0), 0xffff);
577 }
578 //------------------------------------------------------------------
579
580
581 //------------------------------------------------------------------
582 // MR32*
583 //------------------------------------------------------------------
584 void recVUMI_MR32( VURegs *VU, int info )
585 {
586 if ( (_Ft_ == 0) || (_X_Y_Z_W == 0) ) return;
587 //Console.WriteLn("recVUMI_MR32");
588 if (_X_Y_Z_W != 0xf) {
589 SSE_MOVAPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
590 SSE_SHUFPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP, 0x39);
591 VU_MERGE_REGS(EEREC_T, EEREC_TEMP);
592 }
593 else {
594 SSE_MOVAPS_XMM_to_XMM(EEREC_T, EEREC_S);
595 SSE_SHUFPS_XMM_to_XMM(EEREC_T, EEREC_T, 0x39);
596 }
597 }
598 //------------------------------------------------------------------
599
600
601 //------------------------------------------------------------------
602 // _loadEAX()
603 //
604 // NOTE: If x86reg < 0, reads directly from offset
605 //------------------------------------------------------------------
// Emits a load of the write-masked xyzw fields from VU memory into EEREC_T.
//
// NOTE: If x86reg < 0, reads directly from the absolute address `offset`;
// otherwise the address is [x86reg + offset]. VU1 paths use aligned MOVAPS
// (VU1 accesses here are 16-byte aligned); VU0 paths use MOVUPS.
void _loadEAX(VURegs *VU, int x86reg, uptr offset, int info)
{
	pxAssert( offset < 0x80000000 );

	if( x86reg >= 0 ) {
		switch(_X_Y_Z_W) {
			case 3: // ZW: one 64-bit load into the high half.
				SSE_MOVHPS_Rm_to_XMM(EEREC_T, x86reg, offset+8);
				break;
			case 6: // YZ: load via shuffle, then rotate fields back into place.
				SSE_SHUFPS_Rm_to_XMM(EEREC_T, x86reg, offset, 0x9c);
				SSE_SHUFPS_XMM_to_XMM(EEREC_T, EEREC_T, 0x78);
				break;

			case 8: // X: scalar load through TEMP so T's other fields survive.
				SSE_MOVSS_Rm_to_XMM(EEREC_TEMP, x86reg, offset);
				SSE_MOVSS_XMM_to_XMM(EEREC_T, EEREC_TEMP);
				break;
			case 9: // XW: same shuffle-in/shuffle-back technique as YZ.
				SSE_SHUFPS_Rm_to_XMM(EEREC_T, x86reg, offset, 0xc9);
				SSE_SHUFPS_XMM_to_XMM(EEREC_T, EEREC_T, 0xd2);
				break;
			case 12: // XY: one 64-bit load into the low half.
				SSE_MOVLPS_Rm_to_XMM(EEREC_T, x86reg, offset);
				break;
			case 15: // XYZW: full quadword straight into the destination.
				if( VU == &VU1 ) SSE_MOVAPSRmtoR(EEREC_T, x86reg, offset);
				else SSE_MOVUPSRmtoR(EEREC_T, x86reg, offset);
				break;
			default: // remaining partial masks: load all, then merge masked fields.
				if( VU == &VU1 ) SSE_MOVAPSRmtoR(EEREC_TEMP, x86reg, offset);
				else SSE_MOVUPSRmtoR(EEREC_TEMP, x86reg, offset);

				VU_MERGE_REGS(EEREC_T, EEREC_TEMP);
				break;
		}
	}
	else {
		// Same cases as above, using absolute addressing.
		switch(_X_Y_Z_W) {
			case 3: // ZW
				SSE_MOVHPS_M64_to_XMM(EEREC_T, offset+8);
				break;
			case 6: // YZ
				SSE_SHUFPS_M128_to_XMM(EEREC_T, offset, 0x9c);
				SSE_SHUFPS_XMM_to_XMM(EEREC_T, EEREC_T, 0x78);
				break;
			case 8: // X
				SSE_MOVSS_M32_to_XMM(EEREC_TEMP, offset);
				SSE_MOVSS_XMM_to_XMM(EEREC_T, EEREC_TEMP);
				break;
			case 9: // XW
				SSE_SHUFPS_M128_to_XMM(EEREC_T, offset, 0xc9);
				SSE_SHUFPS_XMM_to_XMM(EEREC_T, EEREC_T, 0xd2);
				break;
			case 12: // XY
				SSE_MOVLPS_M64_to_XMM(EEREC_T, offset);
				break;
			case 15: // XYZW
				if( VU == &VU1 ) SSE_MOVAPS_M128_to_XMM(EEREC_T, offset);
				else SSE_MOVUPS_M128_to_XMM(EEREC_T, offset);
				break;
			default:
				if( VU == &VU1 ) SSE_MOVAPS_M128_to_XMM(EEREC_TEMP, offset);
				else SSE_MOVUPS_M128_to_XMM(EEREC_TEMP, offset);
				VU_MERGE_REGS(EEREC_T, EEREC_TEMP);
				break;
		}
	}
}
675 //------------------------------------------------------------------
676
677
678 //------------------------------------------------------------------
679 // recVUTransformAddr()
680 //------------------------------------------------------------------
// Converts a VU data-memory QUADWORD index (vi[vireg] + imm, arriving in
// x86reg) into a byte offset relative to VU->Mem. The result is always left
// in EAX: the index is wrapped/remapped for the given VU, then shifted left
// by 4 to convert quadwords to bytes. x86reg itself is preserved unless it
// already is EAX.
int recVUTransformAddr(int x86reg, VURegs* VU, int vireg, int imm)
{
	// Get (index + imm) into EAX.
	if( x86reg == EAX ) {
		if (imm) ADD32ItoR(x86reg, imm);
	}
	else {
		if( imm ) LEA32RtoR(EAX, x86reg, imm);
		else MOV32RtoR(EAX, x86reg);
	}

	if( VU == &VU1 ) {
		AND32ItoR(EAX, 0x3ff); // wrap around (VU1 has 0x400 quadwords = 16K)
		SHL32ItoR(EAX, 4);
	}
	else {

		// VU0 has a somewhat interesting memory mapping:
		// if addr & 0x4000 (bit 0x400 of the quadword index), reads map to
		// VU1's VF/VI registers; otherwise VU0 data wraps around at 0x1000
		// bytes (0x100 quadwords).

		xTEST(eax, 0x400);
		xForwardJNZ8 vu1regs; // if addr & 0x4000, reads VU1's VF regs and VI regs
		xAND(eax, 0xff); // if !(addr & 0x4000), wrap around
		xForwardJump8 done;
		vu1regs.SetTarget();
		xAND(eax, 0x3f);
		// Rebase the index so that VU0.Mem + idx*16 lands inside VU1's registers
		// (pointer difference is in u128/quadword units, matching the index).
		xADD(eax, (u128*)VU1.VF - (u128*)VU0.Mem);
		done.SetTarget();

		SHL32ItoR(EAX, 4); // multiply by 16 (shift left by 4)
	}

	return EAX;
}
715 //------------------------------------------------------------------
716
717
718 //------------------------------------------------------------------
719 // LQ
720 //------------------------------------------------------------------
721 void recVUMI_LQ(VURegs *VU, int info)
722 {
723 s16 imm;
724 if ( _Ft_ == 0 ) return;
725 //Console.WriteLn("recVUMI_LQ");
726 imm = (VU->code & 0x400) ? (VU->code & 0x3ff) | 0xfc00 : (VU->code & 0x3ff);
727 if (_Is_ == 0) {
728 _loadEAX(VU, -1, (uptr)GET_VU_MEM(VU, (u32)imm*16), info);
729 }
730 else {
731 int isreg = ALLOCVI(_Is_, MODE_READ);
732 _loadEAX(VU, recVUTransformAddr(isreg, VU, _Is_, imm), (uptr)VU->Mem, info);
733 }
734 }
735 //------------------------------------------------------------------
736
737
738 //------------------------------------------------------------------
739 // LQD
740 //------------------------------------------------------------------
741 void recVUMI_LQD( VURegs *VU, int info )
742 {
743 int isreg;
744 //Console.WriteLn("recVUMI_LQD");
745 if ( _Is_ != 0 ) {
746 isreg = ALLOCVI(_Is_, MODE_READ|MODE_WRITE);
747 SUB16ItoR( isreg, 1 );
748 }
749
750 if ( _Ft_ == 0 ) return;
751
752 if ( _Is_ == 0 ) _loadEAX(VU, -1, (uptr)VU->Mem, info);
753 else _loadEAX(VU, recVUTransformAddr(isreg, VU, _Is_, 0), (uptr)VU->Mem, info);
754 }
755 //------------------------------------------------------------------
756
757
758 //------------------------------------------------------------------
759 // LQI
760 //------------------------------------------------------------------
// LQI - Loads the quadword at vi[is] into VF[ft], then post-increments vi[is].
// The increment happens even when the destination is VF00.
void recVUMI_LQI(VURegs *VU, int info)
{
	int isreg;
	//Console.WriteLn("recVUMI_LQI");
	if ( _Ft_ == 0 ) {
		// Destination is VF00: skip the load but still perform the increment,
		// using the cached x86 register when one exists, else memory directly.
		if( _Is_ != 0 ) {
			if( (isreg = _checkX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), _Is_, MODE_WRITE|MODE_READ)) >= 0 ) {
				ADD16ItoR(isreg, 1);
			}
			else {
				ADD16ItoM( VU_VI_ADDR( _Is_, 0 ), 1 );
			}
		}
		return;
	}

	if (_Is_ == 0) {
		// Base is vi00: load from the start of VU memory.
		_loadEAX(VU, -1, (uptr)VU->Mem, info);
	}
	else {
		isreg = ALLOCVI(_Is_, MODE_READ|MODE_WRITE);
		_loadEAX(VU, recVUTransformAddr(isreg, VU, _Is_, 0), (uptr)VU->Mem, info);
		ADD16ItoR( isreg, 1 ); // post-increment
	}
}
786 //------------------------------------------------------------------
787
788
789 //------------------------------------------------------------------
790 // _saveEAX()
791 //------------------------------------------------------------------
// Emits a store of the write-masked xyzw fields of EEREC_S to VU memory.
// If x86reg >= 0 the address is [x86reg + offset], otherwise the absolute
// address `offset`. When _Fs_ == 0 the source is VF00 == (0,0,0,1.0f), so the
// corresponding constants are stored directly without touching an XMM reg.
void _saveEAX(VURegs *VU, int x86reg, uptr offset, int info)
{
	pxAssert( offset < 0x80000000 );

	if ( _Fs_ == 0 ) {
		if ( _XYZW_SS ) {
			// Single-field store: w writes 1.0f, any other field writes 0.
			u32 c = _W ? 0x3f800000 : 0;
			if ( x86reg >= 0 ) MOV32ItoRm(x86reg, c, offset+(_W?12:(_Z?8:(_Y?4:0))));
			else MOV32ItoM(offset+(_W?12:(_Z?8:(_Y?4:0))), c);
		}
		else {

			// (this is one of my test cases for the new emitter --air)
			using namespace x86Emitter;
			xAddressVoid indexer( offset );
			if( x86reg != -1 ) indexer.Add( xAddressReg( x86reg ) );

			if ( _X ) xMOV(ptr32[indexer], 0x00000000);
			if ( _Y ) xMOV(ptr32[indexer+4], 0x00000000);
			if ( _Z ) xMOV(ptr32[indexer+8], 0x00000000);
			if ( _W ) xMOV(ptr32[indexer+12], 0x3f800000);
		}
		return;
	}

	// Each mask shuffles the needed fields into storable positions, then uses
	// the narrowest store(s) that cover the write mask.
	switch ( _X_Y_Z_W ) {
		case 1: // W
			SSE2_PSHUFD_XMM_to_XMM(EEREC_TEMP, EEREC_S, 0x27); // bring w into the low slot
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+12);
			else SSE_MOVSS_XMM_to_M32(offset+12, EEREC_TEMP);
			break;
		case 2: // Z
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S); // bring z into the low slot
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+8);
			else SSE_MOVSS_XMM_to_M32(offset+8, EEREC_TEMP);
			break;
		case 3: // ZW: single 64-bit store of the high half.
			if ( x86reg >= 0 ) SSE_MOVHPS_XMM_to_Rm(x86reg, EEREC_S, offset+8);
			else SSE_MOVHPS_XMM_to_M64(offset+8, EEREC_S);
			break;
		case 4: // Y
			SSE2_PSHUFLW_XMM_to_XMM(EEREC_TEMP, EEREC_S, 0x4e); // word shuffle swaps dwords 0/1: y -> low slot
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+4);
			else SSE_MOVSS_XMM_to_M32(offset+4, EEREC_TEMP);
			break;
		case 5: // YW
			// Shuffle EEREC_S in place, store y and w, then undo the shuffle
			// (0xB1 pair-swap is its own inverse).
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xB1);
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			if ( x86reg >= 0 ) {
				SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_S, offset+4);
				SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+12);
			}
			else {
				SSE_MOVSS_XMM_to_M32(offset+4, EEREC_S);
				SSE_MOVSS_XMM_to_M32(offset+12, EEREC_TEMP);
			}
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xB1); // restore EEREC_S
			break;
		case 6: // YZ
			SSE2_PSHUFD_XMM_to_XMM(EEREC_TEMP, EEREC_S, 0xc9); // y,z -> low half
			if ( x86reg >= 0 ) SSE_MOVLPS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+4);
			else SSE_MOVLPS_XMM_to_M64(offset+4, EEREC_TEMP);
			break;
		case 7: // YZW
			SSE2_PSHUFD_XMM_to_XMM(EEREC_TEMP, EEREC_S, 0x93); //ZYXW
			if ( x86reg >= 0 ) {
				SSE_MOVHPS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+4);
				SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+12);
			}
			else {
				SSE_MOVHPS_XMM_to_M64(offset+4, EEREC_TEMP);
				SSE_MOVSS_XMM_to_M32(offset+12, EEREC_TEMP);
			}
			break;
		case 8: // X: already in the low slot.
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_S, offset);
			else SSE_MOVSS_XMM_to_M32(offset, EEREC_S);
			break;
		case 9: // XW
			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_S, offset);
			else SSE_MOVSS_XMM_to_M32(offset, EEREC_S);

			SSE2_PSHUFD_XMM_to_XMM(EEREC_TEMP, EEREC_S, 0xff); //WWWW

			if ( x86reg >= 0 ) SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+12);
			else SSE_MOVSS_XMM_to_M32(offset+12, EEREC_TEMP);

			break;
		case 10: //XZ
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S); // z -> TEMP low slot
			if ( x86reg >= 0 ) {
				SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_S, offset);
				SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+8);
			}
			else {
				SSE_MOVSS_XMM_to_M32(offset, EEREC_S);
				SSE_MOVSS_XMM_to_M32(offset+8, EEREC_TEMP);
			}
			break;
		case 11: //XZW
			if ( x86reg >= 0 ) {
				SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_S, offset);
				SSE_MOVHPS_XMM_to_Rm(x86reg, EEREC_S, offset+8);
			}
			else {
				SSE_MOVSS_XMM_to_M32(offset, EEREC_S);
				SSE_MOVHPS_XMM_to_M64(offset+8, EEREC_S);
			}
			break;
		case 12: // XY: single 64-bit store of the low half.
			if ( x86reg >= 0 ) SSE_MOVLPS_XMM_to_Rm(x86reg, EEREC_S, offset+0);
			else SSE_MOVLPS_XMM_to_M64(offset, EEREC_S);
			break;
		case 13: // XYW
			SSE2_PSHUFD_XMM_to_XMM(EEREC_TEMP, EEREC_S, 0x4b); //YXZW
			if ( x86reg >= 0 ) {
				SSE_MOVHPS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+0);
				SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+12);
			}
			else {
				SSE_MOVHPS_XMM_to_M64(offset, EEREC_TEMP);
				SSE_MOVSS_XMM_to_M32(offset+12, EEREC_TEMP);
			}
			break;
		case 14: // XYZ
			SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S); // z -> TEMP low slot
			if ( x86reg >= 0 ) {
				SSE_MOVLPS_XMM_to_Rm(x86reg, EEREC_S, offset+0);
				SSE_MOVSS_XMM_to_Rm(x86reg, EEREC_TEMP, offset+8);
			}
			else {
				SSE_MOVLPS_XMM_to_M64(offset, EEREC_S);
				SSE_MOVSS_XMM_to_M32(offset+8, EEREC_TEMP);
			}
			break;
		case 15: // XYZW: full quadword store.
			if ( VU == &VU1 ) {
				// VU1 accesses here are 16-byte aligned, so MOVAPS is safe.
				if( x86reg >= 0 ) SSE_MOVAPSRtoRm(x86reg, EEREC_S, offset+0);
				else SSE_MOVAPS_XMM_to_M128(offset, EEREC_S);
			}
			else {
				if( x86reg >= 0 ) SSE_MOVUPSRtoRm(x86reg, EEREC_S, offset+0);
				else {
					// Prefer the aligned store when the static address allows it.
					if( offset & 15 ) SSE_MOVUPS_XMM_to_M128(offset, EEREC_S);
					else SSE_MOVAPS_XMM_to_M128(offset, EEREC_S);
				}
			}
			break;
	}
}
942 //------------------------------------------------------------------
943
944
945 //------------------------------------------------------------------
946 // SQ
947 //------------------------------------------------------------------
// SQ: store VF[Fs] (masked by the dest xyzw field) to VU memory at VI[It] + imm.
void recVUMI_SQ(VURegs *VU, int info)
{
	s16 imm;
	//Console.WriteLn("recVUMI_SQ");
	// Sign-extend the 11-bit immediate (bit 10 is the sign bit).
	imm = ( VU->code & 0x400) ? ( VU->code & 0x3ff) | 0xfc00 : ( VU->code & 0x3ff);
	// VI[00] is hardwired to zero, so the target address is a compile-time constant.
	if ( _It_ == 0 ) _saveEAX(VU, -1, (uptr)GET_VU_MEM(VU, (int)imm * 16), info);
	else {
		int itreg = ALLOCVI(_It_, MODE_READ);
		// Dynamic address: translate VI[It]+imm into a host base register.
		_saveEAX(VU, recVUTransformAddr(itreg, VU, _It_, imm), (uptr)VU->Mem, info);
	}
}
959 //------------------------------------------------------------------
960
961
962 //------------------------------------------------------------------
963 // SQD
964 //------------------------------------------------------------------
// SQD: pre-decrement VI[It], then store VF[Fs] to VU memory at VI[It].
void recVUMI_SQD(VURegs *VU, int info)
{
	//Console.WriteLn("recVUMI_SQD");
	// VI[00] is hardwired to zero (no decrement possible): store to address 0.
	if (_It_ == 0) _saveEAX(VU, -1, (uptr)VU->Mem, info);
	else {
		int itreg = ALLOCVI(_It_, MODE_READ|MODE_WRITE);
		SUB16ItoR( itreg, 1 );	// decrement happens BEFORE the store
		_saveEAX(VU, recVUTransformAddr(itreg, VU, _It_, 0), (uptr)VU->Mem, info);
	}
}
975 //------------------------------------------------------------------
976
977
978 //------------------------------------------------------------------
979 // SQI
980 //------------------------------------------------------------------
// SQI: store VF[Fs] to VU memory at VI[It], then post-increment VI[It].
void recVUMI_SQI(VURegs *VU, int info)
{
	//Console.WriteLn("recVUMI_SQI");
	// VI[00] is hardwired to zero (no increment possible): store to address 0.
	if (_It_ == 0) _saveEAX(VU, -1, (uptr)VU->Mem, info);
	else {
		int itreg = ALLOCVI(_It_, MODE_READ|MODE_WRITE);
		_saveEAX(VU, recVUTransformAddr(itreg, VU, _It_, 0), (uptr)VU->Mem, info);
		ADD16ItoR( itreg, 1 );	// increment happens AFTER the store
	}
}
991 //------------------------------------------------------------------
992
993
994 //------------------------------------------------------------------
995 // ILW
996 //------------------------------------------------------------------
997 void recVUMI_ILW(VURegs *VU, int info)
998 {
999 int itreg;
1000 s16 imm, off;
1001
1002 if ( ( _It_ == 0 ) || ( _X_Y_Z_W == 0 ) ) return;
1003 //Console.WriteLn("recVUMI_ILW");
1004 imm = ( VU->code & 0x400) ? ( VU->code & 0x3ff) | 0xfc00 : ( VU->code & 0x3ff);
1005 if (_X) off = 0;
1006 else if (_Y) off = 4;
1007 else if (_Z) off = 8;
1008 else if (_W) off = 12;
1009
1010 ADD_VI_NEEDED(_Is_);
1011 itreg = ALLOCVI(_It_, MODE_WRITE);
1012
1013 if ( _Is_ == 0 ) {
1014 MOVZX32M16toR( itreg, (uptr)GET_VU_MEM(VU, (int)imm * 16 + off) );
1015 }
1016 else {
1017 int isreg = ALLOCVI(_Is_, MODE_READ);
1018 MOV32RmtoR(itreg, recVUTransformAddr(isreg, VU, _Is_, imm), (uptr)VU->Mem + off);
1019 }
1020 }
1021 //------------------------------------------------------------------
1022
1023
1024 //------------------------------------------------------------------
1025 // ISW
1026 //------------------------------------------------------------------
// ISW: store VI[It] to VU memory at VI[Is] + imm; the dest mask selects
// which 32-bit word(s) of the target qword are written.
void recVUMI_ISW( VURegs *VU, int info )
{
	s16 imm;
	//Console.WriteLn("recVUMI_ISW");
	// Sign-extend the 11-bit immediate (bit 10 is the sign bit).
	imm = ( VU->code & 0x400) ? ( VU->code & 0x3ff) | 0xfc00 : ( VU->code & 0x3ff);

	if (_Is_ == 0) {
		// VI[00] is hardwired to zero: the address is a compile-time constant.
		uptr off = (uptr)GET_VU_MEM(VU, (int)imm * 16);
		int itreg = ALLOCVI(_It_, MODE_READ);

		// One 32-bit store per selected component.
		if (_X) MOV32RtoM(off, itreg);
		if (_Y) MOV32RtoM(off+4, itreg);
		if (_Z) MOV32RtoM(off+8, itreg);
		if (_W) MOV32RtoM(off+12, itreg);
	}
	else {
		int x86reg, isreg, itreg;

		ADD_VI_NEEDED(_It_);	// keep It alive across the Is allocation
		isreg = ALLOCVI(_Is_, MODE_READ);
		itreg = ALLOCVI(_It_, MODE_READ);

		// Translate VI[Is]+imm into a host base register.
		x86reg = recVUTransformAddr(isreg, VU, _Is_, imm);

		if (_X) MOV32RtoRm(x86reg, itreg, (uptr)VU->Mem);
		if (_Y) MOV32RtoRm(x86reg, itreg, (uptr)VU->Mem+4);
		if (_Z) MOV32RtoRm(x86reg, itreg, (uptr)VU->Mem+8);
		if (_W) MOV32RtoRm(x86reg, itreg, (uptr)VU->Mem+12);
	}
}
1057 //------------------------------------------------------------------
1058
1059
1060 //------------------------------------------------------------------
1061 // ILWR
1062 //------------------------------------------------------------------
// ILWR: load a 16-bit integer (zero-extended) from VU memory at VI[Is]
// into VI[It]; the dest field selects the word within the qword.
void recVUMI_ILWR( VURegs *VU, int info )
{
	int off, itreg;

	if ( ( _It_ == 0 ) || ( _X_Y_Z_W == 0 ) ) return;	// no destination / no field selected
	//Console.WriteLn("recVUMI_ILWR");
	// Byte offset of the selected component within the 16-byte qword.
	if (_X) off = 0;
	else if (_Y) off = 4;
	else if (_Z) off = 8;
	else if (_W) off = 12;

	ADD_VI_NEEDED(_Is_);
	itreg = ALLOCVI(_It_, MODE_WRITE);

	if ( _Is_ == 0 ) {
		// VI[00] is hardwired to zero: load from the base of VU memory.
		MOVZX32M16toR( itreg, (uptr)VU->Mem + off );
	}
	else {
		int isreg = ALLOCVI(_Is_, MODE_READ);
		MOVZX32Rm16toR(itreg, recVUTransformAddr(isreg, VU, _Is_, 0), (uptr)VU->Mem + off);
	}
}
1085 //------------------------------------------------------------------
1086
1087
1088 //------------------------------------------------------------------
1089 // ISWR
1090 //------------------------------------------------------------------
// ISWR: store VI[It] to VU memory at VI[Is]; the dest mask selects which
// 32-bit word(s) of the target qword are written.
void recVUMI_ISWR( VURegs *VU, int info )
{
	int itreg;
	//Console.WriteLn("recVUMI_ISWR");
	ADD_VI_NEEDED(_Is_);
	// It==0 reads VI00 (hardwired zero), so storing it is still correct.
	itreg = ALLOCVI(_It_, MODE_READ);

	if (_Is_ == 0) {
		// VI[00] is zero: write to the base of VU memory.
		if (_X) MOV32RtoM((uptr)VU->Mem, itreg);
		if (_Y) MOV32RtoM((uptr)VU->Mem+4, itreg);
		if (_Z) MOV32RtoM((uptr)VU->Mem+8, itreg);
		if (_W) MOV32RtoM((uptr)VU->Mem+12, itreg);
	}
	else {
		int x86reg;
		int isreg = ALLOCVI(_Is_, MODE_READ);
		x86reg = recVUTransformAddr(isreg, VU, _Is_, 0);

		if (_X) MOV32RtoRm(x86reg, itreg, (uptr)VU->Mem);
		if (_Y) MOV32RtoRm(x86reg, itreg, (uptr)VU->Mem+4);
		if (_Z) MOV32RtoRm(x86reg, itreg, (uptr)VU->Mem+8);
		if (_W) MOV32RtoRm(x86reg, itreg, (uptr)VU->Mem+12);
	}
}
1115 //------------------------------------------------------------------
1116
1117
1118 //------------------------------------------------------------------
1119 // RINIT*
1120 //------------------------------------------------------------------
// RINIT: seed the R register with the 23 mantissa bits of VF[Fs][fsf]
// and a fixed exponent of 127 (so R always encodes a float in [1,2)).
void recVUMI_RINIT(VURegs *VU, int info)
{
	//Console.WriteLn("recVUMI_RINIT()");
	if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode & MODE_NOFLUSH) ) {
		// Source xmm is dirty and must not be flushed: do everything in SSE.
		_deleteX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), REG_R, 2);
		_unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);

		SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)s_mask);	// keep mantissa (mirrors the 0x7fffff path below)
		SSE_ORPS_M128_to_XMM(EEREC_TEMP, (uptr)VU_ONE);		// force the 1.0f exponent bits
		SSE_MOVSS_XMM_to_M32(VU_REGR_ADDR, EEREC_TEMP);
	}
	else {
		int rreg = ALLOCVI(REG_R, MODE_WRITE);

		// Flush the xmm copy so the integer load below reads current data.
		if( xmmregs[EEREC_S].mode & MODE_WRITE ) {
			SSE_MOVAPS_XMM_to_M128((uptr)&VU->VF[_Fs_], EEREC_S);
			xmmregs[EEREC_S].mode &= ~MODE_WRITE;
		}

		MOV32MtoR( rreg, VU_VFx_ADDR( _Fs_ ) + 4 * _Fsf_ );
		AND32ItoR( rreg, 0x7fffff );	// keep the 23 mantissa bits
		OR32ItoR( rreg, 0x7f << 23 );	// exponent = 127 -> value in [1,2)

		_deleteX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), REG_R, 1);
	}
}
1147 //------------------------------------------------------------------
1148
1149
1150 //------------------------------------------------------------------
1151 // RGET*
1152 //------------------------------------------------------------------
// RGET: broadcast the R register into the dest-masked components of VF[Ft].
void recVUMI_RGET(VURegs *VU, int info)
{
	//Console.WriteLn("recVUMI_RGET()");
	if ( (_Ft_ == 0) || (_X_Y_Z_W == 0) ) return;	// VF00 is read-only / nothing selected

	// Flush any cached x86 copy of R so the memory read below is current.
	_deleteX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), REG_R, 1);

	if (_X_Y_Z_W != 0xf) {
		// Partial write: splat R into a temp, then merge the masked fields.
		SSE_MOVSS_M32_to_XMM(EEREC_TEMP, VU_REGR_ADDR);
		SSE_SHUFPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP, 0);
		VU_MERGE_REGS(EEREC_T, EEREC_TEMP);
	}
	else {
		// Full write: splat R directly into the destination register.
		SSE_MOVSS_M32_to_XMM(EEREC_T, VU_REGR_ADDR);
		SSE_SHUFPS_XMM_to_XMM(EEREC_T, EEREC_T, 0);
	}
}
1170 //------------------------------------------------------------------
1171
1172
1173 //------------------------------------------------------------------
1174 // RNEXT*
1175 //------------------------------------------------------------------
// RNEXT: advance R with the VU's 23-bit LFSR (feedback = bit4 ^ bit22),
// re-apply the fixed 1.0f exponent, then write R to VF[Ft] like RGET.
void recVUMI_RNEXT( VURegs *VU, int info )
{
	int rreg, x86temp0, x86temp1;
	//Console.WriteLn("recVUMI_RNEXT()");

	rreg = ALLOCVI(REG_R, MODE_WRITE|MODE_READ);

	x86temp0 = ALLOCTEMPX86(0);
	x86temp1 = ALLOCTEMPX86(0);

	// code from www.project-fao.org
	//MOV32MtoR(rreg, VU_REGR_ADDR);
	MOV32RtoR(x86temp0, rreg);
	SHR32ItoR(x86temp0, 4);
	AND32ItoR(x86temp0, 1);		// temp0 = bit 4 of R

	MOV32RtoR(x86temp1, rreg);
	SHR32ItoR(x86temp1, 22);
	AND32ItoR(x86temp1, 1);		// temp1 = bit 22 of R

	SHL32ItoR(rreg, 1);
	XOR32RtoR(x86temp0, x86temp1);	// feedback bit = bit4 ^ bit22
	XOR32RtoR(rreg, x86temp0);	// shift in the feedback bit
	AND32ItoR(rreg, 0x7fffff);	// keep the 23 mantissa bits
	OR32ItoR(rreg, 0x3f800000);	// exponent = 127 -> value in [1,2)

	_freeX86reg(x86temp0);
	_freeX86reg(x86temp1);

	if ( (_Ft_ == 0) || (_X_Y_Z_W == 0) ) {
		// No destination: just flush the updated R back to memory.
		_deleteX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), REG_R, 1);
		return;
	}

	recVUMI_RGET(VU, info);
}
1212 //------------------------------------------------------------------
1213
1214
1215 //------------------------------------------------------------------
1216 // RXOR*
1217 //------------------------------------------------------------------
// RXOR: R = 0x3f800000 | ((R ^ VF[Fs][fsf]) & 0x7fffff).
void recVUMI_RXOR( VURegs *VU, int info )
{
	//Console.WriteLn("recVUMI_RXOR()");
	if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode & MODE_NOFLUSH) ) {
		// Source xmm is dirty and must not be flushed: do everything in SSE
		// against R's memory slot.
		_deleteX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), REG_R, 1);
		_unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);

		SSE_XORPS_M128_to_XMM(EEREC_TEMP, VU_REGR_ADDR);
		SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)s_mask);	// mantissa mask (mirrors 0x7fffff below)
		SSE_ORPS_M128_to_XMM(EEREC_TEMP, (uptr)s_fones);	// presumably the fixed-exponent bits; mirrors 0x3f800000 below
		SSE_MOVSS_XMM_to_M32(VU_REGR_ADDR, EEREC_TEMP);
	}
	else {
		int rreg = ALLOCVI(REG_R, MODE_WRITE|MODE_READ);

		// Flush the xmm copy so the integer XOR below reads current data.
		if( xmmregs[EEREC_S].mode & MODE_WRITE ) {
			SSE_MOVAPS_XMM_to_M128((uptr)&VU->VF[_Fs_], EEREC_S);
			xmmregs[EEREC_S].mode &= ~MODE_WRITE;
		}

		XOR32MtoR( rreg, VU_VFx_ADDR( _Fs_ ) + 4 * _Fsf_ );
		AND32ItoR( rreg, 0x7fffff );	// keep the 23 mantissa bits
		OR32ItoR ( rreg, 0x3f800000 );	// exponent = 127 -> value in [1,2)

		_deleteX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), REG_R, 1);
	}
}
1245 //------------------------------------------------------------------
1246
1247
1248 //------------------------------------------------------------------
1249 // WAITQ
1250 //------------------------------------------------------------------
// WAITQ: wait for the FDIV unit's Q register. Currently a no-op here —
// the superVU flush code below is commented out.
void recVUMI_WAITQ( VURegs *VU, int info )
{
	//Console.WriteLn("recVUMI_WAITQ");
//	if( info & PROCESS_VU_SUPER ) {
//		//CALLFunc(waitqfn);
//		SuperVUFlush(0, 1);
//	}
}
1259 //------------------------------------------------------------------
1260
1261
1262 //------------------------------------------------------------------
1263 // FSAND
1264 //------------------------------------------------------------------
// FSAND: VI[It] = STATUS_FLAG & imm12.
void recVUMI_FSAND( VURegs *VU, int info )
{
	int itreg;
	u16 imm;
	//Console.WriteLn("recVUMI_FSAND");
	// 12-bit immediate: bit 11 comes from instruction bit 21.
	imm = (((VU->code >> 21 ) & 0x1) << 11) | (VU->code & 0x7ff);
	if(_It_ == 0) return;	// writes to VI00 are discarded

	itreg = ALLOCVI(_It_, MODE_WRITE);
	MOV32MtoR( itreg, VU_VI_ADDR(REG_STATUS_FLAG, 1) );
	AND32ItoR( itreg, imm );
}
1277 //------------------------------------------------------------------
1278
1279
1280 //------------------------------------------------------------------
1281 // FSEQ
1282 //------------------------------------------------------------------
// FSEQ: VI[It] = (STATUS_FLAG == imm12) ? 1 : 0.
void recVUMI_FSEQ( VURegs *VU, int info )
{
	int itreg;
	u16 imm;
	if ( _It_ == 0 ) return;	// writes to VI00 are discarded
	//Console.WriteLn("recVUMI_FSEQ");
	// 12-bit immediate: bit 11 comes from instruction bit 21.
	imm = (((VU->code >> 21 ) & 0x1) << 11) | (VU->code & 0x7ff);

	// MODE_8BITREG: itreg must be addressable as a low byte for SETcc.
	itreg = ALLOCVI(_It_, MODE_WRITE|MODE_8BITREG);

	MOVZX32M16toR( EAX, VU_VI_ADDR(REG_STATUS_FLAG, 1) );	// EAX used as scratch
	XOR32RtoR(itreg, itreg);	// NOTE(review): assumes ALLOCVI never hands out EAX here — verify against the allocator
	CMP16ItoR(EAX, imm);
	SETE8R(itreg);
}
1298 //------------------------------------------------------------------
1299
1300
1301 //------------------------------------------------------------------
1302 // FSOR
1303 //------------------------------------------------------------------
// FSOR: VI[It] = STATUS_FLAG | imm12.
void recVUMI_FSOR( VURegs *VU, int info )
{
	int itreg;
	u32 imm;
	if(_It_ == 0) return;	// writes to VI00 are discarded
	//Console.WriteLn("recVUMI_FSOR");
	// 12-bit immediate: bit 11 comes from instruction bit 21.
	imm = (((VU->code >> 21 ) & 0x1) << 11) | (VU->code & 0x7ff);

	itreg = ALLOCVI(_It_, MODE_WRITE);

	MOVZX32M16toR( itreg, VU_VI_ADDR(REG_STATUS_FLAG, 1) );
	OR32ItoR( itreg, imm );
}
1317 //------------------------------------------------------------------
1318
1319
1320 //------------------------------------------------------------------
1321 // FSSET
1322 //------------------------------------------------------------------
// FSSET: write the status flag from the 12-bit immediate, preserving the
// sticky low 6 bits of the previous status value (see the MGS3 note below).
void recVUMI_FSSET(VURegs *VU, int info)
{
	u32 writeaddr = VU_VI_ADDR(REG_STATUS_FLAG, 0);
	u32 prevaddr = VU_VI_ADDR(REG_STATUS_FLAG, 2);

	u16 imm = 0;
	//Console.WriteLn("recVUMI_FSSET");
	imm = (((VU->code >> 21 ) & 0x1) << 11) | (VU->code & 0x7FF);

	// keep the low 6 bits ONLY if the upper instruction is an fmac instruction (otherwise rewrite) - metal gear solid 3
	//if( (info & PROCESS_VU_SUPER) && VUREC_FMAC ) {
		MOV32MtoR(EAX, prevaddr);
		AND32ItoR(EAX, 0x3f);	// preserve the sticky low 6 bits
		if ((imm&0xfc0) != 0) OR32ItoR(EAX, imm & 0xFC0);	// OR in the settable upper 6 bits
		MOV32RtoM(writeaddr ? writeaddr : prevaddr, EAX);
	//}
	//else {
	//	MOV32ItoM(writeaddr ? writeaddr : prevaddr, imm&0xfc0);
	//}
}
1343 //------------------------------------------------------------------
1344
1345
1346 //------------------------------------------------------------------
1347 // FMAND
1348 //------------------------------------------------------------------
// FMAND: VI[It] = VI[Is] & MAC_FLAG (16-bit AND).
void recVUMI_FMAND( VURegs *VU, int info )
{
	int isreg, itreg;
	if ( _It_ == 0 ) return;	// writes to VI00 are discarded
	//Console.WriteLn("recVUMI_FMAND");
	isreg = _checkX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), _Is_, MODE_READ);
	itreg = ALLOCVI(_It_, MODE_WRITE);//|MODE_8BITREG);

	// Get VI[Is] into itreg: from the cached host reg if available,
	// otherwise zero-extended from memory.
	if( isreg >= 0 ) {
		if( itreg != isreg ) MOV32RtoR(itreg, isreg);
	}
	else MOVZX32M16toR(itreg, VU_VI_ADDR(_Is_, 1));

	AND16MtoR( itreg, VU_VI_ADDR(REG_MAC_FLAG, 1));
}
1364 //------------------------------------------------------------------
1365
1366
1367 //------------------------------------------------------------------
1368 // FMEQ
1369 //------------------------------------------------------------------
// FMEQ: VI[It] = (VI[Is] == MAC_FLAG) ? 1 : 0.
void recVUMI_FMEQ( VURegs *VU, int info )
{
	int itreg, isreg;
	if ( _It_ == 0 ) return;	// writes to VI00 are discarded
	//Console.WriteLn("recVUMI_FMEQ");
	if( _It_ == _Is_ ) {
		// Same register: compare in place; materialize the 0/1 through EAX
		// since itreg can't be cleared before it is compared.
		itreg = ALLOCVI(_It_, MODE_WRITE|MODE_READ);//|MODE_8BITREG

		CMP16MtoR(itreg, VU_VI_ADDR(REG_MAC_FLAG, 1));
		SETE8R(EAX);
		MOVZX32R8toR(itreg, EAX);
	}
	else {
		ADD_VI_NEEDED(_Is_);
		itreg = ALLOCVI(_It_, MODE_WRITE|MODE_8BITREG);
		isreg = ALLOCVI(_Is_, MODE_READ);

		XOR32RtoR(itreg, itreg);	// clear result; SETcc only writes the low byte

		CMP16MtoR(isreg, VU_VI_ADDR(REG_MAC_FLAG, 1));
		SETE8R(itreg);
	}
}
1393 //------------------------------------------------------------------
1394
1395
1396 //------------------------------------------------------------------
1397 // FMOR
1398 //------------------------------------------------------------------
// FMOR: VI[It] = VI[Is] | MAC_FLAG (16-bit OR).
void recVUMI_FMOR( VURegs *VU, int info )
{
	int isreg, itreg;
	if ( _It_ == 0 ) return;	// writes to VI00 are discarded
	//Console.WriteLn("recVUMI_FMOR");
	if( _Is_ == 0 ) {
		// VI00 is zero: the result is just the MAC flag.
		itreg = ALLOCVI(_It_, MODE_WRITE);//|MODE_8BITREG);
		MOVZX32M16toR( itreg, VU_VI_ADDR(REG_MAC_FLAG, 1) );
	}
	else if( _It_ == _Is_ ) {
		// Same register: OR the flag in place.
		itreg = ALLOCVI(_It_, MODE_WRITE|MODE_READ);//|MODE_8BITREG);
		OR16MtoR( itreg, VU_VI_ADDR(REG_MAC_FLAG, 1) );
	}
	else {
		isreg = _checkX86reg(X86TYPE_VI|(VU==&VU1?X86TYPE_VU1:0), _Is_, MODE_READ);
		itreg = ALLOCVI(_It_, MODE_WRITE);

		MOVZX32M16toR( itreg, VU_VI_ADDR(REG_MAC_FLAG, 1) );

		// OR in VI[Is]: from the cached host reg if available, else from memory.
		if( isreg >= 0 )
			OR16RtoR( itreg, isreg );
		else
			OR16MtoR( itreg, VU_VI_ADDR(_Is_, 1) );
	}
}
1424 //------------------------------------------------------------------
1425
1426
1427 //------------------------------------------------------------------
1428 // FCAND
1429 //------------------------------------------------------------------
// FCAND: VI[01] = ((CLIP_FLAG & imm24) != 0) ? 1 : 0.
void recVUMI_FCAND( VURegs *VU, int info )
{
	int itreg = ALLOCVI(1, MODE_WRITE|MODE_8BITREG);	// result always goes to VI01
	//Console.WriteLn("recVUMI_FCAND");
	MOV32MtoR( EAX, VU_VI_ADDR(REG_CLIP_FLAG, 1) );
	XOR32RtoR( itreg, itreg );	// clear result; SETcc only writes the low byte
	AND32ItoR( EAX, VU->code & 0xFFFFFF );	// 24-bit immediate in the low instruction bits

	SETNZ8R(itreg);
}
1440 //------------------------------------------------------------------
1441
1442
1443 //------------------------------------------------------------------
1444 // FCEQ
1445 //------------------------------------------------------------------
// FCEQ: VI[01] = ((CLIP_FLAG & 0xffffff) == imm24) ? 1 : 0.
void recVUMI_FCEQ( VURegs *VU, int info )
{
	int itreg = ALLOCVI(1, MODE_WRITE|MODE_8BITREG);	// result always goes to VI01
	//Console.WriteLn("recVUMI_FCEQ");
	MOV32MtoR( EAX, VU_VI_ADDR(REG_CLIP_FLAG, 1) );
	AND32ItoR( EAX, 0xffffff );	// only the 24 clip bits participate
	XOR32RtoR( itreg, itreg );	// clear result; SETcc only writes the low byte
	CMP32ItoR( EAX, VU->code&0xffffff );

	SETE8R(itreg);
}
1457 //------------------------------------------------------------------
1458
1459
1460 //------------------------------------------------------------------
1461 // FCOR
1462 //------------------------------------------------------------------
// FCOR: VI[01] = 1 if every one of the 24 bits of (CLIP_FLAG | imm24)
// is set, else 0.
void recVUMI_FCOR( VURegs *VU, int info )
{
	int itreg;
	//Console.WriteLn("recVUMI_FCOR");
	itreg = ALLOCVI(1, MODE_WRITE);		// result always goes to VI01
	MOV32MtoR( itreg, VU_VI_ADDR(REG_CLIP_FLAG, 1) );
	OR32ItoR ( itreg, VU->code );
	AND32ItoR( itreg, 0xffffff );
	ADD32ItoR( itreg, 1 ); // If 24 1's will make 25th bit 1, else 0
	SHR32ItoR( itreg, 24 ); // Get the 25th bit (also clears the rest of the garbage in the reg)
}
1474 //------------------------------------------------------------------
1475
1476
1477 //------------------------------------------------------------------
1478 // FCSET
1479 //------------------------------------------------------------------
// FCSET: set the clip flag to the 24-bit immediate.
void recVUMI_FCSET( VURegs *VU, int info )
{
	u32 addr = VU_VI_ADDR(REG_CLIP_FLAG, 0);
	//Console.WriteLn("recVUMI_FCSET");
	// Write to the pending slot if one exists, else directly to slot 2.
	MOV32ItoM(addr ? addr : VU_VI_ADDR(REG_CLIP_FLAG, 2), VU->code&0xffffff );

	// Outside superVU/COP2, also update the "current read" copy immediately.
	if( !(info & (PROCESS_VU_SUPER|PROCESS_VU_COP2)) )
		MOV32ItoM( VU_VI_ADDR(REG_CLIP_FLAG, 1), VU->code&0xffffff );
}
1489 //------------------------------------------------------------------
1490
1491
1492 //------------------------------------------------------------------
1493 // FCGET
1494 //------------------------------------------------------------------
// FCGET: VI[It] = low 12 bits of the clip flag.
void recVUMI_FCGET( VURegs *VU, int info )
{
	int itreg;
	if(_It_ == 0) return;	// writes to VI00 are discarded
	//Console.WriteLn("recVUMI_FCGET");
	itreg = ALLOCVI(_It_, MODE_WRITE);

	MOV32MtoR(itreg, VU_VI_ADDR(REG_CLIP_FLAG, 1));
	AND32ItoR(itreg, 0x0fff);
}
1505 //------------------------------------------------------------------
1506
1507
1508 //------------------------------------------------------------------
1509 // _recbranchAddr()
1510 //
1511 // NOTE: Due to static var dependencies, several SuperVU branch instructions
1512 // are still located in iVUzerorec.cpp.
1513 //------------------------------------------------------------------
1514
1515 //------------------------------------------------------------------
1516 // MFP*
1517 //------------------------------------------------------------------
// MFP: move the P register into the dest-masked components of VF[Ft].
void recVUMI_MFP(VURegs *VU, int info)
{
	if ( (_Ft_ == 0) || (_X_Y_Z_W == 0) ) return;	// VF00 is read-only / nothing selected
	//Console.WriteLn("recVUMI_MFP");
	if( _XYZW_SS ) {
		// Single component: rotate it into the x slot, write, rotate back.
		_vuFlipRegSS(VU, EEREC_T);
		SSE_MOVSS_M32_to_XMM(EEREC_TEMP, VU_VI_ADDR(REG_P, 1));
		SSE_MOVSS_XMM_to_XMM(EEREC_T, EEREC_TEMP);
		_vuFlipRegSS(VU, EEREC_T);
	}
	else if (_X_Y_Z_W != 0xf) {
		// Partial mask: splat P into a temp and merge the selected fields.
		SSE_MOVSS_M32_to_XMM(EEREC_TEMP, VU_VI_ADDR(REG_P, 1));
		SSE_SHUFPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP, 0);
		VU_MERGE_REGS(EEREC_T, EEREC_TEMP);
	}
	else {
		// Full write: splat P into all four components.
		SSE_MOVSS_M32_to_XMM(EEREC_T, VU_VI_ADDR(REG_P, 1));
		SSE_SHUFPS_XMM_to_XMM(EEREC_T, EEREC_T, 0);
	}
}
1538 //------------------------------------------------------------------
1539
1540
1541 //------------------------------------------------------------------
1542 // WAITP
1543 //------------------------------------------------------------------
// Aligned spill buffer used by the x87-based EFU routines (EATAN*/ESIN/EATAN)
// to read components of a dirty, non-flushable xmm register.
static __aligned16 float s_tempmem[4];
// WAITP: wait for the EFU unit's P register. Currently a no-op here —
// the superVU flush code below is commented out.
void recVUMI_WAITP(VURegs *VU, int info)
{
	//Console.WriteLn("recVUMI_WAITP");
//	if( info & PROCESS_VU_SUPER )
//		SuperVUFlush(1, 1);
}
1551 //------------------------------------------------------------------
1552
1553
1554 //------------------------------------------------------------------
1555 // vuSqSumXYZ()*
1556 //
1557 // NOTE: In all EFU insts, EEREC_D is a temp reg
1558 //------------------------------------------------------------------
// vuSqSumXYZ: regd.x = regs.x^2 + regs.y^2 + regs.z^2 (w is excluded).
// regtemp is clobbered; regs is left untouched.
void vuSqSumXYZ(int regd, int regs, int regtemp) // regd.x = x ^ 2 + y ^ 2 + z ^ 2
{
	//Console.WriteLn("VU: SUMXYZ");
	if( x86caps.hasStreamingSIMD4Extensions )
	{
		SSE_MOVAPS_XMM_to_XMM(regd, regs);
		if (CHECK_VU_EXTRA_OVERFLOW) vuFloat2(regd, regtemp, 0xf);
		// DPPS imm 0x71: multiply & sum x,y,z only; result written to the x slot.
		SSE4_DPPS_XMM_to_XMM(regd, regd, 0x71);
	}
	else
	{
		SSE_MOVAPS_XMM_to_XMM(regtemp, regs);
		if (CHECK_VU_EXTRA_OVERFLOW) vuFloat2(regtemp, regd, 0xf);
		SSE_MULPS_XMM_to_XMM(regtemp, regtemp); // xyzw ^ 2

		if( x86caps.hasStreamingSIMD3Extensions ) {
			SSE3_HADDPS_XMM_to_XMM(regd, regtemp);
			SSE_ADDPS_XMM_to_XMM(regd, regtemp); // regd.z = x ^ 2 + y ^ 2 + z ^ 2
			SSE_MOVHLPS_XMM_to_XMM(regd, regd); // regd.x = regd.z
		}
		else {
			// Scalar fallback: accumulate x^2, then y^2, then z^2 into regd.x.
			SSE_MOVSS_XMM_to_XMM(regd, regtemp);
			SSE2_PSHUFLW_XMM_to_XMM(regtemp, regtemp, 0x4e); // wzyx -> wzxy
			SSE_ADDSS_XMM_to_XMM(regd, regtemp); // x ^ 2 + y ^ 2
			SSE_SHUFPS_XMM_to_XMM(regtemp, regtemp, 0xD2); // wzxy -> wxyz
			SSE_ADDSS_XMM_to_XMM(regd, regtemp); // x ^ 2 + y ^ 2 + z ^ 2
		}
	}
}
1588 //------------------------------------------------------------------
1589
1590
1591 //------------------------------------------------------------------
1592 // ESADD*
1593 //------------------------------------------------------------------
// ESADD: P = x^2 + y^2 + z^2 of VF[Fs].
void recVUMI_ESADD( VURegs *VU, int info)
{
	//Console.WriteLn("VU: ESADD");
	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1
	if( EEREC_TEMP == EEREC_D ) { // special code to reset P ( FixMe: don't know if this is still needed! (cottonvibes) )
		Console.Warning("ESADD: Resetting P reg!!!\n");
		MOV32ItoM(VU_VI_ADDR(REG_P, 0), 0);
		return;
	}
	vuSqSumXYZ(EEREC_D, EEREC_S, EEREC_TEMP);
	if (CHECK_VU_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_D, (uptr)g_maxvals); // Only need to do positive clamp since (x ^ 2 + y ^ 2 + z ^ 2) is positive
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_D);
}
1607 //------------------------------------------------------------------
1608
1609
1610 //------------------------------------------------------------------
1611 // ERSADD*
1612 //------------------------------------------------------------------
// ERSADD: P = 1 / (x^2 + y^2 + z^2) of VF[Fs].
void recVUMI_ERSADD( VURegs *VU, int info )
{
	//Console.WriteLn("VU: ERSADD");
	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1
	vuSqSumXYZ(EEREC_D, EEREC_S, EEREC_TEMP);
	// don't use RCPSS (very bad precision)
	SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)VU_ONE);
	SSE_DIVSS_XMM_to_XMM(EEREC_TEMP, EEREC_D);
	if (CHECK_VU_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals); // Only need to do positive clamp since (x ^ 2 + y ^ 2 + z ^ 2) is positive
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_TEMP);
}
1624 //------------------------------------------------------------------
1625
1626
1627 //------------------------------------------------------------------
1628 // ELENG*
1629 //------------------------------------------------------------------
// ELENG: P = sqrt(x^2 + y^2 + z^2) of VF[Fs].
void recVUMI_ELENG( VURegs *VU, int info )
{
	//Console.WriteLn("VU: ELENG");
	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1
	vuSqSumXYZ(EEREC_D, EEREC_S, EEREC_TEMP);
	if (CHECK_VU_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_D, (uptr)g_maxvals); // Only need to do positive clamp since (x ^ 2 + y ^ 2 + z ^ 2) is positive
	SSE_SQRTSS_XMM_to_XMM(EEREC_D, EEREC_D);
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_D);
}
1639 //------------------------------------------------------------------
1640
1641
1642 //------------------------------------------------------------------
1643 // ERLENG*
1644 //------------------------------------------------------------------
// ERLENG: P = 1 / sqrt(x^2 + y^2 + z^2) of VF[Fs].
void recVUMI_ERLENG( VURegs *VU, int info )
{
	//Console.WriteLn("VU: ERLENG");
	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1
	vuSqSumXYZ(EEREC_D, EEREC_S, EEREC_TEMP);
	if (CHECK_VU_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_D, (uptr)g_maxvals); // Only need to do positive clamp since (x ^ 2 + y ^ 2 + z ^ 2) is positive
	SSE_SQRTSS_XMM_to_XMM(EEREC_D, EEREC_D); // regd <- sqrt(x^2 + y^2 + z^2)
	SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)VU_ONE); // temp <- 1
	SSE_DIVSS_XMM_to_XMM(EEREC_TEMP, EEREC_D); // temp = 1 / sqrt(x^2 + y^2 + z^2)
	if (CHECK_VU_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals); // Only need to do positive clamp
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_TEMP);
}
1657 //------------------------------------------------------------------
1658
1659
1660 //------------------------------------------------------------------
1661 // EATANxy
1662 //------------------------------------------------------------------
// EATANxy: P = FPATAN over VF[Fs].x and VF[Fs].y, computed on the x87 stack
// (FPATAN takes atan(ST1/ST0) and pops one entry).
void recVUMI_EATANxy( VURegs *VU, int info )
{
	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1
	//Console.WriteLn("recVUMI_EATANxy");
	if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
		// Dirty no-flush reg: spill x,y to the temp buffer and load from there.
		SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
		FLD32((uptr)&s_tempmem[0]);
		FLD32((uptr)&s_tempmem[1]);
	}
	else {
		// Flush the xmm copy so the x87 loads read current data.
		if( xmmregs[EEREC_S].mode & MODE_WRITE ) {
			SSE_MOVAPS_XMM_to_M128((uptr)&VU->VF[_Fs_], EEREC_S);
			xmmregs[EEREC_S].mode &= ~MODE_WRITE;
		}

		FLD32((uptr)&VU->VF[_Fs_].UL[0]);
		FLD32((uptr)&VU->VF[_Fs_].UL[1]);
	}

	FPATAN();	// st1 = atan(st1/st0); pop
	FSTP32(VU_VI_ADDR(REG_P, 0));	// pop result into P
}
1685 //------------------------------------------------------------------
1686
1687
1688 //------------------------------------------------------------------
1689 // EATANxz
1690 //------------------------------------------------------------------
1691 void recVUMI_EATANxz( VURegs *VU, int info )
1692 {
1693 pxAssert( VU == &VU1 );
1694 //Console.WriteLn("recVUMI_EATANxz");
1695 if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
1696 SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
1697 FLD32((uptr)&s_tempmem[0]);
1698 FLD32((uptr)&s_tempmem[2]);
1699 }
1700 else {
1701 if( xmmregs[EEREC_S].mode & MODE_WRITE ) {
1702 SSE_MOVAPS_XMM_to_M128((uptr)&VU->VF[_Fs_], EEREC_S);
1703 xmmregs[EEREC_S].mode &= ~MODE_WRITE;
1704 }
1705
1706 FLD32((uptr)&VU->VF[_Fs_].UL[0]);
1707 FLD32((uptr)&VU->VF[_Fs_].UL[2]);
1708 }
1709 FPATAN();
1710 FSTP32(VU_VI_ADDR(REG_P, 0));
1711 }
1712 //------------------------------------------------------------------
1713
1714
1715 //------------------------------------------------------------------
1716 // ESUM*
1717 //------------------------------------------------------------------
// ESUM: P = x + y + z + w of VF[Fs].
void recVUMI_ESUM( VURegs *VU, int info )
{
	//Console.WriteLn("VU: ESUM");
	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1

	if( x86caps.hasStreamingSIMD3Extensions ) {
		SSE_MOVAPS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
		if (CHECK_VU_EXTRA_OVERFLOW) vuFloat_useEAX(info, EEREC_TEMP, 0xf);
		// Two horizontal adds collapse all four components into every slot.
		SSE3_HADDPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);
		SSE3_HADDPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);
	}
	else {
		SSE_MOVHLPS_XMM_to_XMM(EEREC_TEMP, EEREC_S); // z, w, z, w
		SSE_ADDPS_XMM_to_XMM(EEREC_TEMP, EEREC_S); // z+x, w+y, z+z, w+w
		SSE_UNPCKLPS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP); // z+x, z+x, w+y, w+y
		SSE_MOVHLPS_XMM_to_XMM(EEREC_D, EEREC_TEMP); // w+y, w+y, w+y, w+y
		SSE_ADDSS_XMM_to_XMM(EEREC_TEMP, EEREC_D); // x+y+z+w, w+y, w+y, w+y
	}

	vuFloat_useEAX(info, EEREC_TEMP, 8);	// clamp the scalar result
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_TEMP);
}
1740 //------------------------------------------------------------------
1741
1742
1743 //------------------------------------------------------------------
1744 // ERCPR*
1745 //------------------------------------------------------------------
// ERCPR: P = 1 / VF[Fs][fsf]. Uses a real divide (not RCPSS) for precision.
// Non-x components are shuffled into the x slot, divided, then shuffled
// back so EEREC_S ends up unmodified.
void recVUMI_ERCPR( VURegs *VU, int info )
{
	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1
	//Console.WriteLn("VU1: ERCPR");

	// don't use RCPSS (very bad precision)
	switch ( _Fsf_ ) {
		case 0: //0001
			if (CHECK_VU_EXTRA_OVERFLOW) vuFloat5_useEAX(EEREC_S, EEREC_TEMP, 8);
			SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)VU_ONE); // temp <- 1
			SSE_DIVSS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			break;
		case 1: //0010
			SSE2_PSHUFLW_XMM_to_XMM(EEREC_S, EEREC_S, 0x4e); // swap x and y
			if (CHECK_VU_EXTRA_OVERFLOW) vuFloat5_useEAX(EEREC_S, EEREC_TEMP, 8);
			SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)VU_ONE); // temp <- 1
			SSE_DIVSS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			SSE2_PSHUFLW_XMM_to_XMM(EEREC_S, EEREC_S, 0x4e); // swap back
			break;
		case 2: //0100
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xc6); // swap x and z
			if (CHECK_VU_EXTRA_OVERFLOW) vuFloat5_useEAX(EEREC_S, EEREC_TEMP, 8);
			SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)VU_ONE); // temp <- 1
			SSE_DIVSS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0xc6); // swap back
			break;
		case 3: //1000
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0x27); // swap x and w
			if (CHECK_VU_EXTRA_OVERFLOW) vuFloat5_useEAX(EEREC_S, EEREC_TEMP, 8);
			SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)VU_ONE); // temp <- 1
			SSE_DIVSS_XMM_to_XMM(EEREC_TEMP, EEREC_S);
			SSE_SHUFPS_XMM_to_XMM(EEREC_S, EEREC_S, 0x27); // swap back
			break;
	}

	vuFloat_useEAX(info, EEREC_TEMP, 8);	// clamp the scalar result
	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_TEMP);
}
1784 //------------------------------------------------------------------
1785
1786
1787 //------------------------------------------------------------------
1788 // ESQRT*
1789 //------------------------------------------------------------------
// ESQRT: P = sqrt(|VF[Fs][fsf]|) (the sign bit is masked off first).
void recVUMI_ESQRT( VURegs *VU, int info )
{
	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1

	//Console.WriteLn("VU1: ESQRT");
	_unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);
	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)const_clip); // abs(x)
	if (CHECK_VU_OVERFLOW) SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals); // Only need to do positive clamp
	SSE_SQRTSS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP);

	SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_TEMP);
}
1802 //------------------------------------------------------------------
1803
1804
1805 //------------------------------------------------------------------
1806 // ERSQRT*
1807 //------------------------------------------------------------------
// ERSQRT: P = 1 / sqrt(|VF[Fs][fsf]|).
void recVUMI_ERSQRT( VURegs *VU, int info )
{
	int t1reg = _vuGetTempXMMreg(info);

	pxAssert( VU == &VU1 );	// EFU ops exist only on VU1
	//Console.WriteLn("VU1: ERSQRT");

	_unpackVFSS_xyzw(EEREC_TEMP, EEREC_S, _Fsf_);
	SSE_ANDPS_M128_to_XMM(EEREC_TEMP, (uptr)const_clip); // abs(x)
	SSE_MINSS_M32_to_XMM(EEREC_TEMP, (uptr)g_maxvals); // Clamp Infinities to Fmax
	SSE_SQRTSS_XMM_to_XMM(EEREC_TEMP, EEREC_TEMP); // SQRT(abs(x))

	if( t1reg >= 0 )
	{
		// Spare xmm available: compute 1/sqrt entirely in registers.
		SSE_MOVSS_M32_to_XMM(t1reg, (uptr)VU_ONE);
		SSE_DIVSS_XMM_to_XMM(t1reg, EEREC_TEMP);
		vuFloat_useEAX(info, t1reg, 8);
		SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), t1reg);
		_freeXMMreg(t1reg);
	}
	else
	{
		// No spare reg: bounce the sqrt through P's memory slot, then divide.
		SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_TEMP);
		SSE_MOVSS_M32_to_XMM(EEREC_TEMP, (uptr)VU_ONE);
		SSE_DIVSS_M32_to_XMM(EEREC_TEMP, VU_VI_ADDR(REG_P, 0));
		vuFloat_useEAX(info, EEREC_TEMP, 8);
		SSE_MOVSS_XMM_to_M32(VU_VI_ADDR(REG_P, 0), EEREC_TEMP);
	}
}
1837 //------------------------------------------------------------------
1838
1839
1840 //------------------------------------------------------------------
1841 // ESIN
1842 //------------------------------------------------------------------
1843 void recVUMI_ESIN( VURegs *VU, int info )
1844 {
1845 pxAssert( VU == &VU1 );
1846
1847 //Console.WriteLn("recVUMI_ESIN");
1848 if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
1849 switch(_Fsf_) {
1850 case 0: SSE_MOVSS_XMM_to_M32((uptr)s_tempmem, EEREC_S);
1851 case 1: SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
1852 default: SSE_MOVHPS_XMM_to_M64((uptr)&s_tempmem[2], EEREC_S);
1853 }
1854 FLD32((uptr)&s_tempmem[_Fsf_]);
1855 }
1856 else {
1857 if( xmmregs[EEREC_S].mode & MODE_WRITE ) {
1858 SSE_MOVAPS_XMM_to_M128((uptr)&VU->VF[_Fs_], EEREC_S);
1859 xmmregs[EEREC_S].mode &= ~MODE_WRITE;
1860 }
1861
1862 FLD32((uptr)&VU->VF[_Fs_].UL[_Fsf_]);
1863 }
1864
1865 FSIN();
1866 FSTP32(VU_VI_ADDR(REG_P, 0));
1867 }
1868 //------------------------------------------------------------------
1869
1870
1871 //------------------------------------------------------------------
1872 // EATAN
1873 //------------------------------------------------------------------
1874 void recVUMI_EATAN( VURegs *VU, int info )
1875 {
1876 pxAssert( VU == &VU1 );
1877
1878 //Console.WriteLn("recVUMI_EATAN");
1879 if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
1880 switch(_Fsf_) {
1881 case 0: SSE_MOVSS_XMM_to_M32((uptr)s_tempmem, EEREC_S);
1882 case 1: SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
1883 default: SSE_MOVHPS_XMM_to_M64((uptr)&s_tempmem[2], EEREC_S);
1884 }
1885 FLD32((uptr)&s_tempmem[_Fsf_]);
1886 }
1887 else {
1888 if( xmmregs[EEREC_S].mode & MODE_WRITE ) {
1889 SSE_MOVAPS_XMM_to_M128((uptr)&VU->VF[_Fs_], EEREC_S);
1890 xmmregs[EEREC_S].mode &= ~MODE_WRITE;
1891 }
1892 }
1893
1894 FLD1();
1895 FLD32((uptr)&VU->VF[_Fs_].UL[_Fsf_]);
1896 FPATAN();
1897 FSTP32(VU_VI_ADDR(REG_P, 0));
1898 }
1899 //------------------------------------------------------------------
1900
1901
1902 //------------------------------------------------------------------
1903 // EEXP
1904 //------------------------------------------------------------------
1905 void recVUMI_EEXP( VURegs *VU, int info )
1906 {
1907 pxAssert( VU == &VU1 );
1908 //Console.WriteLn("recVUMI_EEXP");
1909 FLDL2E();
1910
1911 if( (xmmregs[EEREC_S].mode & MODE_WRITE) && (xmmregs[EEREC_S].mode&MODE_NOFLUSH) ) {
1912 switch(_Fsf_) {
1913 case 0: SSE_MOVSS_XMM_to_M32((uptr)s_tempmem, EEREC_S);
1914 case 1: SSE_MOVLPS_XMM_to_M64((uptr)s_tempmem, EEREC_S);
1915 default: SSE_MOVHPS_XMM_to_M64((uptr)&s_tempmem[2], EEREC_S);
1916 }
1917 FMUL32((uptr)&s_tempmem[_Fsf_]);
1918 }
1919 else {
1920 if( xmmregs[EEREC_S].mode & MODE_WRITE ) {
1921 SSE_MOVAPS_XMM_to_M128((uptr)&VU->VF[_Fs_], EEREC_S);
1922 xmmregs[EEREC_S].mode &= ~MODE_WRITE;
1923 }
1924
1925 FMUL32((uptr)&VU->VF[_Fs_].UL[_Fsf_]);
1926 }
1927
1928 // basically do 2^(log_2(e) * val)
1929 FLD(0);
1930 FRNDINT();
1931 FXCH(1);
1932 FSUB32Rto0(1);
1933 F2XM1();
1934 FLD1();
1935 FADD320toR(1);
1936 FSCALE();
1937 FSTP(1);
1938
1939 FSTP32(VU_VI_ADDR(REG_P, 0));
1940 }
1941 //------------------------------------------------------------------
1942
1943
1944 //------------------------------------------------------------------
1945 // XITOP
1946 //------------------------------------------------------------------
1947 void recVUMI_XITOP( VURegs *VU, int info )
1948 {
1949 int itreg;
1950 if (_It_ == 0) return;
1951 //Console.WriteLn("recVUMI_XITOP");
1952 itreg = ALLOCVI(_It_, MODE_WRITE);
1953 MOVZX32M16toR( itreg, (uptr)&VU->GetVifRegs().itop );
1954 }
1955 //------------------------------------------------------------------
1956
1957
1958 //------------------------------------------------------------------
1959 // XTOP
1960 //------------------------------------------------------------------
1961 void recVUMI_XTOP( VURegs *VU, int info )
1962 {
1963 int itreg;
1964 if ( _It_ == 0 ) return;
1965 //Console.WriteLn("recVUMI_XTOP");
1966 itreg = ALLOCVI(_It_, MODE_WRITE);
1967 MOVZX32M16toR( itreg, (uptr)&VU->GetVifRegs().top );
1968 }
1969 //------------------------------------------------------------------
1970
1971
1972 //------------------------------------------------------------------
1973 // VU1XGKICK_MTGSTransfer() - Called by ivuZerorec.cpp
1974 //------------------------------------------------------------------
1975 extern bool SIGNAL_IMR_Pending;
1976
1977 void __fastcall VU1XGKICK_MTGSTransfer(u32 *pMem, u32 addr)
1978 {
1979 addr &= 0x3fff;
1980 u8* data = VU1.Mem + (addr);
1981 u32 diff = 0x400 - (addr / 16);
1982 u32 size;
1983 u8* pDest;
1984
1985 ///////////////////////////////////////////////
1986 ///////////////SIGNAL WARNING!!////////////////
1987 ///////////////////////////////////////////////
1988 /* Due to the face SIGNAL can cause the loop
1989 to leave early, we can end up missing data.
1990 The only way we can avoid this is to queue
1991 it :(, im relying on someone else to come
1992 up with a better solution! */
1993
1994
1995 /*if(gifRegs.stat.APATH <= GIF_APATH1 || (gifRegs.stat.APATH == GIF_APATH3 && gifRegs.stat.IP3 == true) && SIGNAL_IMR_Pending == false)
1996 {
1997 if(Path1WritePos != 0)
1998 {
1999 //Flush any pending transfers so things dont go up in the wrong order
2000 while(gifRegs.stat.P1Q == true) gsPath1Interrupt();
2001 }
2002 GetMTGS().PrepDataPacket(GIF_PATH_1, 0x400);
2003 size = GIFPath_CopyTag(GIF_PATH_1, (u128*)data, diff);
2004 GetMTGS().SendDataPacket();
2005
2006 if(GSTransferStatus.PTH1 == STOPPED_MODE )
2007 {
2008 gifRegs.stat.OPH = false;
2009 gifRegs.stat.APATH = GIF_APATH_IDLE;
2010 }
2011 }
2012 else
2013 {*/
2014 //DevCon.Warning("GIF APATH busy %x Holding for later W %x, R %x", gifRegs.stat.APATH, Path1WritePos, Path1ReadPos);
2015 size = GIFPath_ParseTagQuick(GIF_PATH_1, data, diff);
2016 pDest = &Path1Buffer[Path1WritePos*16];
2017
2018 Path1WritePos += size;
2019
2020 pxAssumeMsg((Path1WritePos+size < sizeof(Path1Buffer)), "XGKick Buffer Overflow detected on Path1Buffer!");
2021
2022 if (size > diff) {
2023 //DevCon.Status("XGkick Wrap!");
2024 memcpy_qwc(pDest, VU1.Mem + addr, diff);
2025 memcpy_qwc(pDest+(diff*16), VU1.Mem, size-diff);
2026 }
2027 else {
2028 memcpy_qwc(pDest, VU1.Mem + addr, size);
2029 }
2030 //if(!gifRegs.stat.P1Q) CPU_INT(28, 128);
2031 gifRegs.stat.P1Q = true;
2032 //}
2033 gsPath1Interrupt();
2034 }
2035 //------------------------------------------------------------------

  ViewVC Help
Powered by ViewVC 1.1.22