/[pcsx2_0.9.7]/trunk/pcsx2/x86/microVU_Misc.inl


Revision 280
Thu Dec 23 12:02:12 2010 UTC by william
File size: 16157 byte(s)
re-commit (had local access denied errors when committing)
/* PCSX2 - PS2 Emulator for PCs
 * Copyright (C) 2002-2010 PCSX2 Dev Team
 *
 * PCSX2 is free software: you can redistribute it and/or modify it under the terms
 * of the GNU Lesser General Public License as published by the Free Software Found-
 * ation, either version 3 of the License, or (at your option) any later version.
 *
 * PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with PCSX2.
 * If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

//------------------------------------------------------------------
// Micro VU - Reg Loading/Saving/Shuffling/Unpacking/Merging...
//------------------------------------------------------------------

void mVUunpack_xyzw(const xmm& dstreg, const xmm& srcreg, int xyzw)
{
	switch (xyzw) {
		case 0: xPSHUF.D(dstreg, srcreg, 0x00); break; // XXXX
		case 1: xPSHUF.D(dstreg, srcreg, 0x55); break; // YYYY
		case 2: xPSHUF.D(dstreg, srcreg, 0xaa); break; // ZZZZ
		case 3: xPSHUF.D(dstreg, srcreg, 0xff); break; // WWWW
	}
}

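// For reference: the PSHUFD immediate packs four 2-bit source-lane selectors,
// bits [1:0] choosing dest dword 0 and bits [7:6] dest dword 3. A minimal
// sketch of the encoding (the helper name is ours, not part of the emitter):
#if 0
inline u8 mVUshuffleImm(int d0, int d1, int d2, int d3) {
	return u8((d3 << 6) | (d2 << 4) | (d1 << 2) | d0); // e.g. (1,1,1,1) -> 0x55 (YYYY)
}
#endif
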
void mVUloadReg(const xmm& reg, xAddressVoid ptr, int xyzw)
{
	switch (xyzw) {
		case 8:  xMOVSSZX(reg, ptr32[ptr]);    break; // X
		case 4:  xMOVSSZX(reg, ptr32[ptr+4]);  break; // Y
		case 2:  xMOVSSZX(reg, ptr32[ptr+8]);  break; // Z
		case 1:  xMOVSSZX(reg, ptr32[ptr+12]); break; // W
		default: xMOVAPS (reg, ptr128[ptr]);   break;
	}
}

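// The xyzw argument above is the VU destination mask: bit 3 = X, bit 2 = Y,
// bit 1 = Z, bit 0 = W, so each single-bit case maps to a 4-byte component
// offset. Illustrative only (this helper does not exist in PCSX2):
#if 0
inline int mVUcompOffset(int xyzw) {
	switch (xyzw) {
		case 8: return 0;   // X
		case 4: return 4;   // Y
		case 2: return 8;   // Z
		case 1: return 12;  // W
		default: return -1; // multiple components: use a full 128-bit load
	}
}
#endif
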
void mVUloadIreg(const xmm& reg, int xyzw, VURegs* vuRegs)
{
	xMOVSSZX(reg, ptr32[&vuRegs->VI[REG_I].UL]);
	if (!_XYZWss(xyzw)) xSHUF.PS(reg, reg, 0);
}

// Modifies the Source Reg!
void mVUsaveReg(const xmm& reg, xAddressVoid ptr, int xyzw, bool modXYZW)
{
	/*xMOVAPS(xmmT2, ptr128[ptr]);
	if (modXYZW && (xyzw == 8 || xyzw == 4 || xyzw == 2 || xyzw == 1)) {
		mVUunpack_xyzw(reg, reg, 0);
	}
	mVUmergeRegs(xmmT2, reg, xyzw);

	xMOVAPS(ptr128[ptr], xmmT2);
	return;*/

	switch (xyzw) {
		case 5:  if (x86caps.hasStreamingSIMD4Extensions) {
					xEXTRACTPS(ptr32[ptr+4],  reg, 1);
					xEXTRACTPS(ptr32[ptr+12], reg, 3);
				 }
				 else {
					xPSHUF.D(reg, reg, 0xe1); // WZXY
					xMOVSS(ptr32[ptr+4], reg);
					xPSHUF.D(reg, reg, 0xff); // WWWW
					xMOVSS(ptr32[ptr+12], reg);
				 }
				 break; // YW
		case 6:  xPSHUF.D(reg, reg, 0xc9);
				 xMOVL.PS(ptr64[ptr+4], reg);
				 break; // YZ
		case 7:  if (x86caps.hasStreamingSIMD4Extensions) {
					xMOVH.PS(ptr64[ptr+8], reg);
					xEXTRACTPS(ptr32[ptr+4], reg, 1);
				 }
				 else {
					xPSHUF.D(reg, reg, 0x93); // ZYXW
					xMOVH.PS(ptr64[ptr+4], reg);
					xMOVSS(ptr32[ptr+12], reg);
				 }
				 break; // YZW
		case 9:  if (x86caps.hasStreamingSIMD4Extensions) {
					xMOVSS(ptr32[ptr], reg);
					xEXTRACTPS(ptr32[ptr+12], reg, 3);
				 }
				 else {
					xMOVSS(ptr32[ptr], reg);
					xPSHUF.D(reg, reg, 0xff); // WWWW
					xMOVSS(ptr32[ptr+12], reg);
				 }
				 break; // XW
		case 10: if (x86caps.hasStreamingSIMD4Extensions) {
					xMOVSS(ptr32[ptr], reg);
					xEXTRACTPS(ptr32[ptr+8], reg, 2);
				 }
				 else {
					xMOVSS(ptr32[ptr], reg);
					xMOVHL.PS(reg, reg);
					xMOVSS(ptr32[ptr+8], reg);
				 }
				 break; // XZ
		case 11: xMOVSS(ptr32[ptr], reg);
				 xMOVH.PS(ptr64[ptr+8], reg);
				 break; // XZW
		case 13: if (x86caps.hasStreamingSIMD4Extensions) {
					xMOVL.PS(ptr64[ptr], reg);
					xEXTRACTPS(ptr32[ptr+12], reg, 3);
				 }
				 else {
					xPSHUF.D(reg, reg, 0x4b); // YXZW
					xMOVH.PS(ptr64[ptr], reg);
					xMOVSS(ptr32[ptr+12], reg);
				 }
				 break; // XYW
		case 14: if (x86caps.hasStreamingSIMD4Extensions) {
					xMOVL.PS(ptr64[ptr], reg);
					xEXTRACTPS(ptr32[ptr+8], reg, 2);
				 }
				 else {
					xMOVL.PS(ptr64[ptr], reg);
					xMOVHL.PS(reg, reg);
					xMOVSS(ptr32[ptr+8], reg);
				 }
				 break; // XYZ
		case 4:  if (!modXYZW) mVUunpack_xyzw(reg, reg, 1);
				 xMOVSS(ptr32[ptr+4], reg);
				 break; // Y
		case 2:  if (!modXYZW) mVUunpack_xyzw(reg, reg, 2);
				 xMOVSS(ptr32[ptr+8], reg);
				 break; // Z
		case 1:  if (!modXYZW) mVUunpack_xyzw(reg, reg, 3);
				 xMOVSS(ptr32[ptr+12], reg);
				 break; // W
		case 8:  xMOVSS  (ptr32[ptr],   reg); break; // X
		case 12: xMOVL.PS(ptr64[ptr],   reg); break; // XY
		case 3:  xMOVH.PS(ptr64[ptr+8], reg); break; // ZW
		default: xMOVAPS (ptr128[ptr],  reg); break; // XYZW
	}
}

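// Semantically, mVUsaveReg implements a masked store: only the components
// selected by xyzw are written back, and everything else in memory is
// preserved. A plain-C model of the intended effect (illustrative sketch):
#if 0
void saveRegModel(const float src[4], float* dst, int xyzw) {
	if (xyzw & 8) dst[0] = src[0]; // X
	if (xyzw & 4) dst[1] = src[1]; // Y
	if (xyzw & 2) dst[2] = src[2]; // Z
	if (xyzw & 1) dst[3] = src[3]; // W
}
#endif
// The SSE4 paths store non-contiguous components directly with EXTRACTPS;
// the fallbacks shuffle the wanted components into storable positions first.
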
// Modifies the Source Reg! (ToDo: Optimize modXYZW = 1 cases)
void mVUmergeRegs(const xmm& dest, const xmm& src, int xyzw, bool modXYZW)
{
	xyzw &= 0xf;
	if ((dest != src) && (xyzw != 0)) {
		if (x86caps.hasStreamingSIMD4Extensions && (xyzw != 0x8) && (xyzw != 0xf)) {
			if (modXYZW) {
				if      (xyzw == 1) { xINSERTPS(dest, src, _MM_MK_INSERTPS_NDX(0, 3, 0)); return; }
				else if (xyzw == 2) { xINSERTPS(dest, src, _MM_MK_INSERTPS_NDX(0, 2, 0)); return; }
				else if (xyzw == 4) { xINSERTPS(dest, src, _MM_MK_INSERTPS_NDX(0, 1, 0)); return; }
			}
			xyzw = ((xyzw & 1) << 3) | ((xyzw & 2) << 1) | ((xyzw & 4) >> 1) | ((xyzw & 8) >> 3);
			xBLEND.PS(dest, src, xyzw);
		}
		else {
			switch (xyzw) {
				case 1:  if (modXYZW) mVUunpack_xyzw(src, src, 0);
						 xMOVHL.PS(src, dest);      // src = Sw Sz Dw Dz
						 xSHUF.PS(dest, src, 0xc4); // 11 00 01 00
						 break;
				case 2:  if (modXYZW) mVUunpack_xyzw(src, src, 0);
						 xMOVHL.PS(src, dest);
						 xSHUF.PS(dest, src, 0x64);
						 break;
				case 3:  xSHUF.PS(dest, src, 0xe4);
						 break;
				case 4:  if (modXYZW) mVUunpack_xyzw(src, src, 0);
						 xMOVSS(src, dest);
						 xMOVSD(dest, src);
						 break;
				case 5:  xSHUF.PS(dest, src, 0xd8);
						 xPSHUF.D(dest, dest, 0xd8);
						 break;
				case 6:  xSHUF.PS(dest, src, 0x9c);
						 xPSHUF.D(dest, dest, 0x78);
						 break;
				case 7:  xMOVSS(src, dest);
						 xMOVAPS(dest, src);
						 break;
				case 8:  xMOVSS(dest, src);
						 break;
				case 9:  xSHUF.PS(dest, src, 0xc9);
						 xPSHUF.D(dest, dest, 0xd2);
						 break;
				case 10: xSHUF.PS(dest, src, 0x8d);
						 xPSHUF.D(dest, dest, 0x72);
						 break;
				case 11: xMOVSS(dest, src);
						 xSHUF.PS(dest, src, 0xe4);
						 break;
				case 12: xMOVSD(dest, src);
						 break;
				case 13: xMOVHL.PS(dest, src);
						 xSHUF.PS(src, dest, 0x64);
						 xMOVAPS(dest, src);
						 break;
				case 14: xMOVHL.PS(dest, src);
						 xSHUF.PS(src, dest, 0xc4);
						 xMOVAPS(dest, src);
						 break;
				default: xMOVAPS(dest, src);
						 break;
			}
		}
	}
}

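// Note on the BLENDPS path above: the VU mask is ordered X=bit3..W=bit0,
// while the BLENDPS immediate selects dest dword i with bit i (X=bit0..W=bit3),
// hence the 4-bit reversal before xBLEND.PS. A sketch of the mapping (our
// helper, not PCSX2's):
#if 0
inline int vuMaskToBlendImm(int xyzw) {
	return ((xyzw & 1) << 3) | ((xyzw & 2) << 1)
	     | ((xyzw & 4) >> 1) | ((xyzw & 8) >> 3); // e.g. XY (0xc) -> 0x3
}
#endif
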
//------------------------------------------------------------------
// Micro VU - Misc Functions
//------------------------------------------------------------------

// Backup volatile regs (EAX, ECX, EDX, MM0~7, and XMM0~7 are all volatile according to the 32-bit Win/Linux ABI)
__fi void mVUbackupRegs(microVU* mVU, bool toMemory = false)
{
	if (toMemory) {
		for (int i = 0; i < 8; i++) {
			xMOVAPS(ptr128[&mVU->xmmBackup[i][0]], xmm(i));
		}
	}
	else {
		mVU->regAlloc->flushAll(); // Flush regalloc
		xMOVAPS(ptr128[&mVU->xmmBackup[xmmPQ.Id][0]], xmmPQ);
	}
}

// Restore volatile regs
__fi void mVUrestoreRegs(microVU* mVU, bool fromMemory = false)
{
	if (fromMemory) {
		for (int i = 0; i < 8; i++) {
			xMOVAPS(xmm(i), ptr128[&mVU->xmmBackup[i][0]]);
		}
	}
	else xMOVAPS(xmmPQ, ptr128[&mVU->xmmBackup[xmmPQ.Id][0]]);
}

// Gets called by mVUaddrFix at execution-time
static void __fastcall mVUwarningRegAccess(u32 prog, u32 pc) { Console.Error("microVU0 Warning: Accessing VU1 Regs! [%04x] [%x]", pc, prog); }

// Transforms the address in gprReg to a valid VU0/VU1 address
__fi void mVUaddrFix(mV, const x32& gprReg)
{
	if (isVU1) {
		xAND(gprReg, 0x3ff); // wrap around
		xSHL(gprReg, 4);
	}
	else {
		if (IsDevBuild && !isCOP2) mVUbackupRegs(mVU, true);
		xTEST(gprReg, 0x400); // quadword index bit 0x400 == byte address 0x4000
		xForwardJNZ8 jmpA;    // if addr & 0x400, the access maps to VU1's VF and VI regs
		xAND(gprReg, 0xff);   // if !(addr & 0x400), wrap around VU0 memory
		xForwardJump8 jmpB;
		jmpA.SetTarget();
		if (IsDevBuild && !isCOP2) { // Let's see which games do this!
			xPUSH(gprT1); // Note: the kernel does it via COP2 to initialize VU1!
			xPUSH(gprT2); // So as to not spam the console, we only check micro-mode...
			xPUSH(gprT3);
			xMOV (gprT2, mVU->prog.cur->idx);
			xMOV (gprT3, xPC);
			xCALL(mVUwarningRegAccess);
			xPOP (gprT3);
			xPOP (gprT2);
			xPOP (gprT1);
		}
		xAND(gprReg, 0x3f); // ToDo: there's a potential problem if VU0 overrides VU1's VF0/VI0 regs!
		xADD(gprReg, (u128*)VU1.VF - (u128*)VU0.Mem);
		jmpB.SetTarget();
		xSHL(gprReg, 4); // multiply by 16 (shift left by 4)
		if (IsDevBuild && !isCOP2) mVUrestoreRegs(mVU, true);
	}
}

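// A plain-C model of the transform above: gprReg holds a quadword index, and
// the result is a byte offset into VU memory. For VU0, byte address 0x4000+
// aliases VU1's register file. Illustrative sketch, assuming those semantics;
// vu1RegsBase stands for the quadword delta (u128*)VU1.VF - (u128*)VU0.Mem:
#if 0
u32 addrFixModel(u32 qwIndex, bool isVU1) {
	if (isVU1) return (qwIndex & 0x3ff) << 4;         // wrap to 16kb, scale to bytes
	if (qwIndex & 0x400)                              // VU0 touching VU1's regs
		return ((qwIndex & 0x3f) + vu1RegsBase) << 4; // redirect into VU1's VF/VI file
	return (qwIndex & 0xff) << 4;                     // wrap to 4kb, scale to bytes
}
#endif
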
//------------------------------------------------------------------
// Micro VU - Custom SSE Instructions
//------------------------------------------------------------------

struct SSEMaskPair { u32 mask1[4], mask2[4]; };

static const __aligned16 SSEMaskPair MIN_MAX =
{
	{0xffffffff, 0x80000000, 0xffffffff, 0x80000000},
	{0x00000000, 0x40000000, 0x00000000, 0x40000000}
};

// Warning: Modifies t1 and t2
void MIN_MAX_PS(microVU* mVU, const xmm& to, const xmm& from, const xmm& t1in, const xmm& t2in, bool min)
{
	const xmm& t1 = t1in.IsEmpty() ? mVU->regAlloc->allocReg() : t1in;
	const xmm& t2 = t2in.IsEmpty() ? mVU->regAlloc->allocReg() : t2in;
	// ZW
	xPSHUF.D(t1, to, 0xfa);
	xPAND   (t1, ptr128[MIN_MAX.mask1]);
	xPOR    (t1, ptr128[MIN_MAX.mask2]);
	xPSHUF.D(t2, from, 0xfa);
	xPAND   (t2, ptr128[MIN_MAX.mask1]);
	xPOR    (t2, ptr128[MIN_MAX.mask2]);
	if (min) xMIN.PD(t1, t2);
	else     xMAX.PD(t1, t2);

	// XY
	xPSHUF.D(t2, from, 0x50);
	xPAND   (t2, ptr128[MIN_MAX.mask1]);
	xPOR    (t2, ptr128[MIN_MAX.mask2]);
	xPSHUF.D(to, to, 0x50);
	xPAND   (to, ptr128[MIN_MAX.mask1]);
	xPOR    (to, ptr128[MIN_MAX.mask2]);
	if (min) xMIN.PD(to, t2);
	else     xMAX.PD(to, t2);

	xSHUF.PS(to, t1, 0x88);
	if (t1 != t1in) mVU->regAlloc->clearNeeded(t1);
	if (t2 != t2in) mVU->regAlloc->clearNeeded(t2);
}

// Warning: Modifies to's upper 3 vectors, and t1
void MIN_MAX_SS(mV, const xmm& to, const xmm& from, const xmm& t1in, bool min)
{
	const xmm& t1 = t1in.IsEmpty() ? mVU->regAlloc->allocReg() : t1in;
	xSHUF.PS(to, from, 0);
	xPAND   (to, ptr128[MIN_MAX.mask1]);
	xPOR    (to, ptr128[MIN_MAX.mask2]);
	xPSHUF.D(t1, to, 0xee);
	if (min) xMIN.PD(to, t1);
	else     xMAX.PD(to, t1);
	if (t1 != t1in) mVU->regAlloc->clearNeeded(t1);
}

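// Both helpers above appear to rely on the same trick: VU min/max has no
// IEEE special-case semantics, so each float is re-encoded as a double whose
// ordering under MINPD/MAXPD matches the ordering of the raw float bits.
// Per element, the mask1/mask2 combination builds roughly this (our sketch):
#if 0
u64 floatToOrderedDouble(u32 f) {
	// low 32 bits:  the original float bits (become low mantissa bits);
	// high 32 bits: the float's sign plus a safe exponent (0x40000000).
	return (u64((f & 0x80000000) | 0x40000000) << 32) | f;
}
#endif
// Comparing two such doubles never hits NaN/denormal paths, and the winning
// double still carries the original float in its low 32 bits, which the final
// xSHUF.PS gathers back into the result.
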
// Warning: Modifies all vectors in 'to' and 'from', and modifies t1 and t2
void ADD_SS(microVU* mVU, const xmm& to, const xmm& from, const xmm& t1in, const xmm& t2in)
{
	const xmm& t1 = t1in.IsEmpty() ? mVU->regAlloc->allocReg() : t1in;
	const xmm& t2 = t2in.IsEmpty() ? mVU->regAlloc->allocReg() : t2in;

	xMOVAPS(t1, to);
	xMOVAPS(t2, from);
	xMOVD(ecx, to);
	xSHR(ecx, 23);
	xMOVD(eax, from);
	xSHR(eax, 23);
	xAND(ecx, 0xff);
	xAND(eax, 0xff);
	xSUB(ecx, eax); // ecx = exponent difference

	xCMP(ecx, 25);
	xForwardJGE8 case2;
	xCMP(ecx, 0);
	xForwardJG8 case3;
	xForwardJE8 toend1;
	xCMP(ecx, -25);
	xForwardJLE8 case4;

	// negative small
	xNOT(ecx); // -ecx - 1
	xMOV(eax, 0xffffffff);
	xSHL(eax, cl);
	xPCMP.EQB(to, to);
	xMOVDZX(from, eax);
	xMOVSS(to, from);
	xPCMP.EQB(from, from);
	xForwardJump8 toend2;

	case2.SetTarget(); // positive large
	xMOV(eax, 0x80000000);
	xPCMP.EQB(from, from);
	xMOVDZX(to, eax);
	xMOVSS(from, to);
	xPCMP.EQB(to, to);
	xForwardJump8 toend3;

	case3.SetTarget(); // positive small
	xDEC(ecx);
	xMOV(eax, 0xffffffff);
	xSHL(eax, cl);
	xPCMP.EQB(from, from);
	xMOVDZX(to, eax);
	xMOVSS(from, to);
	xPCMP.EQB(to, to);
	xForwardJump8 toend4;

	case4.SetTarget(); // negative large
	xMOV(eax, 0x80000000);
	xPCMP.EQB(to, to);
	xMOVDZX(from, eax);
	xMOVSS(to, from);
	xPCMP.EQB(from, from);

	toend1.SetTarget();
	toend2.SetTarget();
	toend3.SetTarget();
	toend4.SetTarget();

	xAND.PS(to, t1);   // to   contains mask
	xAND.PS(from, t2); // from contains mask
	xADD.SS(to, from);
	if (t1 != t1in) mVU->regAlloc->clearNeeded(t1);
	if (t2 != t2in) mVU->regAlloc->clearNeeded(t2);
}

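// Our reading of ADD_SS: it models the PS2 FMAC adder, where the operand with
// the smaller exponent has the mantissa bits it would shift out truncated
// rather than rounded. The masks built above implement roughly this per-case
// logic (plain-C sketch; names and the exact -1 offset follow the code above):
#if 0
u32 addMaskModel(int expDiff) {
	// expDiff = exponent(to) - exponent(from); the mask is applied to the
	// operand with the smaller exponent (the other keeps all of its bits).
	if (expDiff >= 25 || expDiff <= -25) return 0x80000000; // only the sign survives
	if (expDiff == 0)                    return 0xffffffff; // nothing truncated
	int shift = (expDiff > 0) ? expDiff - 1 : -expDiff - 1;
	return 0xffffffffu << shift; // drop the low 'shift' bits
}
#endif
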
#define clampOp(opX, isPS) { \
	mVUclamp3(mVU, to, t1, (isPS)?0xf:0x8); \
	mVUclamp3(mVU, from, t1, (isPS)?0xf:0x8); \
	opX(to, from); \
	mVUclamp4(to, t1, (isPS)?0xf:0x8); \
}

void SSE_MAXPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	if (CHECK_VU_MINMAXHACK) { xMAX.PS(to, from); }
	else                     { MIN_MAX_PS(mVU, to, from, t1, t2, 0); }
}
void SSE_MINPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	if (CHECK_VU_MINMAXHACK) { xMIN.PS(to, from); }
	else                     { MIN_MAX_PS(mVU, to, from, t1, t2, 1); }
}
void SSE_MAXSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	if (CHECK_VU_MINMAXHACK) { xMAX.SS(to, from); }
	else                     { MIN_MAX_SS(mVU, to, from, t1, 0); }
}
void SSE_MINSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	if (CHECK_VU_MINMAXHACK) { xMIN.SS(to, from); }
	else                     { MIN_MAX_SS(mVU, to, from, t1, 1); }
}
void SSE_ADD2SS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	if (!CHECK_VUADDSUBHACK) { clampOp(xADD.SS, 0); }
	else                     { ADD_SS(mVU, to, from, t1, t2); }
}

// FIXME: why do we need two identical definitions with different names?
void SSE_ADD2PS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xADD.PS, 1);
}
void SSE_ADDPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xADD.PS, 1);
}
void SSE_ADDSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xADD.SS, 0);
}
void SSE_SUBPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xSUB.PS, 1);
}
void SSE_SUBSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xSUB.SS, 0);
}
void SSE_MULPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xMUL.PS, 1);
}
void SSE_MULSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xMUL.SS, 0);
}
void SSE_DIVPS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xDIV.PS, 1);
}
void SSE_DIVSS(mV, const xmm& to, const xmm& from, const xmm& t1 = xEmptyReg, const xmm& t2 = xEmptyReg)
{
	clampOp(xDIV.SS, 0);
}

//------------------------------------------------------------------
// Micro VU - Custom Quick Search
//------------------------------------------------------------------

static __pagealigned u8 mVUsearchXMM[__pagesize];

// Generates a custom optimized block-search function
// Note: Structs must be 16-byte aligned! (GCC doesn't guarantee this)
void mVUcustomSearch() {
	HostSys::MemProtectStatic(mVUsearchXMM, PageAccess_ReadWrite());
	memset_8<0xcc, __pagesize>(mVUsearchXMM);
	xSetPtr(mVUsearchXMM);

	xMOVAPS  (xmm0, ptr32[ecx]);
	xPCMP.EQD(xmm0, ptr32[edx]);
	xMOVAPS  (xmm1, ptr32[ecx + 0x10]);
	xPCMP.EQD(xmm1, ptr32[edx + 0x10]);
	xPAND    (xmm0, xmm1);

	xMOVMSKPS(eax, xmm0);
	xCMP     (eax, 0xf);
	xForwardJL8 exitPoint;

	xMOVAPS  (xmm0, ptr32[ecx + 0x20]);
	xPCMP.EQD(xmm0, ptr32[edx + 0x20]);
	xMOVAPS  (xmm1, ptr32[ecx + 0x30]);
	xPCMP.EQD(xmm1, ptr32[edx + 0x30]);
	xPAND    (xmm0, xmm1);

	xMOVAPS  (xmm2, ptr32[ecx + 0x40]);
	xPCMP.EQD(xmm2, ptr32[edx + 0x40]);
	xMOVAPS  (xmm3, ptr32[ecx + 0x50]);
	xPCMP.EQD(xmm3, ptr32[edx + 0x50]);
	xPAND    (xmm2, xmm3);

	xMOVAPS  (xmm4, ptr32[ecx + 0x60]);
	xPCMP.EQD(xmm4, ptr32[edx + 0x60]);
	xMOVAPS  (xmm5, ptr32[ecx + 0x70]);
	xPCMP.EQD(xmm5, ptr32[edx + 0x70]);
	xPAND    (xmm4, xmm5);

	xMOVAPS  (xmm6, ptr32[ecx + 0x80]);
	xPCMP.EQD(xmm6, ptr32[edx + 0x80]);
	xMOVAPS  (xmm7, ptr32[ecx + 0x90]);
	xPCMP.EQD(xmm7, ptr32[edx + 0x90]);
	xPAND    (xmm6, xmm7);

	xPAND(xmm0, xmm2);
	xPAND(xmm4, xmm6);
	xPAND(xmm0, xmm4);
	xMOVMSKPS(eax, xmm0);

	exitPoint.SetTarget();
	xRET();
	HostSys::MemProtectStatic(mVUsearchXMM, PageAccess_ExecOnly());
}
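
// The generated routine compares two 160-byte (0xa0) blocks, 16 bytes at a
// time, with an early exit if the first 32 bytes already mismatch, and leaves
// 0xf in eax only when every dword matched. A plain-C model of the contract
// (illustrative only; the emitted code is what actually gets called):
#if 0
u32 searchModel(const u8* a, const u8* b) {
	return (memcmp(a, b, 0xa0) == 0) ? 0xf : 0; // on mismatch, eax may be any value < 0xf
}
#endif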
