/[pcsx2_0.9.7]/trunk/pcsx2/vtlb.cpp
Revision 401 - Fri Feb 25 17:31:09 2011 UTC by william
File size: 22461 byte(s)
Auto Commited Import of: pcsx2-0.9.7-DEBUG (upstream: v0.9.7.4358 local: v0.9.7.313-latest) in ./trunk

/* PCSX2 - PS2 Emulator for PCs
 * Copyright (C) 2002-2010 PCSX2 Dev Team
 *
 * PCSX2 is free software: you can redistribute it and/or modify it under the terms
 * of the GNU Lesser General Public License as published by the Free Software Found-
 * ation, either version 3 of the License, or (at your option) any later version.
 *
 * PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with PCSX2.
 * If not, see <http://www.gnu.org/licenses/>.
 */

/*
    EE physical map :
    [0000 0000,1000 0000) -> Ram (mirrored ?)
    [1000 0000,1400 0000) -> Registers
    [1400 0000,1fc0 0000) -> Reserved (ignored writes, 'random' reads)
    [1fc0 0000,2000 0000) -> Boot ROM

    [2000 0000,4000 0000) -> Unmapped (BUS ERROR)
    [4000 0000,8000 0000) -> "Extended memory", probably unmapped (BUS ERROR) on retail ps2's :)
    [8000 0000,FFFF FFFF] -> Unmapped (BUS ERROR)

    vtlb/phy only supports the [0000 0000,2000 0000) region, with 4k pages.
    vtlb/vmap supports mapping to either of these locations, or some other (externally) specified address.
*/
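
// Two lookup tables implement the scheme above (see vtlb_private::MapData):
//  * vtlbdata.pmap -- indexed by physical page; holds either a host pointer base or a
//    handler id with the top bit set. Filled by vtlb_MapHandler / vtlb_MapBlock.
//  * vtlbdata.vmap -- indexed by virtual page; holds (base - vaddr) deltas so that a
//    single add resolves an access. Filled by vtlb_VMap / vtlb_VMapBuffer / vtlb_VMapUnmap.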

#include "PrecompiledHeader.h"

#include "Common.h"
#include "vtlb.h"
#include "COP0.h"
#include "Cache.h"
#include "R5900Exceptions.h"

#include "Utilities/MemsetFast.inl"

using namespace R5900;
using namespace vtlb_private;

#define verify pxAssume

namespace vtlb_private
{
    __aligned(64) MapData vtlbdata;
}

static vtlbHandler vtlbHandlerCount = 0;

static vtlbHandler DefaultPhyHandler;
static vtlbHandler UnmappedVirtHandler0;
static vtlbHandler UnmappedVirtHandler1;
static vtlbHandler UnmappedPhyHandler0;
static vtlbHandler UnmappedPhyHandler1;

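// Helper for the interpreter memory ops below: returns true when the EE data cache is
// enabled (CP0 Config register, bit 16) and 'addr' lands inside a TLB entry whose cache
// mode bits (the C field of EntryLo0/EntryLo1) are set to cacheable (0x3), as range-checked
// against the entry's PFN0 and PageMask.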
__inline int CheckCache(u32 addr)
{
    u32 mask;

    if(((cpuRegs.CP0.n.Config >> 16) & 0x1) == 0)
    {
        //DevCon.Warning("Data Cache Disabled! %x", cpuRegs.CP0.n.Config);
        return false;
    }

    for(int i = 1; i < 48; i++)
    {
        if (((tlb[i].EntryLo1 & 0x38) >> 3) == 0x3 || ((tlb[i].EntryLo0 & 0x38) >> 3) == 0x3) {
            mask = tlb[i].PageMask;

            if ((addr >= tlb[i].PFN0) && (addr <= tlb[i].PFN0 + mask)) {
                //DevCon.Warning("Yay! Cache check cache addr=%x, mask=%x, addr+mask=%x, VPN2=%x", addr, mask, (addr & mask), tlb[i].VPN2);
                return true;
            }
        }
    }
    return false;
}
// --------------------------------------------------------------------------------------
//  Interpreter Implementations of VTLB Memory Operations.
// --------------------------------------------------------------------------------------
// See recVTLB.cpp for the dynarec versions.

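// How a lookup works: each vmap entry stores a 32-bit delta (base - vaddr), written by
// vtlb_VMap and friends further below. Adding that delta to the address being accessed
// yields either:
//  * a usable host pointer, for memory-backed pages (the sum is non-negative as an s32), or
//  * a handler encoding, for I/O or unmapped pages (the sum is negative: the base had its
//    top bit set and its low byte holds the handler index, so the code below recovers the
//    handler with (u8)vmv and the physical address with ppf - handler + 0x80000000).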
template< typename DataType >
DataType __fastcall vtlb_memRead(u32 addr)
{
    static const uint DataSize = sizeof(DataType) * 8;
    u32 vmv = vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
    s32 ppf = addr + vmv;

    if (!(ppf<0))
    {
        if (!CHECK_EEREC)
        {
            if(CHECK_CACHE && CheckCache(addr))
            {
                switch( DataSize )
                {
                    case 8:
                        return readCache8(addr);
                    case 16:
                        return readCache16(addr);
                    case 32:
                        return readCache32(addr);

                    jNO_DEFAULT;
                }
            }
        }

        return *reinterpret_cast<DataType*>(ppf);
    }

    // has to: translate, find function, call function
    u32 hand = (u8)vmv;
    u32 paddr = ppf - hand + 0x80000000;
    //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);
    //return reinterpret_cast<TemplateHelper<DataSize,false>::HandlerType*>(vtlbdata.RWFT[TemplateHelper<DataSize,false>::sidx][0][hand])(paddr,data);

    switch( DataSize )
    {
        case 8:
            return ((vtlbMemR8FP*)vtlbdata.RWFT[0][0][hand])(paddr);
        case 16:
            return ((vtlbMemR16FP*)vtlbdata.RWFT[1][0][hand])(paddr);
        case 32:
            return ((vtlbMemR32FP*)vtlbdata.RWFT[2][0][hand])(paddr);

        jNO_DEFAULT;
    }

    return 0; // technically unreachable, but suppresses warnings.
}

void __fastcall vtlb_memRead64(u32 mem, mem64_t *out)
{
    u32 vmv = vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
    s32 ppf = mem + vmv;

    if (!(ppf<0))
    {
        if (!CHECK_EEREC) {
            if(CHECK_CACHE && CheckCache(mem))
            {
                *out = readCache64(mem);
                return;
            }
        }

        *out = *(mem64_t*)ppf;
    }
    else
    {
        // has to: translate, find function, call function
        u32 hand = (u8)vmv;
        u32 paddr = ppf - hand + 0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        ((vtlbMemR64FP*)vtlbdata.RWFT[3][0][hand])(paddr, out);
    }
}

void __fastcall vtlb_memRead128(u32 mem, mem128_t *out)
{
    u32 vmv = vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
    s32 ppf = mem + vmv;

    if (!(ppf<0))
    {
        if (!CHECK_EEREC)
        {
            if(CHECK_CACHE && CheckCache(mem))
            {
                out->lo = readCache64(mem);
                out->hi = readCache64(mem+8);
                return;
            }
        }

        CopyQWC(out, (void*)ppf);
    }
    else
    {
        // has to: translate, find function, call function
        u32 hand = (u8)vmv;
        u32 paddr = ppf - hand + 0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        ((vtlbMemR128FP*)vtlbdata.RWFT[4][0][hand])(paddr, out);
    }
}

template< typename DataType >
void __fastcall vtlb_memWrite(u32 addr, DataType data)
{
    static const uint DataSize = sizeof(DataType) * 8;

    u32 vmv = vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
    s32 ppf = addr + vmv;
    if (!(ppf<0))
    {
        if (!CHECK_EEREC)
        {
            if(CHECK_CACHE && CheckCache(addr))
            {
                switch( DataSize )
                {
                    case 8:
                        writeCache8(addr, data);
                        return;
                    case 16:
                        writeCache16(addr, data);
                        return;
                    case 32:
                        writeCache32(addr, data);
                        return;
                }
            }
        }

        *reinterpret_cast<DataType*>(ppf) = data;
    }
    else
    {
        // has to: translate, find function, call function
        u32 hand = (u8)vmv;
        u32 paddr = ppf - hand + 0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        switch( DataSize )
        {
            case 8:
                return ((vtlbMemW8FP*)vtlbdata.RWFT[0][1][hand])(paddr, (u8)data);
            case 16:
                return ((vtlbMemW16FP*)vtlbdata.RWFT[1][1][hand])(paddr, (u16)data);
            case 32:
                return ((vtlbMemW32FP*)vtlbdata.RWFT[2][1][hand])(paddr, (u32)data);

            jNO_DEFAULT;
        }
    }
}

void __fastcall vtlb_memWrite64(u32 mem, const mem64_t* value)
{
    u32 vmv = vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
    s32 ppf = mem + vmv;
    if (!(ppf<0))
    {
        if (!CHECK_EEREC)
        {
            if(CHECK_CACHE && CheckCache(mem))
            {
                writeCache64(mem, *value);
                return;
            }
        }

        *(mem64_t*)ppf = *value;
    }
    else
    {
        // has to: translate, find function, call function
        u32 hand = (u8)vmv;
        u32 paddr = ppf - hand + 0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        ((vtlbMemW64FP*)vtlbdata.RWFT[3][1][hand])(paddr, value);
    }
}

void __fastcall vtlb_memWrite128(u32 mem, const mem128_t *value)
{
    u32 vmv = vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
    s32 ppf = mem + vmv;
    if (!(ppf<0))
    {
        if (!CHECK_EEREC)
        {
            if(CHECK_CACHE && CheckCache(mem))
            {
                writeCache128(mem, value);
                return;
            }
        }

        CopyQWC((void*)ppf, value);
    }
    else
    {
        // has to: translate, find function, call function
        u32 hand = (u8)vmv;
        u32 paddr = ppf - hand + 0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        ((vtlbMemW128FP*)vtlbdata.RWFT[4][1][hand])(paddr, value);
    }
}

template mem8_t vtlb_memRead<mem8_t>(u32 mem);
template mem16_t vtlb_memRead<mem16_t>(u32 mem);
template mem32_t vtlb_memRead<mem32_t>(u32 mem);
template void vtlb_memWrite<mem8_t>(u32 mem, mem8_t data);
template void vtlb_memWrite<mem16_t>(u32 mem, mem16_t data);
template void vtlb_memWrite<mem32_t>(u32 mem, mem32_t data);

// --------------------------------------------------------------------------------------
//  TLB Miss / BusError Handlers
// --------------------------------------------------------------------------------------
// These are valid VM memory errors that should typically be handled by the VM itself via
// its own cpu exception system.
//
// [TODO] Add first-chance debugging hooks to these exceptions!
//
// Important recompiler note: Mid-block Exception handling isn't reliable *yet* because
// memory ops don't flush the PC prior to invoking the indirect handlers.
// Generates a tlbMiss Exception
static __ri void vtlb_Miss(u32 addr, u32 mode)
{
    if( IsDevBuild )
        Cpu->ThrowCpuException( R5900Exception::TLBMiss( addr, !!mode ) );
    else
        Console.Error( R5900Exception::TLBMiss( addr, !!mode ).FormatMessage() );
}

// BusError exception: more serious than a TLB miss. If properly emulated the PS2 kernel
// itself would invoke a diagnostic/assertion screen that displays the cpu state at the
// time of the exception.
static __ri void vtlb_BusError(u32 addr, u32 mode)
{
    if( IsDevBuild )
        Cpu->ThrowCpuException( R5900Exception::BusError( addr, !!mode ) );
    else
        Console.Error( R5900Exception::BusError( addr, !!mode ).FormatMessage() );
}

#define _tmpl(ret) template<typename OperandType, u32 saddr> ret __fastcall

_tmpl(OperandType) vtlbUnmappedVReadSm(u32 addr)                    { vtlb_Miss(addr|saddr,0); return 0; }
_tmpl(void) vtlbUnmappedVReadLg(u32 addr, OperandType* data)        { vtlb_Miss(addr|saddr,0); }
_tmpl(void) vtlbUnmappedVWriteSm(u32 addr, OperandType data)        { vtlb_Miss(addr|saddr,1); }
_tmpl(void) vtlbUnmappedVWriteLg(u32 addr, const OperandType* data) { vtlb_Miss(addr|saddr,1); }

_tmpl(OperandType) vtlbUnmappedPReadSm(u32 addr)                    { vtlb_BusError(addr|saddr,0); return 0; }
_tmpl(void) vtlbUnmappedPReadLg(u32 addr, OperandType* data)        { vtlb_BusError(addr|saddr,0); }
_tmpl(void) vtlbUnmappedPWriteSm(u32 addr, OperandType data)        { vtlb_BusError(addr|saddr,1); }
_tmpl(void) vtlbUnmappedPWriteLg(u32 addr, const OperandType* data) { vtlb_BusError(addr|saddr,1); }

#undef _tmpl

// --------------------------------------------------------------------------------------
//  VTLB mapping errors
// --------------------------------------------------------------------------------------
// These errors are assertion/logic errors that should never occur if PCSX2 has been initialized
// properly. All addressable physical memory should be configured as TLBMiss or Bus Error.
//

static mem8_t __fastcall vtlbDefaultPhyRead8(u32 addr)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read8 from unmapped physical address @ 0x%08X.", addr));
    return 0;
}

static mem16_t __fastcall vtlbDefaultPhyRead16(u32 addr)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read16 from unmapped physical address @ 0x%08X.", addr));
    return 0;
}

static mem32_t __fastcall vtlbDefaultPhyRead32(u32 addr)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read32 from unmapped physical address @ 0x%08X.", addr));
    return 0;
}

static void __fastcall vtlbDefaultPhyRead64(u32 addr, mem64_t* dest)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read64 from unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyRead128(u32 addr, mem128_t* dest)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read128 from unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite8(u32 addr, mem8_t data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write8 to unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite16(u32 addr, mem16_t data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write16 to unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite32(u32 addr, mem32_t data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write32 to unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite64(u32 addr, const mem64_t* data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write64 to unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite128(u32 addr, const mem128_t* data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write128 to unmapped physical address @ 0x%08X.", addr));
}

// ===========================================================================================
//  VTLB Public API -- Init/Term/RegisterHandler stuff
// ===========================================================================================
//

// Assigns or re-assigns the callbacks for a VTLB memory handler. The handler defines specific behavior
// for how memory pages bound to the handler are read from / written to. If any of the handler pointers
// are NULL, the corresponding operations fall back to the default "unmapped physical" handlers above,
// which raise a dev-build assertion if the emulated app attempts to access them.
//
// Note: All handlers persist across calls to vtlb_Reset(), but are wiped/invalidated by calls to vtlb_Init()
//
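// Layout note: RWFT is indexed as [width][direction][handler], where width 0..4 selects the
// 8/16/32/64/128-bit operation, direction 0 is read and 1 is write, and handler is the id
// returned by vtlb_NewHandler / vtlb_RegisterHandler.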
__ri void vtlb_ReassignHandler( vtlbHandler rv,
    vtlbMemR8FP* r8, vtlbMemR16FP* r16, vtlbMemR32FP* r32, vtlbMemR64FP* r64, vtlbMemR128FP* r128,
    vtlbMemW8FP* w8, vtlbMemW16FP* w16, vtlbMemW32FP* w32, vtlbMemW64FP* w64, vtlbMemW128FP* w128 )
{
    pxAssume(rv < VTLB_HANDLER_ITEMS);

    vtlbdata.RWFT[0][0][rv] = (void*)((r8!=0)   ? r8   : vtlbDefaultPhyRead8);
    vtlbdata.RWFT[1][0][rv] = (void*)((r16!=0)  ? r16  : vtlbDefaultPhyRead16);
    vtlbdata.RWFT[2][0][rv] = (void*)((r32!=0)  ? r32  : vtlbDefaultPhyRead32);
    vtlbdata.RWFT[3][0][rv] = (void*)((r64!=0)  ? r64  : vtlbDefaultPhyRead64);
    vtlbdata.RWFT[4][0][rv] = (void*)((r128!=0) ? r128 : vtlbDefaultPhyRead128);

    vtlbdata.RWFT[0][1][rv] = (void*)((w8!=0)   ? w8   : vtlbDefaultPhyWrite8);
    vtlbdata.RWFT[1][1][rv] = (void*)((w16!=0)  ? w16  : vtlbDefaultPhyWrite16);
    vtlbdata.RWFT[2][1][rv] = (void*)((w32!=0)  ? w32  : vtlbDefaultPhyWrite32);
    vtlbdata.RWFT[3][1][rv] = (void*)((w64!=0)  ? w64  : vtlbDefaultPhyWrite64);
    vtlbdata.RWFT[4][1][rv] = (void*)((w128!=0) ? w128 : vtlbDefaultPhyWrite128);
}

vtlbHandler vtlb_NewHandler()
{
    pxAssertDev( vtlbHandlerCount < VTLB_HANDLER_ITEMS, "VTLB handler count overflow!" );
    return vtlbHandlerCount++;
}

// Registers a handler into the VTLB's internal handler array. The handler defines specific behavior
// for how memory pages bound to the handler are read from / written to. If any of the handler pointers
// are NULL, the corresponding operations fall back to the default "unmapped physical" handlers above,
// which raise a dev-build assertion if the emulated app attempts to access them.
//
// Note: All handlers persist across calls to vtlb_Reset(), but are wiped/invalidated by calls to vtlb_Init()
//
// Returns a handle for the newly created handler. See vtlb_MapHandler for use of the return value.
//
__ri vtlbHandler vtlb_RegisterHandler( vtlbMemR8FP* r8, vtlbMemR16FP* r16, vtlbMemR32FP* r32, vtlbMemR64FP* r64, vtlbMemR128FP* r128,
                                       vtlbMemW8FP* w8, vtlbMemW16FP* w16, vtlbMemW32FP* w32, vtlbMemW64FP* w64, vtlbMemW128FP* w128)
{
    vtlbHandler rv = vtlb_NewHandler();
    vtlb_ReassignHandler( rv, r8, r16, r32, r64, r128, w8, w16, w32, w64, w128 );
    return rv;
}


// Maps the given handler (created with vtlb_RegisterHandler) to the specified memory region.
// New mappings always assume priority over previous mappings, so place "generic" mappings for
// large areas of memory first, and then specialize specific small regions of memory afterward.
// A single handler can be mapped to many different regions by using multiple calls to this
// function.
//
// The memory region start and size parameters must be pagesize aligned.
void vtlb_MapHandler(vtlbHandler handler, u32 start, u32 size)
{
    verify(0==(start&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    s32 value = handler | 0x80000000;
    u32 end = start + (size - VTLB_PAGE_SIZE);
    pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );

    while (start <= end)
    {
        vtlbdata.pmap[start>>VTLB_PAGE_BITS] = value;
        start += VTLB_PAGE_SIZE;
    }
}
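
// Usage sketch (illustration only, not compiled): how a component might register a 32-bit-only
// handler and map it over one page of the register area. The names ExampleRegRead32 /
// ExampleRegWrite32 and the 0x10001000 target address are hypothetical; widths left NULL fall
// back to the default "unmapped physical" handlers above.
#if 0
static mem32_t __fastcall ExampleRegRead32(u32 paddr)               { return 0; }
static void    __fastcall ExampleRegWrite32(u32 paddr, mem32_t val) { /* latch val */ }

static void ExampleMapDevice()
{
    vtlbHandler h = vtlb_RegisterHandler(
        NULL, NULL, ExampleRegRead32,  NULL, NULL,    // 8/16/32/64/128-bit reads
        NULL, NULL, ExampleRegWrite32, NULL, NULL );  // 8/16/32/64/128-bit writes
    vtlb_MapHandler(h, 0x10001000, VTLB_PAGE_SIZE);   // one 4k page, page-aligned
}
#endif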

void vtlb_MapBlock(void* base, u32 start, u32 size, u32 blocksize)
{
    verify(0==(start&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);
    if (!blocksize)
        blocksize = size;
    verify(0==(blocksize&VTLB_PAGE_MASK) && blocksize>0);
    verify(0==(size%blocksize));

    s32 baseint = (s32)base;
    u32 end = start + (size - VTLB_PAGE_SIZE);
    pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );

    while (start <= end)
    {
        u32 loopsz = blocksize;
        s32 ptr = baseint;

        while (loopsz > 0)
        {
            vtlbdata.pmap[start>>VTLB_PAGE_BITS] = ptr;

            start += VTLB_PAGE_SIZE;
            ptr += VTLB_PAGE_SIZE;
            loopsz -= VTLB_PAGE_SIZE;
        }
    }
}

void vtlb_Mirror(u32 new_region, u32 start, u32 size)
{
    verify(0==(new_region&VTLB_PAGE_MASK));
    verify(0==(start&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    u32 end = start + (size-VTLB_PAGE_SIZE);
    pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );

    while(start <= end)
    {
        vtlbdata.pmap[start>>VTLB_PAGE_BITS] = vtlbdata.pmap[new_region>>VTLB_PAGE_BITS];

        start += VTLB_PAGE_SIZE;
        new_region += VTLB_PAGE_SIZE;
    }
}

__fi void* vtlb_GetPhyPtr(u32 paddr)
{
    if (paddr >= VTLB_PMAP_SZ || vtlbdata.pmap[paddr>>VTLB_PAGE_BITS] < 0)
        return NULL;
    else
        return reinterpret_cast<void*>(vtlbdata.pmap[paddr>>VTLB_PAGE_BITS] + (paddr&VTLB_PAGE_MASK));
}

// virtual mappings
// TODO: Add invalid paddr checks
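// Maps the virtual range [vaddr, vaddr+size) onto the physical range starting at paddr by
// writing (base - vaddr) deltas into vmap. Physical pages that are bound to a handler in
// pmap, or that lie beyond VTLB_PMAP_SZ, receive the negative handler encoding instead, with
// the physical address folded in so it can be recovered at access time.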
void vtlb_VMap(u32 vaddr, u32 paddr, u32 size)
{
    verify(0==(vaddr&VTLB_PAGE_MASK));
    verify(0==(paddr&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    while (size > 0)
    {
        s32 pme;
        if (paddr >= VTLB_PMAP_SZ)
        {
            pme = UnmappedPhyHandler0;
            if (paddr & 0x80000000)
                pme = UnmappedPhyHandler1;
            pme |= 0x80000000;
            pme |= paddr; // top bit is set anyway ...
        }
        else
        {
            pme = vtlbdata.pmap[paddr>>VTLB_PAGE_BITS];
            if (pme < 0)
                pme |= paddr; // top bit is set anyway ...
        }

        vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = pme - vaddr;
        vaddr += VTLB_PAGE_SIZE;
        paddr += VTLB_PAGE_SIZE;
        size -= VTLB_PAGE_SIZE;
    }
}

void vtlb_VMapBuffer(u32 vaddr, void* buffer, u32 size)
{
    verify(0==(vaddr&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    u32 bu8 = (u32)buffer;
    while (size > 0)
    {
        vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = bu8 - vaddr;
        vaddr += VTLB_PAGE_SIZE;
        bu8 += VTLB_PAGE_SIZE;
        size -= VTLB_PAGE_SIZE;
    }
}

void vtlb_VMapUnmap(u32 vaddr, u32 size)
{
    verify(0==(vaddr&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    while (size > 0)
    {
        u32 handl = UnmappedVirtHandler0;
        if (vaddr & 0x80000000)
        {
            handl = UnmappedVirtHandler1;
        }

        handl |= vaddr; // top bit is set anyway ...
        handl |= 0x80000000;

        vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = handl - vaddr;
        vaddr += VTLB_PAGE_SIZE;
        size -= VTLB_PAGE_SIZE;
    }
}

// vtlb_Init -- Clears vtlb handlers and memory mappings.
void vtlb_Init()
{
    vtlbHandlerCount = 0;
    memzero(vtlbdata.RWFT);

#define VTLB_BuildUnmappedHandler(baseName, highBit) \
    baseName##ReadSm<mem8_t,highBit>,   baseName##ReadSm<mem16_t,highBit>,  baseName##ReadSm<mem32_t,highBit>, \
    baseName##ReadLg<mem64_t,highBit>,  baseName##ReadLg<mem128_t,highBit>, \
    baseName##WriteSm<mem8_t,highBit>,  baseName##WriteSm<mem16_t,highBit>, baseName##WriteSm<mem32_t,highBit>, \
    baseName##WriteLg<mem64_t,highBit>, baseName##WriteLg<mem128_t,highBit>

    // Register default handlers.
    // Unmapped Virt handlers _MUST_ be registered first.
    // On address translation the top bit cannot be preserved. This is not normally a problem, since
    // the physical address space can be 'compressed' to just 29 bits. However, to properly handle
    // exceptions there must be a way to get the full address back. That's why two handler sets are
    // registered here, with the high bit encoded directly into them. :)
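    // For example, an access to unmapped 0x80001234 reaches UnmappedVirtHandler1 with the top
    // bit already stripped by the translation math above; OR-ing saddr (0x80000000) back in
    // lets vtlb_Miss report the original 0x80001234.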

    UnmappedVirtHandler0 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedV, 0) );
    UnmappedVirtHandler1 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedV, 0x80000000) );

    UnmappedPhyHandler0 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedP, 0) );
    UnmappedPhyHandler1 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedP, 0x80000000) );

    DefaultPhyHandler = vtlb_RegisterHandler(0,0,0,0,0,0,0,0,0,0);

    // done!

    // Setup the initial mappings
    vtlb_MapHandler(DefaultPhyHandler, 0, VTLB_PMAP_SZ);

    // Set the V space as unmapped
    vtlb_VMapUnmap(0, (VTLB_VMAP_ITEMS-1)*VTLB_PAGE_SIZE);
    // yeah i know, its stupid .. but this code has to be here for now ;p
    vtlb_VMapUnmap((VTLB_VMAP_ITEMS-1)*VTLB_PAGE_SIZE, VTLB_PAGE_SIZE);

    extern void vtlb_dynarec_init();
    vtlb_dynarec_init();
}

// vtlb_Reset -- Performs a COP0-level reset of the PS2's TLB.
// This function should probably be part of the COP0 rather than here in VTLB.
void vtlb_Reset()
{
    for(int i=0; i<48; i++) UnmapTLB(i);
}

void vtlb_Term()
{
    // nothing to do for now
}

// Reserves the vtlb core allocation used by various emulation components!
// [TODO] basemem - request allocating memory at the specified virtual location, which can allow
//     for easier debugging and/or 3rd party cheat programs. If 0, the operating system
//     default is used.
void vtlb_Core_Alloc()
{
    if (!vtlbdata.vmap)
    {
        vtlbdata.vmap = (s32*)_aligned_malloc( VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap), 16 );
        if (!vtlbdata.vmap)
            throw Exception::OutOfMemory( L"VTLB Virtual Address Translation LUT" )
                .SetDiagMsg(pxsFmt("(%u megs)", VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap) / _1mb));
    }
}

void vtlb_Core_Free()
{
    safe_aligned_free( vtlbdata.vmap );
}

static wxString GetHostVmErrorMsg()
{
    return pxE("!Notice:HostVmReserve",
        L"Your system is too low on virtual resources for PCSX2 to run. This can be "
        L"caused by having a small or disabled swapfile, or by other programs that are "
        L"hogging resources."
    );
}

// --------------------------------------------------------------------------------------
//  VtlbMemoryReserve  (implementations)
// --------------------------------------------------------------------------------------
VtlbMemoryReserve::VtlbMemoryReserve( const wxString& name, size_t size )
    : m_reserve( name, size )
{
    m_reserve.SetPageAccessOnCommit( PageAccess_ReadWrite() );
}

void VtlbMemoryReserve::SetBaseAddr( uptr newaddr )
{
    m_reserve.SetBaseAddr( newaddr );
}

void VtlbMemoryReserve::Reserve( sptr hostptr )
{
    if (!m_reserve.ReserveAt( hostptr ))
    {
        throw Exception::OutOfMemory( m_reserve.GetName() )
            .SetDiagMsg(L"Vtlb memory could not be reserved.")
            .SetUserMsg(GetHostVmErrorMsg());
    }
}

void VtlbMemoryReserve::Commit()
{
    if (IsCommitted()) return;
    if (!m_reserve.Commit())
    {
        throw Exception::OutOfMemory( m_reserve.GetName() )
            .SetDiagMsg(L"Vtlb memory could not be committed.")
            .SetUserMsg(GetHostVmErrorMsg());
    }
}

void VtlbMemoryReserve::Reset()
{
    Commit();
    memzero_sse_a(m_reserve.GetPtr(), m_reserve.GetCommittedBytes());
}

void VtlbMemoryReserve::Decommit()
{
    m_reserve.Reset();
}

void VtlbMemoryReserve::Release()
{
    m_reserve.Release();
}

bool VtlbMemoryReserve::IsCommitted() const
{
    return !!m_reserve.GetCommittedPageCount();
}
