/[pcsx2_0.9.7]/trunk/pcsx2/vtlb.cpp

Contents of /trunk/pcsx2/vtlb.cpp



Revision 280
Thu Dec 23 12:02:12 2010 UTC by william
File size: 20692 byte(s)
re-commit (had local access denied errors when committing)
/* PCSX2 - PS2 Emulator for PCs
 * Copyright (C) 2002-2010 PCSX2 Dev Team
 *
 * PCSX2 is free software: you can redistribute it and/or modify it under the terms
 * of the GNU Lesser General Public License as published by the Free Software Found-
 * ation, either version 3 of the License, or (at your option) any later version.
 *
 * PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with PCSX2.
 * If not, see <http://www.gnu.org/licenses/>.
 */

/*
    EE physical map :
    [0000 0000,1000 0000) -> Ram (mirrored ?)
    [1000 0000,1400 0000) -> Registers
    [1400 0000,1fc0 0000) -> Reserved (ignored writes, 'random' reads)
    [1fc0 0000,2000 0000) -> Boot ROM

    [2000 0000,4000 0000) -> Unmapped (BUS ERROR)
    [4000 0000,8000 0000) -> "Extended memory", probably unmapped (BUS ERROR) on retail ps2's :)
    [8000 0000,FFFF FFFF] -> Unmapped (BUS ERROR)

    vtlb/phy only supports the [0000 0000,2000 0000) region, with 4k pages.
    vtlb/vmap supports mapping to either of these locations, or some other (externally) specified address.
*/

#include "PrecompiledHeader.h"

#include "Common.h"
#include "vtlb.h"
#include "COP0.h"
#include "R5900Exceptions.h"

#include "Utilities/MemsetFast.inl"

using namespace R5900;
using namespace vtlb_private;

#define verify pxAssume

namespace vtlb_private
{
    __aligned(64) MapData vtlbdata;
}

static vtlbHandler vtlbHandlerCount = 0;

static vtlbHandler DefaultPhyHandler;
static vtlbHandler UnmappedVirtHandler0;
static vtlbHandler UnmappedVirtHandler1;
static vtlbHandler UnmappedPhyHandler0;
static vtlbHandler UnmappedPhyHandler1;


// --------------------------------------------------------------------------------------
//  Interpreter Implementations of VTLB Memory Operations.
// --------------------------------------------------------------------------------------
// See recVTLB.cpp for the dynarec versions.
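//
// Quick sketch of the lookup scheme used below (this just summarizes the code that follows):
// each vmap entry stores (destination - virtual_page_base).  For pages backed by real memory
// the destination is a host pointer, so (addr + vmap[addr>>VTLB_PAGE_BITS]) is directly
// dereferenceable and reads as a non-negative s32.  For handler-backed pages the destination
// has its sign bit set and carries the handler index in its low 8 bits plus the physical page
// address, so a negative result selects the indirect-handler path.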

template< typename DataType >
DataType __fastcall vtlb_memRead(u32 addr)
{
    static const uint DataSize = sizeof(DataType) * 8;
    u32 vmv=vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
    s32 ppf=addr+vmv;

    if (!(ppf<0))
        return *reinterpret_cast<DataType*>(ppf);

    //has to: translate, find function, call function
    u32 hand=(u8)vmv;
    u32 paddr=ppf-hand+0x80000000;
    //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);
    //return reinterpret_cast<TemplateHelper<DataSize,false>::HandlerType*>(vtlbdata.RWFT[TemplateHelper<DataSize,false>::sidx][0][hand])(paddr,data);

    switch( DataSize )
    {
        case 8: return ((vtlbMemR8FP*)vtlbdata.RWFT[0][0][hand])(paddr);
        case 16: return ((vtlbMemR16FP*)vtlbdata.RWFT[1][0][hand])(paddr);
        case 32: return ((vtlbMemR32FP*)vtlbdata.RWFT[2][0][hand])(paddr);

        jNO_DEFAULT;
    }

    return 0; // technically unreachable, but suppresses warnings.
}

void __fastcall vtlb_memRead64(u32 mem, mem64_t *out)
{
    u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
    s32 ppf=mem+vmv;

    if (!(ppf<0))
    {
        *out = *(mem64_t*)ppf;
    }
    else
    {
        //has to: translate, find function, call function
        u32 hand=(u8)vmv;
        u32 paddr=ppf-hand+0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        ((vtlbMemR64FP*)vtlbdata.RWFT[3][0][hand])(paddr, out);
    }
}

void __fastcall vtlb_memRead128(u32 mem, mem128_t *out)
{
    u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
    s32 ppf=mem+vmv;

    if (!(ppf<0))
    {
        CopyQWC(out,(void*)ppf);
    }
    else
    {
        //has to: translate, find function, call function
        u32 hand=(u8)vmv;
        u32 paddr=ppf-hand+0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        ((vtlbMemR128FP*)vtlbdata.RWFT[4][0][hand])(paddr, out);
    }
}

template< typename DataType >
void __fastcall vtlb_memWrite(u32 addr, DataType data)
{
    static const uint DataSize = sizeof(DataType) * 8;

    u32 vmv=vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
    s32 ppf=addr+vmv;
    if (!(ppf<0))
    {
        *reinterpret_cast<DataType*>(ppf)=data;
    }
    else
    {
        //has to: translate, find function, call function
        u32 hand=(u8)vmv;
        u32 paddr=ppf-hand+0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        switch( DataSize )
        {
            case 8: return ((vtlbMemW8FP*)vtlbdata.RWFT[0][1][hand])(paddr, (u8)data);
            case 16: return ((vtlbMemW16FP*)vtlbdata.RWFT[1][1][hand])(paddr, (u16)data);
            case 32: return ((vtlbMemW32FP*)vtlbdata.RWFT[2][1][hand])(paddr, (u32)data);

            jNO_DEFAULT;
        }
    }
}

void __fastcall vtlb_memWrite64(u32 mem, const mem64_t* value)
{
    u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
    s32 ppf=mem+vmv;
    if (!(ppf<0))
    {
        *(mem64_t*)ppf = *value;
    }
    else
    {
        //has to: translate, find function, call function
        u32 hand=(u8)vmv;
        u32 paddr=ppf-hand+0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        ((vtlbMemW64FP*)vtlbdata.RWFT[3][1][hand])(paddr, value);
    }
}

void __fastcall vtlb_memWrite128(u32 mem, const mem128_t *value)
{
    u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
    s32 ppf=mem+vmv;
    if (!(ppf<0))
    {
        CopyQWC((void*)ppf, value);
    }
    else
    {
        //has to: translate, find function, call function
        u32 hand=(u8)vmv;
        u32 paddr=ppf-hand+0x80000000;
        //Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);

        ((vtlbMemW128FP*)vtlbdata.RWFT[4][1][hand])(paddr, value);
    }
}

template mem8_t vtlb_memRead<mem8_t>(u32 mem);
template mem16_t vtlb_memRead<mem16_t>(u32 mem);
template mem32_t vtlb_memRead<mem32_t>(u32 mem);
template void vtlb_memWrite<mem8_t>(u32 mem, mem8_t data);
template void vtlb_memWrite<mem16_t>(u32 mem, mem16_t data);
template void vtlb_memWrite<mem32_t>(u32 mem, mem32_t data);

// --------------------------------------------------------------------------------------
//  TLB Miss / BusError Handlers
// --------------------------------------------------------------------------------------
// These are valid VM memory errors that should typically be handled by the VM itself via
// its own cpu exception system.
//
// [TODO] Add first-chance debugging hooks to these exceptions!
//
// Important recompiler note: Mid-block Exception handling isn't reliable *yet* because
// memory ops don't flush the PC prior to invoking the indirect handlers.

// Generates a tlbMiss Exception
static __ri void vtlb_Miss(u32 addr,u32 mode)
{
    if( IsDevBuild )
        Cpu->ThrowCpuException( R5900Exception::TLBMiss( addr, !!mode ) );
    else
        Console.Error( R5900Exception::TLBMiss( addr, !!mode ).FormatMessage() );
}

// BusError exception: more serious than a TLB miss.  If properly emulated the PS2 kernel
// itself would invoke a diagnostic/assertion screen that displays the cpu state at the
// time of the exception.
static __ri void vtlb_BusError(u32 addr,u32 mode)
{
    if( IsDevBuild )
        Cpu->ThrowCpuException( R5900Exception::BusError( addr, !!mode ) );
    else
        Console.Error( R5900Exception::BusError( addr, !!mode ).FormatMessage() );
}

#define _tmpl(ret) template<typename OperandType, u32 saddr> ret __fastcall

_tmpl(OperandType) vtlbUnmappedVReadSm(u32 addr) { vtlb_Miss(addr|saddr,0); return 0; }
_tmpl(void) vtlbUnmappedVReadLg(u32 addr,OperandType* data) { vtlb_Miss(addr|saddr,0); }
_tmpl(void) vtlbUnmappedVWriteSm(u32 addr,OperandType data) { vtlb_Miss(addr|saddr,1); }
_tmpl(void) vtlbUnmappedVWriteLg(u32 addr,const OperandType* data) { vtlb_Miss(addr|saddr,1); }

_tmpl(OperandType) vtlbUnmappedPReadSm(u32 addr) { vtlb_BusError(addr|saddr,0); return 0; }
_tmpl(void) vtlbUnmappedPReadLg(u32 addr,OperandType* data) { vtlb_BusError(addr|saddr,0); }
_tmpl(void) vtlbUnmappedPWriteSm(u32 addr,OperandType data) { vtlb_BusError(addr|saddr,1); }
_tmpl(void) vtlbUnmappedPWriteLg(u32 addr,const OperandType* data) { vtlb_BusError(addr|saddr,1); }

#undef _tmpl

// --------------------------------------------------------------------------------------
//  VTLB mapping errors
// --------------------------------------------------------------------------------------
// These errors are assertion/logic errors that should never occur if PCSX2 has been initialized
// properly.  All addressable physical memory should be configured as TLBMiss or Bus Error.
//

static mem8_t __fastcall vtlbDefaultPhyRead8(u32 addr)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read8 from unmapped physical address @ 0x%08X.", addr));
    return 0;
}

static mem16_t __fastcall vtlbDefaultPhyRead16(u32 addr)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read16 from unmapped physical address @ 0x%08X.", addr));
    return 0;
}

static mem32_t __fastcall vtlbDefaultPhyRead32(u32 addr)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read32 from unmapped physical address @ 0x%08X.", addr));
    return 0;
}

static void __fastcall vtlbDefaultPhyRead64(u32 addr, mem64_t* dest)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read64 from unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyRead128(u32 addr, mem128_t* dest)
{
    pxFailDev(pxsFmt("(VTLB) Attempted read128 from unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite8(u32 addr, mem8_t data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write8 to unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite16(u32 addr, mem16_t data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write16 to unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite32(u32 addr, mem32_t data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write32 to unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite64(u32 addr,const mem64_t* data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write64 to unmapped physical address @ 0x%08X.", addr));
}

static void __fastcall vtlbDefaultPhyWrite128(u32 addr,const mem128_t* data)
{
    pxFailDev(pxsFmt("(VTLB) Attempted write128 to unmapped physical address @ 0x%08X.", addr));
}

// ===========================================================================================
//  VTLB Public API -- Init/Term/RegisterHandler stuff
// ===========================================================================================
//

// Assigns or re-assigns the callbacks for a VTLB memory handler.  The handler defines specific behavior
// for how memory pages bound to the handler are read from / written to.  If any of the handler pointers
// are NULL, the corresponding operations fall back to the default unmapped-physical handlers
// (which raise a dev assertion if the emulated app attempts to use them; see 'VTLB mapping errors' above).
//
// Note: All handlers persist across calls to vtlb_Reset(), but are wiped/invalidated by calls to vtlb_Init()
//
__ri void vtlb_ReassignHandler( vtlbHandler rv,
    vtlbMemR8FP* r8,vtlbMemR16FP* r16,vtlbMemR32FP* r32,vtlbMemR64FP* r64,vtlbMemR128FP* r128,
    vtlbMemW8FP* w8,vtlbMemW16FP* w16,vtlbMemW32FP* w32,vtlbMemW64FP* w64,vtlbMemW128FP* w128 )
{
    pxAssume(rv < VTLB_HANDLER_ITEMS);

    vtlbdata.RWFT[0][0][rv] = (void*)((r8!=0) ? r8 : vtlbDefaultPhyRead8);
    vtlbdata.RWFT[1][0][rv] = (void*)((r16!=0) ? r16 : vtlbDefaultPhyRead16);
    vtlbdata.RWFT[2][0][rv] = (void*)((r32!=0) ? r32 : vtlbDefaultPhyRead32);
    vtlbdata.RWFT[3][0][rv] = (void*)((r64!=0) ? r64 : vtlbDefaultPhyRead64);
    vtlbdata.RWFT[4][0][rv] = (void*)((r128!=0) ? r128 : vtlbDefaultPhyRead128);

    vtlbdata.RWFT[0][1][rv] = (void*)((w8!=0) ? w8 : vtlbDefaultPhyWrite8);
    vtlbdata.RWFT[1][1][rv] = (void*)((w16!=0) ? w16 : vtlbDefaultPhyWrite16);
    vtlbdata.RWFT[2][1][rv] = (void*)((w32!=0) ? w32 : vtlbDefaultPhyWrite32);
    vtlbdata.RWFT[3][1][rv] = (void*)((w64!=0) ? w64 : vtlbDefaultPhyWrite64);
    vtlbdata.RWFT[4][1][rv] = (void*)((w128!=0) ? w128 : vtlbDefaultPhyWrite128);
}

vtlbHandler vtlb_NewHandler()
{
    pxAssertDev( vtlbHandlerCount < VTLB_HANDLER_ITEMS, "VTLB handler count overflow!" );
    return vtlbHandlerCount++;
}

// Registers a handler into the VTLB's internal handler array.  The handler defines specific behavior
// for how memory pages bound to the handler are read from / written to.  If any of the handler pointers
// are NULL, the corresponding operations fall back to the default unmapped-physical handlers
// (which raise a dev assertion if the emulated app attempts to use them; see 'VTLB mapping errors' above).
//
// Note: All handlers persist across calls to vtlb_Reset(), but are wiped/invalidated by calls to vtlb_Init()
//
// Returns a handle for the newly created handler.  See vtlb_MapHandler for use of the return value.
//
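// Example usage (illustrative sketch only; the callback names are hypothetical):
//
//   vtlbHandler myHandler = vtlb_RegisterHandler(
//       myRead8,  myRead16,  myRead32,  myRead64,  myRead128,
//       myWrite8, myWrite16, myWrite32, myWrite64, myWrite128 );
//   vtlb_MapHandler( myHandler, 0x10000000, 0x10000 );
//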
__ri vtlbHandler vtlb_RegisterHandler( vtlbMemR8FP* r8,vtlbMemR16FP* r16,vtlbMemR32FP* r32,vtlbMemR64FP* r64,vtlbMemR128FP* r128,
    vtlbMemW8FP* w8,vtlbMemW16FP* w16,vtlbMemW32FP* w32,vtlbMemW64FP* w64,vtlbMemW128FP* w128)
{
    vtlbHandler rv = vtlb_NewHandler();
    vtlb_ReassignHandler( rv, r8, r16, r32, r64, r128, w8, w16, w32, w64, w128 );
    return rv;
}


// Maps the given handler (created with vtlb_RegisterHandler) to the specified memory region.
// New mappings always assume priority over previous mappings, so place "generic" mappings for
// large areas of memory first, and then specialize specific small regions of memory afterward.
// A single handler can be mapped to many different regions by using multiple calls to this
// function.
//
// The memory region start and size parameters must be pagesize aligned.
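//
// Example (illustrative only; the handler names are hypothetical): map a generic handler
// over a large region first, then override a single page with a specialized handler:
//
//   vtlb_MapHandler( genericHandler, 0x10000000, 0x01000000 );
//   vtlb_MapHandler( specialHandler, 0x10003000, VTLB_PAGE_SIZE );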
void vtlb_MapHandler(vtlbHandler handler, u32 start, u32 size)
{
    verify(0==(start&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    s32 value = handler | 0x80000000;
    u32 end = start + (size - VTLB_PAGE_SIZE);
    pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );

    while (start <= end)
    {
        vtlbdata.pmap[start>>VTLB_PAGE_BITS] = value;
        start += VTLB_PAGE_SIZE;
    }
}

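// Maps a block of contiguous host memory at 'base' to the physical region [start, start+size).
// If a nonzero blocksize smaller than size is given, the same host block is tiled (mirrored)
// across the region in blocksize steps; blocksize must evenly divide size.  A blocksize of 0
// maps the block exactly once.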
void vtlb_MapBlock(void* base, u32 start, u32 size, u32 blocksize)
{
    verify(0==(start&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);
    if (!blocksize)
        blocksize = size;
    verify(0==(blocksize&VTLB_PAGE_MASK) && blocksize>0);
    verify(0==(size%blocksize));

    s32 baseint = (s32)base;
    u32 end = start + (size - VTLB_PAGE_SIZE);
    pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );

    while (start <= end)
    {
        u32 loopsz = blocksize;
        s32 ptr = baseint;

        while (loopsz > 0)
        {
            vtlbdata.pmap[start>>VTLB_PAGE_BITS] = ptr;

            start += VTLB_PAGE_SIZE;
            ptr += VTLB_PAGE_SIZE;
            loopsz -= VTLB_PAGE_SIZE;
        }
    }
}

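// Makes the physical region [start, start+size) mirror whatever is currently mapped at
// [new_region, new_region+size), by copying the corresponding pmap entries page by page.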
void vtlb_Mirror(u32 new_region,u32 start,u32 size)
{
    verify(0==(new_region&VTLB_PAGE_MASK));
    verify(0==(start&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    u32 end = start + (size-VTLB_PAGE_SIZE);
    pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );

    while(start <= end)
    {
        vtlbdata.pmap[start>>VTLB_PAGE_BITS] = vtlbdata.pmap[new_region>>VTLB_PAGE_BITS];

        start += VTLB_PAGE_SIZE;
        new_region += VTLB_PAGE_SIZE;
    }
}

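// Returns the host pointer backing the given physical address, or NULL if the address is
// out of range or is mapped to an indirect handler rather than to memory.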
__fi void* vtlb_GetPhyPtr(u32 paddr)
{
    if (paddr>=VTLB_PMAP_SZ || vtlbdata.pmap[paddr>>VTLB_PAGE_BITS]<0)
        return NULL;
    else
        return reinterpret_cast<void*>(vtlbdata.pmap[paddr>>VTLB_PAGE_BITS]+(paddr&VTLB_PAGE_MASK));
}

// Virtual mappings
// TODO: Add invalid paddr checks
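//
// Note: for handler-backed pages the physical address is folded (OR'd) into the vmap entry
// alongside the handler index, so the indirect handlers can later recover the address they
// were invoked for (see the note about the lost high bit in vtlb_Init below).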
void vtlb_VMap(u32 vaddr,u32 paddr,u32 size)
{
    verify(0==(vaddr&VTLB_PAGE_MASK));
    verify(0==(paddr&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    while (size > 0)
    {
        s32 pme;
        if (paddr >= VTLB_PMAP_SZ)
        {
            pme = UnmappedPhyHandler0;
            if (paddr & 0x80000000)
                pme = UnmappedPhyHandler1;
            pme |= 0x80000000;
            pme |= paddr; // top bit is set anyway ...
        }
        else
        {
            pme = vtlbdata.pmap[paddr>>VTLB_PAGE_BITS];
            if (pme<0)
                pme |= paddr; // top bit is set anyway ...
        }

        vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = pme-vaddr;
        vaddr += VTLB_PAGE_SIZE;
        paddr += VTLB_PAGE_SIZE;
        size -= VTLB_PAGE_SIZE;
    }
}

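// Maps the virtual region [vaddr, vaddr+size) directly onto a host buffer, bypassing the
// physical mapping table entirely.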
void vtlb_VMapBuffer(u32 vaddr,void* buffer,u32 size)
{
    verify(0==(vaddr&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    u32 bu8 = (u32)buffer;
    while (size > 0)
    {
        vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = bu8-vaddr;
        vaddr += VTLB_PAGE_SIZE;
        bu8 += VTLB_PAGE_SIZE;
        size -= VTLB_PAGE_SIZE;
    }
}

void vtlb_VMapUnmap(u32 vaddr,u32 size)
{
    verify(0==(vaddr&VTLB_PAGE_MASK));
    verify(0==(size&VTLB_PAGE_MASK) && size>0);

    while (size > 0)
    {
        u32 handl = UnmappedVirtHandler0;
        if (vaddr & 0x80000000)
        {
            handl = UnmappedVirtHandler1;
        }

        handl |= vaddr; // top bit is set anyway ...
        handl |= 0x80000000;

        vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = handl-vaddr;
        vaddr += VTLB_PAGE_SIZE;
        size -= VTLB_PAGE_SIZE;
    }
}

// vtlb_Init -- Clears vtlb handlers and memory mappings.
void vtlb_Init()
{
    vtlbHandlerCount=0;
    memzero(vtlbdata.RWFT);

    #define VTLB_BuildUnmappedHandler(baseName, highBit) \
        baseName##ReadSm<mem8_t,highBit>,   baseName##ReadSm<mem16_t,highBit>,  baseName##ReadSm<mem32_t,highBit>, \
        baseName##ReadLg<mem64_t,highBit>,  baseName##ReadLg<mem128_t,highBit>, \
        baseName##WriteSm<mem8_t,highBit>,  baseName##WriteSm<mem16_t,highBit>, baseName##WriteSm<mem32_t,highBit>, \
        baseName##WriteLg<mem64_t,highBit>, baseName##WriteLg<mem128_t,highBit>

    // Register default handlers.
    // Unmapped Virt handlers _MUST_ be registered first.
    // On address translation the top bit cannot be preserved.  This is not normally a problem since
    // the physical address space can be 'compressed' to just 29 bits.  However, to properly handle
    // exceptions there must be a way to get the full address back.  That's why I use these two
    // handlers and encode the hi bit directly into them :)

    UnmappedVirtHandler0 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedV, 0) );
    UnmappedVirtHandler1 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedV, 0x80000000) );

    UnmappedPhyHandler0 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedP, 0) );
    UnmappedPhyHandler1 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedP, 0x80000000) );

    DefaultPhyHandler = vtlb_RegisterHandler(0,0,0,0,0,0,0,0,0,0);

    // done!

    // Setup the initial mappings
    vtlb_MapHandler(DefaultPhyHandler,0,VTLB_PMAP_SZ);

    // Set the V space as unmapped
    vtlb_VMapUnmap(0,(VTLB_VMAP_ITEMS-1)*VTLB_PAGE_SIZE);
    // yeah I know, it's stupid .. but this code has to be here for now ;p
    vtlb_VMapUnmap((VTLB_VMAP_ITEMS-1)*VTLB_PAGE_SIZE,VTLB_PAGE_SIZE);

    extern void vtlb_dynarec_init();
    vtlb_dynarec_init();
}

// vtlb_Reset -- Performs a COP0-level reset of the PS2's TLB.
// This function should probably be part of the COP0 rather than here in VTLB.
void vtlb_Reset()
{
    for(int i=0; i<48; i++) UnmapTLB(i);
}

void vtlb_Term()
{
    //nothing to do for now
}

// Reserves the vtlb core allocation used by various emulation components!
// [TODO] basemem - request allocating memory at the specified virtual location, which can allow
//     for easier debugging and/or 3rd party cheat programs.  If 0, the operating system
//     default is used.
void vtlb_Core_Alloc()
{
    if (!vtlbdata.vmap)
    {
        vtlbdata.vmap = (s32*)_aligned_malloc( VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap), 16 );
        if (!vtlbdata.vmap)
            throw Exception::OutOfMemory( L"VTLB Virtual Address Translation LUT" )
                .SetDiagMsg(pxsFmt("(%u megs)", VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap) / _1mb));
    }
}

void vtlb_Core_Free()
{
    safe_aligned_free( vtlbdata.vmap );
}

static wxString GetHostVmErrorMsg()
{
    return pxE("!Notice:HostVmReserve",
        L"Your system is too low on virtual resources for PCSX2 to run. This can be "
        L"caused by having a small or disabled swapfile, or by other programs that are "
        L"hogging resources."
    );
}
// --------------------------------------------------------------------------------------
//  VtlbMemoryReserve  (implementations)
// --------------------------------------------------------------------------------------
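//
// Typical lifecycle (illustrative sketch only; the reserve name, size, and host address
// used here are hypothetical):
//
//   VtlbMemoryReserve reserve( L"Example Reserve", _1mb * 32 );
//   reserve.Reserve( someHostAddress );   // reserve address space (may throw OutOfMemory)
//   reserve.Commit();                     // commit physical backing (may throw OutOfMemory)
//   reserve.Reset();                      // commit if needed, then zero-fill
//   reserve.Decommit();                   // drop the backing store, keep the reservation
//   reserve.Release();                    // release the reservation entirely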
VtlbMemoryReserve::VtlbMemoryReserve( const wxString& name, size_t size )
    : m_reserve( name, size )
{
    m_reserve.SetPageAccessOnCommit( PageAccess_ReadWrite() );
}

void VtlbMemoryReserve::SetBaseAddr( uptr newaddr )
{
    m_reserve.SetBaseAddr( newaddr );
}

void VtlbMemoryReserve::Reserve( sptr hostptr )
{
    if (!m_reserve.ReserveAt( hostptr ))
    {
        throw Exception::OutOfMemory( m_reserve.GetName() )
            .SetDiagMsg(L"Vtlb memory could not be reserved.")
            .SetUserMsg(GetHostVmErrorMsg());
    }
}

void VtlbMemoryReserve::Commit()
{
    if (IsCommitted()) return;
    if (!m_reserve.Commit())
    {
        throw Exception::OutOfMemory( m_reserve.GetName() )
            .SetDiagMsg(L"Vtlb memory could not be committed.")
            .SetUserMsg(GetHostVmErrorMsg());
    }
}

void VtlbMemoryReserve::Reset()
{
    Commit();
    memzero_sse_a(m_reserve.GetPtr(), m_reserve.GetCommittedBytes());
}

void VtlbMemoryReserve::Decommit()
{
    m_reserve.Reset();
}

void VtlbMemoryReserve::Release()
{
    m_reserve.Release();
}

bool VtlbMemoryReserve::IsCommitted() const
{
    return !!m_reserve.GetCommittedPageCount();
}
