/[pcsx2_0.9.7]/trunk/pcsx2/vtlb.cpp

Diff of /trunk/pcsx2/vtlb.cpp


--- trunk/pcsx2/vtlb.cpp	2010/09/07 03:24:11	31
+++ trunk/pcsx2/vtlb.cpp	2010/12/23 12:02:12	280
@@ -33,40 +33,38 @@
 #include "Common.h"
 #include "vtlb.h"
 #include "COP0.h"
-
 #include "R5900Exceptions.h"
 
+#include "Utilities/MemsetFast.inl"
+
 using namespace R5900;
 using namespace vtlb_private;
 
-#ifdef PCSX2_DEVBUILD
-#define verify(x) {if (!(x)) { (*(u8*)0)=3; }}
-#else
-#define verify jASSUME
-#endif
+#define verify pxAssume
 
 namespace vtlb_private
 {
 	__aligned(64) MapData vtlbdata;
 }
 
-vtlbHandler vtlbHandlerCount=0;
+static vtlbHandler vtlbHandlerCount = 0;
 
-vtlbHandler DefaultPhyHandler;
-vtlbHandler UnmappedVirtHandler0;
-vtlbHandler UnmappedVirtHandler1;
-vtlbHandler UnmappedPhyHandler0;
-vtlbHandler UnmappedPhyHandler1;
+static vtlbHandler DefaultPhyHandler;
+static vtlbHandler UnmappedVirtHandler0;
+static vtlbHandler UnmappedVirtHandler1;
+static vtlbHandler UnmappedPhyHandler0;
+static vtlbHandler UnmappedPhyHandler1;
 
 
-//////////////////////////////////////////////////////////////////////////////////////////
+// --------------------------------------------------------------------------------------
 // Interpreter Implementations of VTLB Memory Operations.
+// --------------------------------------------------------------------------------------
 // See recVTLB.cpp for the dynarec versions.
 
-// Interpreted VTLB lookup for 8, 16, and 32 bit accesses
-template<int DataSize,typename DataType>
-__forceinline DataType __fastcall MemOp_r0(u32 addr)
+template< typename DataType >
+DataType __fastcall vtlb_memRead(u32 addr)
 {
+	static const uint DataSize = sizeof(DataType) * 8;
 	u32 vmv=vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
 	s32 ppf=addr+vmv;
 
@@ -91,19 +89,14 @@
 	return 0;		// technically unreachable, but suppresses warnings.
 }
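
For reference, the new explicit templates (instantiated for mem8_t/mem16_t/mem32_t later in this diff) are used as shown below; the address value is purely illustrative:

	// Illustrative calls through the new explicit read/write templates.
	mem8_t  b = vtlb_memRead<mem8_t>(0x00100000);   // 8-bit read  (DataSize == 8)
	mem32_t w = vtlb_memRead<mem32_t>(0x00100000);  // 32-bit read (DataSize == 32)
	vtlb_memWrite<mem32_t>(0x00100000, w);          // 32-bit write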
 
-// ------------------------------------------------------------------------
-// Interpreterd VTLB lookup for 64 and 128 bit accesses.
-template<int DataSize,typename DataType>
-__forceinline void __fastcall MemOp_r1(u32 addr, DataType* data)
+void __fastcall vtlb_memRead64(u32 mem, mem64_t *out)
 {
-	u32 vmv=vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
-	s32 ppf=addr+vmv;
+	u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
+	s32 ppf=mem+vmv;
 
 	if (!(ppf<0))
 	{
-		data[0]=*reinterpret_cast<DataType*>(ppf);
-		if (DataSize==128)
-			data[1]=*reinterpret_cast<DataType*>(ppf+8);
+		*out = *(mem64_t*)ppf;
 	}
 	else
 	{
@@ -111,22 +104,35 @@
 		u32 hand=(u8)vmv;
 		u32 paddr=ppf-hand+0x80000000;
 		//Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);
-		//return reinterpret_cast<TemplateHelper<DataSize,false>::HandlerType*>(RWFT[TemplateHelper<DataSize,false>::sidx][0][hand])(paddr,data);
 
-		switch( DataSize )
-		{
-			case 64: ((vtlbMemR64FP*)vtlbdata.RWFT[3][0][hand])(paddr, data); break;
-			case 128: ((vtlbMemR128FP*)vtlbdata.RWFT[4][0][hand])(paddr, data); break;
+		((vtlbMemR64FP*)vtlbdata.RWFT[3][0][hand])(paddr, out);
+	}
+}
+void __fastcall vtlb_memRead128(u32 mem, mem128_t *out)
+{
+	u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
+	s32 ppf=mem+vmv;
 
-			jNO_DEFAULT;
-		}
+	if (!(ppf<0))
+	{
+		CopyQWC(out,(void*)ppf);
+	}
+	else
+	{
+		//has to: translate, find function, call function
+		u32 hand=(u8)vmv;
+		u32 paddr=ppf-hand+0x80000000;
+		//Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);
+
+		((vtlbMemR128FP*)vtlbdata.RWFT[4][0][hand])(paddr, out);
 	}
 }
 
-// ------------------------------------------------------------------------
-template<int DataSize,typename DataType>
-__forceinline void __fastcall MemOp_w0(u32 addr, DataType data)
+template< typename DataType >
+void __fastcall vtlb_memWrite(u32 addr, DataType data)
 {
+	static const uint DataSize = sizeof(DataType) * 8;
+
 	u32 vmv=vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
 	s32 ppf=addr+vmv;
 	if (!(ppf<0))
@@ -151,18 +157,13 @@
 	}
 }
 
-// ------------------------------------------------------------------------
-template<int DataSize,typename DataType>
-__forceinline void __fastcall MemOp_w1(u32 addr,const DataType* data)
+void __fastcall vtlb_memWrite64(u32 mem, const mem64_t* value)
 {
-	verify(DataSize==128 || DataSize==64);
-	u32 vmv=vtlbdata.vmap[addr>>VTLB_PAGE_BITS];
-	s32 ppf=addr+vmv;
+	u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
+	s32 ppf=mem+vmv;
 	if (!(ppf<0))
 	{
-		*reinterpret_cast<DataType*>(ppf)=*data;
-		if (DataSize==128)
-			*reinterpret_cast<DataType*>(ppf+8)=data[1];
+		*(mem64_t*)ppf = *value;
 	}
 	else
 	{
@@ -170,145 +171,146 @@
 		u32 hand=(u8)vmv;
 		u32 paddr=ppf-hand+0x80000000;
 		//Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);
-		switch( DataSize )
-		{
-			case 64: return ((vtlbMemW64FP*)vtlbdata.RWFT[3][1][hand])(paddr, data);
-			case 128: return ((vtlbMemW128FP*)vtlbdata.RWFT[4][1][hand])(paddr, data);
 
-			jNO_DEFAULT;
-		}
+		((vtlbMemW64FP*)vtlbdata.RWFT[3][1][hand])(paddr, value);
 	}
 }
 
-mem8_t __fastcall vtlb_memRead8(u32 mem)
+void __fastcall vtlb_memWrite128(u32 mem, const mem128_t *value)
 {
-	return MemOp_r0<8,mem8_t>(mem);
+	u32 vmv=vtlbdata.vmap[mem>>VTLB_PAGE_BITS];
+	s32 ppf=mem+vmv;
+	if (!(ppf<0))
+	{
+		CopyQWC((void*)ppf, value);
+	}
+	else
+	{
+		//has to: translate, find function, call function
+		u32 hand=(u8)vmv;
+		u32 paddr=ppf-hand+0x80000000;
+		//Console.WriteLn("Translated 0x%08X to 0x%08X", addr,paddr);
+
+		((vtlbMemW128FP*)vtlbdata.RWFT[4][1][hand])(paddr, value);
+	}
 }
-mem16_t __fastcall vtlb_memRead16(u32 mem)
+
+template mem8_t vtlb_memRead<mem8_t>(u32 mem);
+template mem16_t vtlb_memRead<mem16_t>(u32 mem);
+template mem32_t vtlb_memRead<mem32_t>(u32 mem);
+template void vtlb_memWrite<mem8_t>(u32 mem, mem8_t data);
+template void vtlb_memWrite<mem16_t>(u32 mem, mem16_t data);
+template void vtlb_memWrite<mem32_t>(u32 mem, mem32_t data);
+
+// --------------------------------------------------------------------------------------
+//  TLB Miss / BusError Handlers
+// --------------------------------------------------------------------------------------
+// These are valid VM memory errors that should typically be handled by the VM itself via
+// its own cpu exception system.
+//
+// [TODO]  Add first-chance debugging hooks to these exceptions!
+//
+// Important recompiler note: Mid-block Exception handling isn't reliable *yet* because
+// memory ops don't flush the PC prior to invoking the indirect handlers.
+
+// Generates a tlbMiss Exception
+static __ri void vtlb_Miss(u32 addr,u32 mode)
 {
-	return MemOp_r0<16,mem16_t>(mem);
+	if( IsDevBuild )
+		Cpu->ThrowCpuException( R5900Exception::TLBMiss( addr, !!mode ) );
+	else
+		Console.Error( R5900Exception::TLBMiss( addr, !!mode ).FormatMessage() );
 }
-mem32_t __fastcall vtlb_memRead32(u32 mem)
+
+// BusError exception: more serious than a TLB miss.  If properly emulated the PS2 kernel
+// itself would invoke a diagnostic/assertion screen that displays the cpu state at the
+// time of the exception.
+static __ri void vtlb_BusError(u32 addr,u32 mode)
 {
-	return MemOp_r0<32,mem32_t>(mem);
+	if( IsDevBuild )
+		Cpu->ThrowCpuException( R5900Exception::BusError( addr, !!mode ) );
+	else
+		Console.Error( R5900Exception::BusError( addr, !!mode ).FormatMessage() );
 }
-void __fastcall vtlb_memRead64(u32 mem, u64 *out)
+
+#define _tmpl(ret) template<typename OperandType, u32 saddr> ret __fastcall
+
+_tmpl(OperandType) vtlbUnmappedVReadSm(u32 addr)					{ vtlb_Miss(addr|saddr,0); return 0; }
+_tmpl(void) vtlbUnmappedVReadLg(u32 addr,OperandType* data)			{ vtlb_Miss(addr|saddr,0); }
+_tmpl(void) vtlbUnmappedVWriteSm(u32 addr,OperandType data)			{ vtlb_Miss(addr|saddr,1); }
+_tmpl(void) vtlbUnmappedVWriteLg(u32 addr,const OperandType* data)	{ vtlb_Miss(addr|saddr,1); }
+
+_tmpl(OperandType) vtlbUnmappedPReadSm(u32 addr)					{ vtlb_BusError(addr|saddr,0); return 0; }
+_tmpl(void) vtlbUnmappedPReadLg(u32 addr,OperandType* data)			{ vtlb_BusError(addr|saddr,0); }
+_tmpl(void) vtlbUnmappedPWriteSm(u32 addr,OperandType data)			{ vtlb_BusError(addr|saddr,1); }
+_tmpl(void) vtlbUnmappedPWriteLg(u32 addr,const OperandType* data)	{ vtlb_BusError(addr|saddr,1); }
+
+#undef _tmpl
+
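For readability, here is a hand expansion of one stub generated by the _tmpl macro above (illustration only, not additional source):

	// Expanded form of: _tmpl(OperandType) vtlbUnmappedVReadSm(u32 addr) { vtlb_Miss(addr|saddr,0); return 0; }
	template< typename OperandType, u32 saddr >
	OperandType __fastcall vtlbUnmappedVReadSm(u32 addr)
	{
		// saddr re-injects the high bit lost during address translation, so the
		// reported miss address is the original virtual address.
		vtlb_Miss(addr | saddr, 0);
		return 0;
	}
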
+// --------------------------------------------------------------------------------------
+//  VTLB mapping errors
+// --------------------------------------------------------------------------------------
+// These errors are assertion/logic errors that should never occur if PCSX2 has been initialized
+// properly.  All addressable physical memory should be configured as TLBMiss or Bus Error.
+//
+
+static mem8_t __fastcall vtlbDefaultPhyRead8(u32 addr)
 {
-	return MemOp_r1<64,mem64_t>(mem,out);
+	pxFailDev(pxsFmt("(VTLB) Attempted read8 from unmapped physical address @ 0x%08X.", addr));
+	return 0;
 }
-void __fastcall vtlb_memRead128(u32 mem, u64 *out)
+
+static mem16_t __fastcall vtlbDefaultPhyRead16(u32 addr)
 {
-	return MemOp_r1<128,mem128_t>(mem,out);
+	pxFailDev(pxsFmt("(VTLB) Attempted read16 from unmapped physical address @ 0x%08X.", addr));
+	return 0;
 }
-void __fastcall vtlb_memWrite8 (u32 mem, mem8_t value)
+
+static mem32_t __fastcall vtlbDefaultPhyRead32(u32 addr)
 {
-	MemOp_w0<8,mem8_t>(mem,value);
+	pxFailDev(pxsFmt("(VTLB) Attempted read32 from unmapped physical address @ 0x%08X.", addr));
+	return 0;
 }
-void __fastcall vtlb_memWrite16(u32 mem, mem16_t value)
+
+static void __fastcall vtlbDefaultPhyRead64(u32 addr, mem64_t* dest)
 {
-	MemOp_w0<16,mem16_t>(mem,value);
+	pxFailDev(pxsFmt("(VTLB) Attempted read64 from unmapped physical address @ 0x%08X.", addr));
 }
-void __fastcall vtlb_memWrite32(u32 mem, mem32_t value)
+
+static void __fastcall vtlbDefaultPhyRead128(u32 addr, mem128_t* dest)
 {
-	MemOp_w0<32,mem32_t>(mem,value);
+	pxFailDev(pxsFmt("(VTLB) Attempted read128 from unmapped physical address @ 0x%08X.", addr));
 }
-void __fastcall vtlb_memWrite64(u32 mem, const mem64_t* value)
+
+static void __fastcall vtlbDefaultPhyWrite8(u32 addr, mem8_t data)
 {
-	MemOp_w1<64,mem64_t>(mem,value);
+	pxFailDev(pxsFmt("(VTLB) Attempted write8 to unmapped physical address @ 0x%08X.", addr));
 }
-void __fastcall vtlb_memWrite128(u32 mem, const mem128_t *value)
+
+static void __fastcall vtlbDefaultPhyWrite16(u32 addr, mem16_t data)
 {
-	MemOp_w1<128,mem128_t>(mem,value);
+	pxFailDev(pxsFmt("(VTLB) Attempted write16 to unmapped physical address @ 0x%08X.", addr));
 }
 
-/////////////////////////////////////////////////////////////////////////
-// Error / TLB Miss Handlers
-//
-
-static const char* _getModeStr( u32 mode )
+static void __fastcall vtlbDefaultPhyWrite32(u32 addr, mem32_t data)
 {
-	return (mode==0) ? "read" : "write";
+	pxFailDev(pxsFmt("(VTLB) Attempted write32 to unmapped physical address @ 0x%08X.", addr));
 }
 
-// Generates a tlbMiss Exception
-// Note: Don't throw exceptions yet, they cause a crash when otherwise
-// there would be a (slight) chance the game continues (rama).
-static __forceinline void vtlb_Miss(u32 addr,u32 mode)
-{
-	Console.Error( "vtlb miss : addr 0x%X, mode %d [%s]", addr, mode, _getModeStr(mode) );
-	//verify(false);
-	//throw R5900Exception::TLBMiss( addr, !!mode );
-}
-
-// Just dies a horrible death for now.
-// Eventually should generate a BusError exception.
-static __forceinline void vtlb_BusError(u32 addr,u32 mode)
-{
-	Console.Error( "vtlb bus error : addr 0x%X, mode %d\n", addr, _getModeStr(mode) );
-	//verify(false);
-	throw R5900Exception::BusError( addr, !!mode );
-}
-
-///// Virtual Mapping Errors (TLB Miss)
-template<u32 saddr>
-mem8_t __fastcall vtlbUnmappedVRead8(u32 addr) { vtlb_Miss(addr|saddr,0); return 0; }
-template<u32 saddr>
-mem16_t __fastcall vtlbUnmappedVRead16(u32 addr)  { vtlb_Miss(addr|saddr,0); return 0; }
-template<u32 saddr>
-mem32_t __fastcall vtlbUnmappedVRead32(u32 addr) { vtlb_Miss(addr|saddr,0); return 0; }
-template<u32 saddr>
-void __fastcall vtlbUnmappedVRead64(u32 addr,mem64_t* data) { vtlb_Miss(addr|saddr,0); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedVRead128(u32 addr,mem128_t* data) { vtlb_Miss(addr|saddr,0); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedVWrite8(u32 addr,mem8_t data) { vtlb_Miss(addr|saddr,1); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedVWrite16(u32 addr,mem16_t data) { vtlb_Miss(addr|saddr,1); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedVWrite32(u32 addr,mem32_t data) { vtlb_Miss(addr|saddr,1); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedVWrite64(u32 addr,const mem64_t* data) { vtlb_Miss(addr|saddr,1); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedVWrite128(u32 addr,const mem128_t* data) { vtlb_Miss(addr|saddr,1); }
-
-///// Physical Mapping Errors (Bus Error)
-template<u32 saddr>
-mem8_t __fastcall vtlbUnmappedPRead8(u32 addr) { vtlb_BusError(addr|saddr,0); return 0; }
-template<u32 saddr>
-mem16_t __fastcall vtlbUnmappedPRead16(u32 addr)  { vtlb_BusError(addr|saddr,0); return 0; }
-template<u32 saddr>
-mem32_t __fastcall vtlbUnmappedPRead32(u32 addr) { vtlb_BusError(addr|saddr,0); return 0; }
-template<u32 saddr>
-void __fastcall vtlbUnmappedPRead64(u32 addr,mem64_t* data) { vtlb_BusError(addr|saddr,0); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedPRead128(u32 addr,mem128_t* data) { vtlb_BusError(addr|saddr,0); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedPWrite8(u32 addr,mem8_t data) { vtlb_BusError(addr|saddr,1); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedPWrite16(u32 addr,mem16_t data) { vtlb_BusError(addr|saddr,1); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedPWrite32(u32 addr,mem32_t data) { vtlb_BusError(addr|saddr,1); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedPWrite64(u32 addr,const mem64_t* data) { vtlb_BusError(addr|saddr,1); }
-template<u32 saddr>
-void __fastcall vtlbUnmappedPWrite128(u32 addr,const mem128_t* data) { vtlb_BusError(addr|saddr,1); }
-
-///// VTLB mapping errors (unmapped address spaces)
-mem8_t __fastcall vtlbDefaultPhyRead8(u32 addr) { Console.Error("vtlbDefaultPhyRead8: 0x%X",addr); verify(false); return -1; }
-mem16_t __fastcall vtlbDefaultPhyRead16(u32 addr)  { Console.Error("vtlbDefaultPhyRead16: 0x%X",addr); verify(false); return -1; }
-mem32_t __fastcall vtlbDefaultPhyRead32(u32 addr) { Console.Error("vtlbDefaultPhyRead32: 0x%X",addr); verify(false); return -1; }
-void __fastcall vtlbDefaultPhyRead64(u32 addr,mem64_t* data) { Console.Error("vtlbDefaultPhyRead64: 0x%X",addr); verify(false); }
-void __fastcall vtlbDefaultPhyRead128(u32 addr,mem128_t* data) { Console.Error("vtlbDefaultPhyRead128: 0x%X",addr); verify(false); }
-
-void __fastcall vtlbDefaultPhyWrite8(u32 addr,mem8_t data) { Console.Error("vtlbDefaultPhyWrite8: 0x%X",addr); verify(false); }
-void __fastcall vtlbDefaultPhyWrite16(u32 addr,mem16_t data) { Console.Error("vtlbDefaultPhyWrite16: 0x%X",addr); verify(false); }
-void __fastcall vtlbDefaultPhyWrite32(u32 addr,mem32_t data) { Console.Error("vtlbDefaultPhyWrite32: 0x%X",addr); verify(false); }
-void __fastcall vtlbDefaultPhyWrite64(u32 addr,const mem64_t* data) { Console.Error("vtlbDefaultPhyWrite64: 0x%X",addr); verify(false); }
-void __fastcall vtlbDefaultPhyWrite128(u32 addr,const mem128_t* data) { Console.Error("vtlbDefaultPhyWrite128: 0x%X",addr); verify(false); }
+static void __fastcall vtlbDefaultPhyWrite64(u32 addr,const mem64_t* data)
+{
+	pxFailDev(pxsFmt("(VTLB) Attempted write64 to unmapped physical address @ 0x%08X.", addr));
+}
 
+static void __fastcall vtlbDefaultPhyWrite128(u32 addr,const mem128_t* data)
+{
+	pxFailDev(pxsFmt("(VTLB) Attempted write128 to unmapped physical address @ 0x%08X.", addr));
+}
+#undef _tmpl
 
-//////////////////////////////////////////////////////////////////////////////////////////
-// VTLB Public API -- Init/Term/RegisterHandler stuff
+// ===========================================================================================
+//  VTLB Public API -- Init/Term/RegisterHandler stuff 
+// ===========================================================================================
 //
 
 // Assigns or re-assigns the callbacks for a VTLB memory handler.  The handler defines specific behavior
@@ -318,32 +320,28 @@
 //
 // Note: All handlers persist across calls to vtlb_Reset(), but are wiped/invalidated by calls to vtlb_Init()
 //
-void vtlb_ReassignHandler( vtlbHandler rv,
-		 vtlbMemR8FP* r8,vtlbMemR16FP* r16,vtlbMemR32FP* r32,vtlbMemR64FP* r64,vtlbMemR128FP* r128,
-		 vtlbMemW8FP* w8,vtlbMemW16FP* w16,vtlbMemW32FP* w32,vtlbMemW64FP* w64,vtlbMemW128FP* w128 )
-{
-	vtlbdata.RWFT[0][0][rv] = (r8!=0) ? (void*)(r8): (void*)vtlbDefaultPhyRead8;
-	vtlbdata.RWFT[1][0][rv] = (r16!=0)  ? (void*)r16: (void*)vtlbDefaultPhyRead16;
-	vtlbdata.RWFT[2][0][rv] = (r32!=0)  ? (void*)r32: (void*)vtlbDefaultPhyRead32;
-	vtlbdata.RWFT[3][0][rv] = (r64!=0)  ? (void*)r64: (void*)vtlbDefaultPhyRead64;
-	vtlbdata.RWFT[4][0][rv] = (r128!=0) ? (void*)r128: (void*)vtlbDefaultPhyRead128;
-
-	vtlbdata.RWFT[0][0][rv] = (r8!=0)   ? (void*)r8:(void*)vtlbDefaultPhyRead8;
-	vtlbdata.RWFT[1][0][rv] = (r16!=0)  ? (void*)r16:(void*)vtlbDefaultPhyRead16;
-	vtlbdata.RWFT[2][0][rv] = (r32!=0)  ? (void*)r32:(void*)vtlbDefaultPhyRead32;
-	vtlbdata.RWFT[3][0][rv] = (r64!=0)  ? (void*)r64:(void*)vtlbDefaultPhyRead64;
-	vtlbdata.RWFT[4][0][rv] = (r128!=0) ? (void*)r128:(void*)vtlbDefaultPhyRead128;
-
-	vtlbdata.RWFT[0][1][rv] = (void*)((w8!=0)   ? w8:vtlbDefaultPhyWrite8);
-	vtlbdata.RWFT[1][1][rv] = (void*)((w16!=0)  ? w16:vtlbDefaultPhyWrite16);
-	vtlbdata.RWFT[2][1][rv] = (void*)((w32!=0)  ? w32:vtlbDefaultPhyWrite32);
-	vtlbdata.RWFT[3][1][rv] = (void*)((w64!=0)  ? w64:vtlbDefaultPhyWrite64);
-	vtlbdata.RWFT[4][1][rv] = (void*)((w128!=0) ? w128:vtlbDefaultPhyWrite128);
+__ri void vtlb_ReassignHandler( vtlbHandler rv,
+							   vtlbMemR8FP* r8,vtlbMemR16FP* r16,vtlbMemR32FP* r32,vtlbMemR64FP* r64,vtlbMemR128FP* r128,
+							   vtlbMemW8FP* w8,vtlbMemW16FP* w16,vtlbMemW32FP* w32,vtlbMemW64FP* w64,vtlbMemW128FP* w128 )
+{
+	pxAssume(rv < VTLB_HANDLER_ITEMS);
+
+	vtlbdata.RWFT[0][0][rv] = (void*)((r8!=0)   ? r8	: vtlbDefaultPhyRead8);
+	vtlbdata.RWFT[1][0][rv] = (void*)((r16!=0)  ? r16	: vtlbDefaultPhyRead16);
+	vtlbdata.RWFT[2][0][rv] = (void*)((r32!=0)  ? r32	: vtlbDefaultPhyRead32);
+	vtlbdata.RWFT[3][0][rv] = (void*)((r64!=0)  ? r64	: vtlbDefaultPhyRead64);
+	vtlbdata.RWFT[4][0][rv] = (void*)((r128!=0) ? r128	: vtlbDefaultPhyRead128);
+
+	vtlbdata.RWFT[0][1][rv] = (void*)((w8!=0)   ? w8	: vtlbDefaultPhyWrite8);
+	vtlbdata.RWFT[1][1][rv] = (void*)((w16!=0)  ? w16	: vtlbDefaultPhyWrite16);
+	vtlbdata.RWFT[2][1][rv] = (void*)((w32!=0)  ? w32	: vtlbDefaultPhyWrite32);
+	vtlbdata.RWFT[3][1][rv] = (void*)((w64!=0)  ? w64	: vtlbDefaultPhyWrite64);
+	vtlbdata.RWFT[4][1][rv] = (void*)((w128!=0) ? w128	: vtlbDefaultPhyWrite128);
 }
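
The RWFT table filled in above is the same one the dispatch code earlier in this file indexes; its layout, shown here for illustration:

	// vtlbdata.RWFT[opsize][dir][handler]
	//   opsize: 0 = 8-bit, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit, 4 = 128-bit
	//   dir:    0 = read,  1 = write
	// e.g. fetching the 32-bit read callback registered for handler 'h':
	vtlbMemR32FP* r32 = (vtlbMemR32FP*)vtlbdata.RWFT[2][0][h];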
 
 vtlbHandler vtlb_NewHandler()
 {
-	pxAssertDev( vtlbHandlerCount < 127, "VTLB allowed handler count exceeded!" );
+	pxAssertDev( vtlbHandlerCount < VTLB_HANDLER_ITEMS, "VTLB handler count overflow!" );
 	return vtlbHandlerCount++;
 }
 
@@ -356,8 +354,8 @@
 //
 // Returns a handle for the newly created handler.  See vtlb_MapHandler for use of the return value.
 //
-vtlbHandler vtlb_RegisterHandler(	vtlbMemR8FP* r8,vtlbMemR16FP* r16,vtlbMemR32FP* r32,vtlbMemR64FP* r64,vtlbMemR128FP* r128,
-									vtlbMemW8FP* w8,vtlbMemW16FP* w16,vtlbMemW32FP* w32,vtlbMemW64FP* w64,vtlbMemW128FP* w128)
+__ri vtlbHandler vtlb_RegisterHandler(	vtlbMemR8FP* r8,vtlbMemR16FP* r16,vtlbMemR32FP* r32,vtlbMemR64FP* r64,vtlbMemR128FP* r128,
+										vtlbMemW8FP* w8,vtlbMemW16FP* w16,vtlbMemW32FP* w32,vtlbMemW64FP* w64,vtlbMemW128FP* w128)
 {
 	vtlbHandler rv = vtlb_NewHandler();
 	vtlb_ReassignHandler( rv, r8, r16, r32, r64, r128, w8, w16, w32, w64, w128 );
@@ -365,7 +363,6 @@
 }
 
 
-//////////////////////////////////////////////////////////////////////////////////////////
 // Maps the given handler (created with vtlb_RegisterHandler) to the specified memory region.
 // New mappings always assume priority over previous mappings, so place "generic" mappings for
 // large areas of memory first, and then specialize specific small regions of memory afterward.
@@ -373,45 +370,47 @@
 // function.
 //
 // The memory region start and size parameters must be pagesize aligned.
-void vtlb_MapHandler(vtlbHandler handler,u32 start,u32 size)
+void vtlb_MapHandler(vtlbHandler handler, u32 start, u32 size)
 {
 	verify(0==(start&VTLB_PAGE_MASK));
 	verify(0==(size&VTLB_PAGE_MASK) && size>0);
-	s32 value=handler|0x80000000;
 
-	while(size>0)
-	{
-		vtlbdata.pmap[start>>VTLB_PAGE_BITS]=value;
+	s32 value = handler | 0x80000000;
+	u32 end = start + (size - VTLB_PAGE_SIZE);
+	pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );
 
-		start+=VTLB_PAGE_SIZE;
-		size-=VTLB_PAGE_SIZE;
+	while (start <= end)
+	{
+		vtlbdata.pmap[start>>VTLB_PAGE_BITS] = value;
+		start += VTLB_PAGE_SIZE;
 	}
 }
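
A minimal register-then-map sketch of the API described above; the device callbacks and the physical range are illustrative placeholders, not part of this diff:

	static mem32_t __fastcall myDevRead32(u32 addr)             { return 0; }
	static void    __fastcall myDevWrite32(u32 addr, mem32_t v) { /* ... */ }

	void mapMyDevice()
	{
		// Unspecified (NULL) callbacks fall back to the vtlbDefaultPhy* stubs.
		vtlbHandler h = vtlb_RegisterHandler(
			NULL, NULL, myDevRead32,  NULL, NULL,    // r8, r16, r32, r64, r128
			NULL, NULL, myDevWrite32, NULL, NULL );  // w8, w16, w32, w64, w128
		vtlb_MapHandler(h, 0x10000000, 0x10000);     // start/size must be page aligned
	}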
 
-void vtlb_MapBlock(void* base,u32 start,u32 size,u32 blocksize)
+void vtlb_MapBlock(void* base, u32 start, u32 size, u32 blocksize)
 {
-	s32 baseint=(s32)base;
-
 	verify(0==(start&VTLB_PAGE_MASK));
 	verify(0==(size&VTLB_PAGE_MASK) && size>0);
-	if (blocksize==0)
-		blocksize=size;
+	if (!blocksize)
+		blocksize = size;
 	verify(0==(blocksize&VTLB_PAGE_MASK) && blocksize>0);
 	verify(0==(size%blocksize));
 
-	while(size>0)
+	s32 baseint = (s32)base;
+	u32 end = start + (size - VTLB_PAGE_SIZE);
+	pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );
+
+	while (start <= end)
 	{
-		u32 blocksz=blocksize;
-		s32 ptr=baseint;
+		u32 loopsz = blocksize;
+		s32 ptr = baseint;
 
-		while(blocksz>0)
+		while (loopsz > 0)
 		{
-			vtlbdata.pmap[start>>VTLB_PAGE_BITS]=ptr;
+			vtlbdata.pmap[start>>VTLB_PAGE_BITS] = ptr;
 
-			start+=VTLB_PAGE_SIZE;
-			ptr+=VTLB_PAGE_SIZE;
-			blocksz-=VTLB_PAGE_SIZE;
-			size-=VTLB_PAGE_SIZE;
+			start	+= VTLB_PAGE_SIZE;
+			ptr		+= VTLB_PAGE_SIZE;
+			loopsz	-= VTLB_PAGE_SIZE;
 		}
 	}
 }
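
A quick sketch of the mirroring behavior vtlb_MapBlock implements; the buffer and addresses are illustrative:

	// An 8K host buffer mirrored across a 32K physical window: the same 0x2000
	// bytes reappear at 0x11000000, 0x11002000, 0x11004000, and 0x11006000.
	vtlb_MapBlock(someHostBuffer, 0x11000000, 0x8000, 0x2000);
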
@@ -422,17 +421,19 @@
 	verify(0==(start&VTLB_PAGE_MASK));
 	verify(0==(size&VTLB_PAGE_MASK) && size>0);
 
-	while(size>0)
+	u32 end = start + (size-VTLB_PAGE_SIZE);
+	pxAssume( (end>>VTLB_PAGE_BITS) < ArraySize(vtlbdata.pmap) );
+
+	while(start <= end)
 	{
-		vtlbdata.pmap[start>>VTLB_PAGE_BITS]=vtlbdata.pmap[new_region>>VTLB_PAGE_BITS];
+		vtlbdata.pmap[start>>VTLB_PAGE_BITS] = vtlbdata.pmap[new_region>>VTLB_PAGE_BITS];
 
-		start+=VTLB_PAGE_SIZE;
-		new_region+=VTLB_PAGE_SIZE;
-		size-=VTLB_PAGE_SIZE;
+		start		+= VTLB_PAGE_SIZE;
+		new_region	+= VTLB_PAGE_SIZE;
 	}
 }
 
-__forceinline void* vtlb_GetPhyPtr(u32 paddr)
+__fi void* vtlb_GetPhyPtr(u32 paddr)
 {
 	if (paddr>=VTLB_PMAP_SZ || vtlbdata.pmap[paddr>>VTLB_PAGE_BITS]<0)
 		return NULL;
@@ -442,101 +443,96 @@
 
 //virtual mappings
 //TODO: Add invalid paddr checks
-void vtlb_VMap(u32 vaddr,u32 paddr,u32 sz)
+void vtlb_VMap(u32 vaddr,u32 paddr,u32 size)
 {
 	verify(0==(vaddr&VTLB_PAGE_MASK));
 	verify(0==(paddr&VTLB_PAGE_MASK));
-	verify(0==(sz&VTLB_PAGE_MASK) && sz>0);
+	verify(0==(size&VTLB_PAGE_MASK) && size>0);
 
-	while(sz>0)
+	while (size > 0)
 	{
 		s32 pme;
-		if (paddr>=VTLB_PMAP_SZ)
+		if (paddr >= VTLB_PMAP_SZ)
 		{
-			pme=UnmappedPhyHandler0;
-			if (paddr&0x80000000)
-				pme=UnmappedPhyHandler1;
-			pme|=0x80000000;
-			pme|=paddr;// top bit is set anyway ...
+			pme = UnmappedPhyHandler0;
+			if (paddr & 0x80000000)
+				pme = UnmappedPhyHandler1;
+			pme |= 0x80000000;
+			pme |= paddr;// top bit is set anyway ...
 		}
 		else
 		{
-			pme=vtlbdata.pmap[paddr>>VTLB_PAGE_BITS];
+			pme = vtlbdata.pmap[paddr>>VTLB_PAGE_BITS];
 			if (pme<0)
-				pme|=paddr;// top bit is set anyway ...
+				pme |= paddr;// top bit is set anyway ...
 		}
-		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS]=pme-vaddr;
-		vaddr+=VTLB_PAGE_SIZE;
-		paddr+=VTLB_PAGE_SIZE;
-		sz-=VTLB_PAGE_SIZE;
+
+		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = pme-vaddr;
+		vaddr += VTLB_PAGE_SIZE;
+		paddr += VTLB_PAGE_SIZE;
+		size -= VTLB_PAGE_SIZE;
 	}
 }
 
-void vtlb_VMapBuffer(u32 vaddr,void* buffer,u32 sz)
+void vtlb_VMapBuffer(u32 vaddr,void* buffer,u32 size)
 {
 	verify(0==(vaddr&VTLB_PAGE_MASK));
-	verify(0==(sz&VTLB_PAGE_MASK) && sz>0);
-	u32 bu8=(u32)buffer;
-	while(sz>0)
+	verify(0==(size&VTLB_PAGE_MASK) && size>0);
+
+	u32 bu8 = (u32)buffer;
+	while (size > 0)
 	{
-		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS]=bu8-vaddr;
-		vaddr+=VTLB_PAGE_SIZE;
-		bu8+=VTLB_PAGE_SIZE;
-		sz-=VTLB_PAGE_SIZE;
+		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = bu8-vaddr;
+		vaddr += VTLB_PAGE_SIZE;
+		bu8 += VTLB_PAGE_SIZE;
+		size -= VTLB_PAGE_SIZE;
 	}
 }
-void vtlb_VMapUnmap(u32 vaddr,u32 sz)
+void vtlb_VMapUnmap(u32 vaddr,u32 size)
 {
 	verify(0==(vaddr&VTLB_PAGE_MASK));
-	verify(0==(sz&VTLB_PAGE_MASK) && sz>0);
+	verify(0==(size&VTLB_PAGE_MASK) && size>0);
 
-	while(sz>0)
+	while (size > 0)
 	{
-		u32 handl=UnmappedVirtHandler0;
-		if (vaddr&0x80000000)
+		u32 handl = UnmappedVirtHandler0;
+		if (vaddr & 0x80000000)
 		{
-			handl=UnmappedVirtHandler1;
+			handl = UnmappedVirtHandler1;
 		}
-		handl|=vaddr; // top bit is set anyway ...
-		handl|=0x80000000;
-		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS]=handl-vaddr;
-		vaddr+=VTLB_PAGE_SIZE;
-		sz-=VTLB_PAGE_SIZE;
+
+		handl |= vaddr; // top bit is set anyway ...
+		handl |= 0x80000000;
+
+		vtlbdata.vmap[vaddr>>VTLB_PAGE_BITS] = handl-vaddr;
+		vaddr += VTLB_PAGE_SIZE;
+		size -= VTLB_PAGE_SIZE;
 	}
 }
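
Typical use of the virtual-mapping calls above when (re)building the EE address space; the addresses follow the usual PS2 layout and the scratchpad buffer name is illustrative:

	vtlb_VMap(0x00000000, 0x00000000, 0x01000000);          // identity-map 16MB of physical memory
	vtlb_VMapBuffer(0x70000000, scratchpadBuffer, 0x4000);  // 16K scratchpad -> host buffer
	vtlb_VMapUnmap(0x00000000, 0x01000000);                 // later: punch the range back out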
 
-//////////////////////////////////////////////////////////////////////////////////////////
-// vtlb_init -- Clears vtlb handlers and memory mappings.
+// vtlb_Init -- Clears vtlb handlers and memory mappings.
 void vtlb_Init()
 {
 	vtlbHandlerCount=0;
 	memzero(vtlbdata.RWFT);
 
+#define VTLB_BuildUnmappedHandler(baseName, highBit) \
+	baseName##ReadSm<mem8_t,highBit>,	baseName##ReadSm<mem16_t,highBit>,	baseName##ReadSm<mem32_t,highBit>, \
+	baseName##ReadLg<mem64_t,highBit>,	baseName##ReadLg<mem128_t,highBit>, \
+	baseName##WriteSm<mem8_t,highBit>,	baseName##WriteSm<mem16_t,highBit>,	baseName##WriteSm<mem32_t,highBit>, \
+	baseName##WriteLg<mem64_t,highBit>,	baseName##WriteLg<mem128_t,highBit>
+
 	//Register default handlers
 	//Unmapped Virt handlers _MUST_ be registered first.
 	//On address translation the top bit cannot be preserved. This is not normally a problem, since
 	//the physical address space can be 'compressed' to just 29 bits. However, to properly handle exceptions
 	//there must be a way to get the full address back. That's why these two handlers encode the hi bit directly into them :)
 
-	UnmappedVirtHandler0 = vtlb_RegisterHandler(
-		vtlbUnmappedVRead8<0>,vtlbUnmappedVRead16<0>,vtlbUnmappedVRead32<0>,vtlbUnmappedVRead64<0>,vtlbUnmappedVRead128<0>,
-		vtlbUnmappedVWrite8<0>,vtlbUnmappedVWrite16<0>,vtlbUnmappedVWrite32<0>,vtlbUnmappedVWrite64<0>,vtlbUnmappedVWrite128<0>
-	);
-
-	UnmappedVirtHandler1 = vtlb_RegisterHandler(
-		vtlbUnmappedVRead8<0x80000000>,vtlbUnmappedVRead16<0x80000000>,vtlbUnmappedVRead32<0x80000000>, vtlbUnmappedVRead64<0x80000000>,vtlbUnmappedVRead128<0x80000000>,
-		vtlbUnmappedVWrite8<0x80000000>,vtlbUnmappedVWrite16<0x80000000>,vtlbUnmappedVWrite32<0x80000000>, vtlbUnmappedVWrite64<0x80000000>,vtlbUnmappedVWrite128<0x80000000>
-	);
-
-	UnmappedPhyHandler0 = vtlb_RegisterHandler(
-		vtlbUnmappedPRead8<0>,vtlbUnmappedPRead16<0>,vtlbUnmappedPRead32<0>,vtlbUnmappedPRead64<0>,vtlbUnmappedPRead128<0>,
-		vtlbUnmappedPWrite8<0>,vtlbUnmappedPWrite16<0>,vtlbUnmappedPWrite32<0>,vtlbUnmappedPWrite64<0>,vtlbUnmappedPWrite128<0>
-	);
+	UnmappedVirtHandler0 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedV, 0) );
+	UnmappedVirtHandler1 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedV, 0x80000000) );
 
-	UnmappedPhyHandler1 = vtlb_RegisterHandler(
-		vtlbUnmappedPRead8<0x80000000>,vtlbUnmappedPRead16<0x80000000>,vtlbUnmappedPRead32<0x80000000>, vtlbUnmappedPRead64<0x80000000>,vtlbUnmappedPRead128<0x80000000>,
-		vtlbUnmappedPWrite8<0x80000000>,vtlbUnmappedPWrite16<0x80000000>,vtlbUnmappedPWrite32<0x80000000>, vtlbUnmappedPWrite64<0x80000000>,vtlbUnmappedPWrite128<0x80000000>
-	);
+	UnmappedPhyHandler0 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedP, 0) );
+	UnmappedPhyHandler1 = vtlb_RegisterHandler( VTLB_BuildUnmappedHandler(vtlbUnmappedP, 0x80000000) );
 
 	DefaultPhyHandler = vtlb_RegisterHandler(0,0,0,0,0,0,0,0,0,0);
 
@@ -554,7 +550,6 @@
 	vtlb_dynarec_init();
 }
 
-//////////////////////////////////////////////////////////////////////////////////////////
 // vtlb_Reset -- Performs a COP0-level reset of the PS2's TLB.
 // This function should probably be part of the COP0 rather than here in VTLB.
 void vtlb_Reset()
@@ -567,65 +562,87 @@
 	//nothing to do for now
 }
 
-//////////////////////////////////////////////////////////////////////////////////////////
 // Reserves the vtlb core allocation used by various emulation components!
-//
+// [TODO] basemem - request allocating memory at the specified virtual location, which can allow
+//    for easier debugging and/or 3rd party cheat programs.  If 0, the operating system
+//    default is used.
 void vtlb_Core_Alloc()
 {
-	if( vtlbdata.alloc_base != NULL ) return;
-
-	vtlbdata.alloc_current = 0;
+	if (!vtlbdata.vmap)
+	{
+		vtlbdata.vmap = (s32*)_aligned_malloc( VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap), 16 );
+		if (!vtlbdata.vmap)
+			throw Exception::OutOfMemory( L"VTLB Virtual Address Translation LUT" )
+				.SetDiagMsg(pxsFmt("(%u megs)", VTLB_VMAP_ITEMS * sizeof(*vtlbdata.vmap) / _1mb)
+			);
+	}
+}
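
The vmap LUT allocated here is what the lookups at the top of this file index; the core relation, shown for illustration:

	// For a mapped page, vmap[vaddr >> VTLB_PAGE_BITS] holds (target - vaddr), so:
	s32 vmv = vtlbdata.vmap[addr >> VTLB_PAGE_BITS];
	s32 ppf = addr + vmv;   // >= 0: direct host pointer; < 0: handler/physical-address encoding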
 
-#ifdef __LINUX__
-	vtlbdata.alloc_base = SysMmapEx( 0x16000000, VTLB_ALLOC_SIZE, 0x80000000, "Vtlb" );
-#else
-	// Win32 just needs this, since malloc always maps below 2GB.
-	vtlbdata.alloc_base = (u8*)_aligned_malloc( VTLB_ALLOC_SIZE, 4096 );
-	if( vtlbdata.alloc_base == NULL )
-		throw Exception::OutOfMemory( "Fatal Error: could not allocate 42Meg buffer for PS2's mappable system ram." );
-#endif
+void vtlb_Core_Free()
+{
+	safe_aligned_free( vtlbdata.vmap );
 }
 
-//////////////////////////////////////////////////////////////////////////////////////////
-//
-void vtlb_Core_Shutdown()
+static wxString GetHostVmErrorMsg()
 {
-	if( vtlbdata.alloc_base == NULL ) return;
+	return pxE("!Notice:HostVmReserve",
+		L"Your system is too low on virtual resources for PCSX2 to run.  This can be "
+		L"caused by having a small or disabled swapfile, or by other programs that are "
+		L"hogging resources."
+	);
+}
+// --------------------------------------------------------------------------------------
+//  VtlbMemoryReserve  (implementations)
+// --------------------------------------------------------------------------------------
+VtlbMemoryReserve::VtlbMemoryReserve( const wxString& name, size_t size )
+	: m_reserve( name, size )
+{
+	m_reserve.SetPageAccessOnCommit( PageAccess_ReadWrite() );
+}
 
-#ifdef __LINUX__
-	SafeSysMunmap( vtlbdata.alloc_base, VTLB_ALLOC_SIZE );
-#else
-	// Make sure and unprotect memory first, since CrtDebug will try to write to it.
-	HostSys::MemProtect( vtlbdata.alloc_base, VTLB_ALLOC_SIZE, Protect_ReadWrite );
-	safe_aligned_free( vtlbdata.alloc_base );
-#endif
+void VtlbMemoryReserve::SetBaseAddr( uptr newaddr )
+{
+	m_reserve.SetBaseAddr( newaddr );
+}
 
+void VtlbMemoryReserve::Reserve( sptr hostptr )
+{
+	if (!m_reserve.ReserveAt( hostptr ))
+	{
+		throw Exception::OutOfMemory( m_reserve.GetName() )
+			.SetDiagMsg(L"Vtlb memory could not be reserved.")
+			.SetUserMsg(GetHostVmErrorMsg());
+	}
 }
 
-//////////////////////////////////////////////////////////////////////////////////////////
-// This function allocates memory block with are compatible with the Vtlb's requirements
-// for memory locations.  The Vtlb requires the topmost bit (Sign bit) of the memory
-// pointer to be cleared.  Some operating systems and/or implementations of malloc do that,
-// but others do not.  So use this instead to allocate the memory correctly for your
-// platform.
-//
-u8* vtlb_malloc( uint size, uint align )
+void VtlbMemoryReserve::Commit()
 {
-	vtlbdata.alloc_current += align-1;
-	vtlbdata.alloc_current &= ~(align-1);
+	if (IsCommitted()) return;
+	if (!m_reserve.Commit())
+	{
+		throw Exception::OutOfMemory( m_reserve.GetName() )
+			.SetDiagMsg(L"Vtlb memory could not be committed.")
+			.SetUserMsg(GetHostVmErrorMsg());
+	}
+}
 
-	int rv = vtlbdata.alloc_current;
-	vtlbdata.alloc_current += size;
-	return &vtlbdata.alloc_base[rv];
+void VtlbMemoryReserve::Reset()
+{
+	Commit();
+	memzero_sse_a(m_reserve.GetPtr(), m_reserve.GetCommittedBytes());
 }
 
-//////////////////////////////////////////////////////////////////////////////////////////
-//
-void vtlb_free( void* pmem, uint size )
+void VtlbMemoryReserve::Decommit()
 {
-	// Does nothing anymore!  Alloc/dealloc is now handled by vtlb_Core_Alloc /
-	// vtlb_Core_Shutdown.  Placebo is left in place in case it becomes useful again
-	// at a later date.
+	m_reserve.Reset();
+}
 
-	return;
+void VtlbMemoryReserve::Release()
+{
+	m_reserve.Release();
 }
+
+bool VtlbMemoryReserve::IsCommitted() const
+{
+	return !!m_reserve.GetCommittedPageCount();
+}
\ No newline at end of file
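
For context, the life-cycle the VtlbMemoryReserve methods above implement, in the order a caller would typically invoke them (the name, size, and base address are illustrative):

	VtlbMemoryReserve reserve( L"EE Main Memory", 32 * _1mb );
	reserve.Reserve( 0 );   // reserve address space (assumed: 0 lets the OS pick the base)
	reserve.Commit();       // back the reservation with read/write pages
	reserve.Reset();        // commit if needed, then zero the committed region
	reserve.Decommit();     // release physical pages, keep the address reservation
	reserve.Release();      // drop the reservation entirely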

 
