VMware Fusion (Mac) doesn't work with Leadwerks?


Gabriel

Hello,

I'm trying to run a Leadwerks application in a VMware Fusion virtual machine, but it doesn't work; the editor has the same problem. :(

Does anyone have an idea?

 

Below is the log file:

 

Warning: Failed to load material "abstract::dust.mat": Path not found.
Leadwerks Engine 2.5
Initializing Renderer...
OpenGL Version: 2.1 Mesa 7.5.1
GLSL Version: 1.20
Render device: Gallium 0.3 on SVGA3D; build: RELEASE; 
Vendor: VMware, Inc.
DrawBuffers2 supported: 0
16 texture units supported.
GPU instancing supported: 0
Shader model 4.0 supported: 0
Conditional render supported: 0
Loading shader "zip::c:/course/montanahorseleo/resources/shaders.pak//query.vert", ""...
Invoking script "C:/course/MontanaHorseLeo/Scripts/start/collisions.lua"...
Invoking script "C:/course/MontanaHorseLeo/Scripts/start/fliphook.lua"...
Invoking script "C:/course/MontanaHorseLeo/Scripts/start/globals.lua"...
Loading model "c:/course/montanahorseleo/resources/models/splin/voiture/voiture.gmf"...
Loading mesh "c:/course/montanahorseleo/resources/models/splin/voiture/voiture.gmf"...
Loading material "c:/course/montanahorseleo/resources/models/splin/voiture/voitureuv01.mat"...
Loading texture "c:/course/montanahorseleo/resources/models/splin/voiture/voitureuv01.dds"...
Loading shader "zip::c:/course/montanahorseleo/resources/shaders.pak//mesh/mesh_skin_diffuse.vert", "zip::c:/course/montanahorseleo/resources/shaders.pak//mesh/mesh_diffuse.frag"...
Error: Failed to compile fragment shader object.
Error: Array index out of bounds (index=1 size=1)


Source:
#version 120
#define LW_MAX_PASS_SIZE 1
#define LW_DIFFUSE texture0

#extension GL_ARB_draw_buffers : enable

uniform vec3 cameraposition;
uniform vec2 buffersize;
uniform vec2 camerarange;

float greyscale( in vec3 color ) {
return color.x * 0.3 + color.y * 0.59 + color.z * 0.11;
}

float DepthToZPosition(in float depth) {
return camerarange.x / (camerarange.y - depth * (camerarange.y - camerarange.x)) * camerarange.y;
}

float ZPositionToDepth(in float z) {
return (camerarange.x / (z / camerarange.y) - camerarange.y) / -(camerarange.y - camerarange.x);
}


#ifdef LW_DETAIL
uniform sampler2D LW_DETAIL;
#endif

#ifdef LW_DIFFUSE
uniform sampler2D LW_DIFFUSE;
#endif

#ifdef LW_DIFFUSE2
uniform sampler2D LW_DIFFUSE2;
#endif

#ifdef LW_SPECULARMAP
uniform sampler2D LW_SPECULARMAP;
#endif

#ifdef LW_BUMPMAP
uniform sampler2D LW_BUMPMAP;
#endif

#ifdef LW_BUMPMAP2
uniform sampler2D LW_BUMPMAP2;
#endif

#ifdef LW_BLOOM
uniform sampler2D LW_BLOOM;
#endif

#ifdef LW_CUBEMAP
uniform samplerCube LW_CUBEMAP;
varying vec3 cubemapdir;
varying vec3 cubemapnormal;
#endif

#ifdef LW_GIMAP
uniform sampler2D LW_GIMAP;
#endif

#ifdef LW_PARALLAXMAP
uniform sampler2D LW_PARALLAXMAP;
varying vec3 eyevec;
#endif

#ifdef LE_REFRACTION
uniform sampler2D LE_REFRACTION;
uniform sampler2D LE_DEPTHBUFFER;
uniform float refractionstrength = 0.01;
#endif

#ifdef LW_POMMAP
vec3 vLightTS=vec3(0.577,0.577,0.577);
varying vec3 eyevec;
float depthP = 0.01;
float nMinSamples = 20;
float nMaxSamples = 50;	
#endif

#ifdef LW_MESHLAYER
varying float vegetationfade;
#endif

#ifdef LW_ALPHABLEND
uniform sampler2D LW_ALPHABLEND_INCOMINGCOLOR;
uniform sampler2D LW_ALPHABLEND_INCOMINGNORMAL;
#endif

varying vec3 vertexposition;
varying vec3 T,B,N;
varying vec2 texcoord0;
varying vec2 texcoord1;
varying vec4 modelvertex;
varying vec4 fragcolor;

float fOcclusionShadow = 1.0;

uniform sampler2D texture14;
uniform float terrainsize;
uniform vec3 terrainscale;
uniform float bumpscale;
uniform float specular;
uniform float gloss;

//Terrain color map
uniform sampler2D texture12;

void main(void) {

vec4 diffuse = fragcolor;
vec3 normal;
float shininess = 0.0;
vec2 texcoord=texcoord0;// only use this because of parallax mapping
float selfillumination = 0.0;	

vec2 terraincoord;
float terrainresolution;

#ifdef LW_VERTEXGI
	diffuse = vec4(1);
#endif

#ifdef LW_PARALLAXMAP
	texcoord += (diffuse.w * 0.04 - 0.036) * normalize(eyevec).xy;
	//texcoord += (texture2D(LW_DIFFUSE,texcoord).w * 0.04 - 0.036) * normalize(eyevec).xy;
#endif

#ifdef LW_POMMAP
	// for POM, the heightmap is in the alpha of the diffuse so save ur diffuse with DXT5 I chose this because the alpha of DXT5 is higher precision
// from Microsoft's and Ati's implementation thank them for the source 
// Compute the ray direction for intersecting the height field profile with 
// current view ray. See the above paper for derivation of this computation. (Ati's comment)

// Compute initial parallax displacement direction: (Ati's comment)
vec2 vparallaxdirection = normalize(eyevec).xy;

// The length of this vector determines the furthest amount of displacement: (Ati's comment)
float flength         = length( eyevec );
float fparallaxlength = sqrt( flength * flength - eyevec.z * eyevec.z ) / eyevec.z; 

// Compute the actual reverse parallax displacement vector: (Ati's comment)
vec2 vParallaxOffsetTS = vparallaxdirection * fparallaxlength;

// Need to scale the amount of displacement to account for different height ranges
// in height maps. This is controlled by an artist-editable parameter: (Ati's comment)
vParallaxOffsetTS *= depthP; 

int nNumSamples;
nNumSamples = int((mix( nMinSamples, nMaxSamples, 1-dot( vparallaxdirection, N.xy ) )));	//In reference shader: int nNumSamples = (int)(lerp( nMinSamples, nMaxSamples, dot( eyeDirWS, N ) ));
float fStepSize = 1.0 / float(nNumSamples);	
float fCurrHeight = 0.0;
float fPrevHeight = 1.0;
float fNextHeight = 0.0;
int    nStepIndex = 0;
vec2 vTexOffsetPerStep = fStepSize * vParallaxOffsetTS;
vec2 vTexCurrentOffset = texcoord.xy;
float  fCurrentBound     = 1.0;
float  fParallaxAmount   = 0.0;

vec2 pt1 = vec2(0,0);
vec2 pt2 = vec2(0,0);	    

while ( nStepIndex < nNumSamples ) 
{
vTexCurrentOffset -= vTexOffsetPerStep;

// Sample height map which in this case is stored in the alpha channel of the normal map: (Ati's comment)
fCurrHeight = texture2D( LW_DIFFUSE, vTexCurrentOffset).a; 

fCurrentBound -= fStepSize;

if ( fCurrHeight > fCurrentBound ) 
{   
   pt1 = vec2( fCurrentBound, fCurrHeight );
   pt2 = vec2( fCurrentBound + fStepSize, fPrevHeight );

   nStepIndex = nNumSamples + 1;	//Exit loop
   fPrevHeight = fCurrHeight;
}
else
{
   nStepIndex++;
   fPrevHeight = fCurrHeight;
}
} 
float fDelta2 = pt2.x - pt2.y;
float fDelta1 = pt1.x - pt1.y;

float fDenominator = fDelta2 - fDelta1;

// SM 3.0 requires a check for divide by zero, since that operation will generate
// an 'Inf' number instead of 0, as previous models (conveniently) did: (Ati's comment)
if ( fDenominator == 0.0 )
{
fParallaxAmount = 0.0;
}
else
{
fParallaxAmount = (pt1.x * fDelta2 - pt2.x * fDelta1 ) / fDenominator;
}
vec2 vParallaxOffset = vParallaxOffsetTS * (1 - fParallaxAmount );
texcoord = texcoord - vParallaxOffset;        

vLightTS = T * vLightTS.x + B * vLightTS.y + N * vLightTS.z;
vec2 vLightRayTS = vLightTS.xy * depthP;
// Compute the soft blurry shadows taking into account self-occlusion for
// features of the height field:
float sh0 = texture2D( LW_DIFFUSE, texcoord).a;
//float sh7 = (texture2D( LW_DIFFUSE, texcoord + vLightRayTS * 0.55).a - sh0 - 0.55 ) * 6;
//float sh6 = (texture2D( LW_DIFFUSE, texcoord + vLightRayTS * 0.44).a - sh0 - 0.44 ) * 8;
//float sh5 = (texture2D( LW_DIFFUSE, texcoord + vLightRayTS * 0.33).a - sh0 - 0.33 ) * 10;
//float sh4 = (texture2D( LW_DIFFUSE, texcoord + vLightRayTS * 0.22).a - sh0 - 0.22 ) * 12;
// Compute the actual shadow strength:
//fOcclusionShadow = 1 - max( max( max(  sh7, sh6 ), sh5 ), sh4 );
// The previous computation overbrightens the image, let's adjust for that:
//fOcclusionShadow = fOcclusionShadow* .1+.45;

//diffuse *= fOcclusionShadow;


#endif

#ifdef LW_DIFFUSE2
	vec4 diffuse2 = texture2D(LW_DIFFUSE2,texcoord);
	diffuse = vec4(1);
#endif	

#ifdef LW_DIFFUSE
	diffuse *= texture2D(LW_DIFFUSE,texcoord);//*fOcclusionShadow;
#endif

#ifdef LW_ALPHATEST
	if (diffuse.w<0.5) {
		discard;
	}
#endif


normal = N;

#ifdef LW_BUMPMAP

	#ifdef LW_TERRAINNORMALS
		//Use terrain normals
		terraincoord=vec2(vertexposition.x,-vertexposition.z) / terrainsize + 0.5;
		terrainresolution = terrainsize / terrainscale.x;
		terraincoord += 0.5 / terrainresolution;
		vec3 worldNormal = ((texture2D(texture14,terraincoord).xyz - 0.5) * 2.0).xyz;
		normal = normalize(gl_NormalMatrix*worldNormal);
	#else
		vec4 bumpcolor = texture2D(LW_BUMPMAP,texcoord);
		normal = bumpcolor.xyz * 2.0 - 1.0;
	#endif

	#ifdef LW_DETAIL
		normal += texture2D(LW_DETAIL,texcoord * 4.0).xyz * 2.0 - 1.0;
	#endif
	normal.z /= bumpscale;
	normal = T * normal.x + B * normal.y + N * normal.z;
	normal = normalize(normal);
	#ifdef LW_SPECULAR
		shininess = bumpcolor.a*specular;//*fOcclusionShadow
	#endif
	#ifdef LW_SPECULARMAP
		shininess = texture2D(LW_SPECULARMAP,texcoord).x*specular;//*fOcclusionShadow
	#endif

#else
	normal=normalize(normal);
#endif

#ifdef LW_BUMPMAP2
	vec3 normal2;
	float shininess2;

	vec4 bumpcolor2 = texture2D(LW_BUMPMAP2,texcoord);
	normal2 = bumpcolor2.xyz * 2.0 - 1.0;

	normal2.z /= bumpscale;
	normal2 = T * normal2.x + B * normal2.y + N * normal2.z;
	normal2 = normalize(normal2);
	#ifdef LW_SPECULAR
		shininess2 = bumpcolor2.a*specular;
	#endif

#endif	

#ifdef LW_TERRAINNORMALS
	#ifndef LW_BUMPMAP
		//Use terrain normals
		terraincoord=vec2(vertexposition.x,-vertexposition.z) / terrainsize + 0.5;
		terrainresolution = terrainsize / terrainscale.x;
		terraincoord.x -= 0.5 / terrainresolution;
		//vec4 normsample=((texture2D(texture14,terraincoord).xyz - 0.5) * 2.0).xyz;
		vec3 worldNormal = ((texture2D(texture14,terraincoord).xyz - 0.5) * 2.0).xyz;
		normal = normalize(gl_NormalMatrix*worldNormal);
		//shininess = normsample.w;
	#endif
#endif

#ifdef LW_TERRAINCOLOR
	//Use terrain color
	terraincoord=vec2(vertexposition.x,-vertexposition.z) / terrainsize + 0.5;
	terrainresolution = terrainsize / terrainscale.x;
	terraincoord.x -= 0.5 / terrainresolution;
	//terraincoord.y += 0.5 / terrainresolution;
	vec4 terraincolor = texture2D(texture12,terraincoord);
	diffuse = vec4( greyscale(diffuse.xyz) * 2.0 * terraincolor.xyz,diffuse.w);

	#ifdef LW_MESHLAYER
		float temp_w=diffuse.w;
		diffuse = diffuse * (1.0-vegetationfade) + terraincolor * vegetationfade;
		diffuse.w=temp_w;
	#endif

	shininess = terraincolor.w;
#endif

#ifdef LE_REFRACTION
	diffuse.a=0.25;
	vec4 refractionvector = vec4( gl_FragCoord.x/buffersize.x, gl_FragCoord.y/buffersize.y, gl_FragCoord.z, 1.0 );
	vec4 refractionvector2 = refractionvector + refractionstrength * vec4(normal,0.0);		
	if (gl_FragCoord.z<DepthToZPosition(texture2DProj(LE_DEPTHBUFFER,refractionvector2).x)) {
		refractionvector=refractionvector2;
	}
	vec4 transparency = texture2DProj(LE_REFRACTION,refractionvector);
	diffuse = transparency * diffuse;
#endif

#ifdef LW_BUMPMAP2
	shininess = fragcolor.r * shininess + (1.0-fragcolor.r) * shininess2;
	normal = fragcolor.r * normal + (1.0-fragcolor.r) * normal2;
#endif

vec3 adjustednormal = normal*0.5+0.5;
float adjustedgloss = gloss;

shininess=clamp(shininess,0.0,1.0)*0.5;



#ifdef LW_DIFFUSE2
	diffuse = fragcolor.r * diffuse + (1.0-fragcolor.r) * diffuse2;
#endif	

   #ifdef LW_CUBEMAP
	vec3 cubecoord = reflect( normalize( modelvertex.xyz - cameraposition ), normal * gl_NormalMatrix );
	diffuse = vec4( textureCube(LW_CUBEMAP,cubecoord).xyz, diffuse.w);
   #endif

//Diffuse
gl_FragData[0] = diffuse;	

//Normal
#ifdef LW_FULLBRIGHT
	gl_FragData[1] = vec4(1.0,1.0,1.0,diffuse.w);
#else
	gl_FragData[1] = vec4(adjustednormal,diffuse.w);
#endif

//Bloom
#ifdef LW_BLOOM
	vec4 bloomcolor = texture2D(LW_BLOOM,texcoord) * fragcolor;
	gl_FragData[3] = bloomcolor;
	gl_FragData[3].w = 0;
#else
	#ifdef LW_GIMAP
		vec4 gicolor = texture2D(LW_GIMAP,vec2(texcoord1.x,1.0-texcoord1.y));

		//We're going to clamp the results to prevent overbrightening when the lightmap environment doesn't match the dynamic lighting
		//float gilum = gicolor.r * 0.3 + gicolor.g * 0.59 + gicolor.b * 0.11;
		//float giscale = clamp(gilum,0,0.1) / gilum;
		//gicolor.x *= giscale;
		//gicolor.y *= giscale;
		//gicolor.z *= giscale;
		gicolor *= diffuse;
		gicolor.w = 1.0;
		gl_FragData[3] = gicolor;
	#else
		#ifdef LW_VERTEXGI
			gl_FragData[3] = fragcolor * diffuse;
			gl_FragData[3].w = 1.0;
		#else
			gl_FragData[3] = vec4(0);
		#endif
	#endif
#endif

//Modify depth output for shadows
#ifdef LW_POMMAP
	float z = DepthToZPosition(gl_FragCoord.z);
	z = z -diffuse.a * fparallaxlength * depthP;
	gl_FragDepth = ZPositionToDepth( z );
#endif

gl_FragData[2]=vec4(shininess,gloss,0.0,diffuse.w);

/*
float c;
float temp;
temp=adjustednormal.x*100.0;
c=int(temp);
temp=adjustednormal.y*100.0;
c+=int(temp)/100.0;
temp=adjustednormal.z*100.0;
c+=int(temp)/100.0/100.0;
gl_FragData[1].x = c;
*/
}


stack traceback:
[C]: in function 'LoadScene'
[string "C:\course\MontanaHorseLeo\start.lua"]:29: in main chunk


Loading shader "zip::c:/course/montanahorseleo/resources/shaders.pak//mesh/mesh_skin_diffuse.vert", "zip::c:/course/montanahorseleo/resources/shaders.pak//mesh/mesh_diffuse.frag"...

Error: Failed to compile fragment shader object.

Error: Array index out of bounds (index=1 size=1)

ATI and NVIDIA give you the line number an error occurs on. That would be a good start if we had it, but honestly I would be shocked if it ever ran on a setup like this.
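
Even without a vendor-specific line number, the full compile log can usually be pulled straight out of the driver; Mesa writes its error position there as well. A small C sketch (a hypothetical helper, not the Leadwerks API; it assumes GLEW and a current GL context):

#include <stdio.h>
#include <stdlib.h>
#include <GL/glew.h>

/* Compile a fragment shader and dump whatever info log the driver
   produced. ATI, NVIDIA and Mesa each use their own format, but all of
   them report the failing line here. */
GLuint compile_fragment_shader(const char *source) {
    GLuint shader = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(shader, 1, &source, NULL);
    glCompileShader(shader);

    GLint ok = GL_FALSE, loglen = 0;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &ok);
    glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &loglen);
    if (loglen > 1) {
        char *log = malloc((size_t)loglen);
        glGetShaderInfoLog(shader, loglen, NULL, log);
        fprintf(stderr, "Shader info log:\n%s\n", log);
        free(log);
    }
    if (!ok) {
        glDeleteShader(shader);
        return 0;
    }
    return shader;
}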

 

Leadwerks3D will run on Mac natively.



Hello Josh

 

The only laptop at home is a Mac, and with my wife being sick I can't stay glued to the desktop PC in my office all the time.

I thought this could be a solution, so I could continue my development in the emulator.

 

PS: BlitzBasic works perfectly in the VMware Fusion emulator!

 

If you ever come up with an idea, my wife will be eternally grateful to you. ;-)

 

Gabriel


I think VirtualBox has better GPU hardware support than VMware. It's also free.

https://www.virtualbox.org/wiki/Downloads

 

Trust me, it doesn't. :(

 

I do almost all of my development in VMs, and it's definitely the future. Unfortunately, we're still a few years off from truly usable hardware-accelerated video passthrough.



Trust me, it doesn't. :(

 

I do almost all of my development in VMs, and it's definitely the future. Unfortunately, we're still a few years off from truly usable hardware-accelerated video passthrough.

We're a few years off from truly usable hardware, too.



Well, at least VirtualBox supports OpenGL 3, which VMware doesn't.

 

Actually, according to the changelog and official sources, VirtualBox only supports OpenGL 2.1, not 3.0.
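
Whichever VM gets used, it is safer to check what the guest driver actually advertises than to trust a changelog. Another small C sketch (same assumptions as the earlier snippets: GLEW plus a current context; the extension names are my guesses at what the capability lines in the 2.5 log correspond to, not something taken from the engine source):

#include <stdio.h>
#include <GL/glew.h>

/* Print whether the guest driver advertises the kinds of features the
   2.5 startup log probes for (draw buffers 2, SM4, instancing,
   conditional render). Requires a current OpenGL context and glewInit(). */
void print_guest_caps(void) {
    printf("EXT_draw_buffers2     : %d\n", (int)glewIsSupported("GL_EXT_draw_buffers2"));
    printf("EXT_gpu_shader4 (SM4) : %d\n", (int)glewIsSupported("GL_EXT_gpu_shader4"));
    printf("ARB_draw_instanced    : %d\n", (int)glewIsSupported("GL_ARB_draw_instanced"));
    printf("NV_conditional_render : %d\n", (int)glewIsSupported("GL_NV_conditional_render"));
}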



Multi-boot is the best anyway. I have Windows, ten Linuxes, and a few other OSes on my C: drive. :( I could also install OS X, because Apple made it impossible to use it with an Apple computer: it needs an Intel CPU, so basically it only runs on a PC (no matter whether it has an Apple logo on it or not), which Apple also made illegal.



Multi-boot is the best anyway. I have Windows, ten Linuxes, and a few other OSes on my C: drive. :( I could also install OS X, because Apple made it impossible to use it with an Apple computer: it needs an Intel CPU, so basically it only runs on a PC (no matter whether it has an Apple logo on it or not), which Apple also made illegal.

 

I can already tell I'm going to regret asking this, but what do you mean Apple made it impossible to use OS X with an Apple computer?



A real Apple computer has a Motorola 68000 or PowerPC CPU, although I think the latter was just a hopeless attempt to move forward given Motorola's silence at the time. Putting an Intel CPU into a Mac basically makes it a non-Mac. It's just a plain PC. I can paint an orange, a banana or an apple on my PC too, but that doesn't make it a Motorola.

 

The major difference between a PC and a Mac/Amiga is that a PC is always 8-bit, while a Mac/Amiga is truly 32-bit and also superscalar, which means it can do 32 CPU instructions in the time a PC can do only one. It's technology you can't really even compare.



A real Apple computer has a Motorola 68000 or PowerPC CPU, although I think the latter was just a hopeless attempt to move forward given Motorola's silence at the time. Putting an Intel CPU into a Mac basically makes it a non-Mac. It's just a plain PC. I can paint an orange, a banana or an apple on my PC too, but that doesn't make it a Motorola.

 

The major difference between a PC and a Mac/Amiga is that a PC is always 8-bit, while a Mac/Amiga is truly 32-bit and also superscalar, which means it can do 32 CPU instructions in the time a PC can do only one. It's technology you can't really even compare.

 

Normally I'd refute this, cite articles and resources, and even do a bit of logical reasoning to explain why Meta's wrong here. However, I'm just tired of dealing with the BS. I'm going to leave it here and label it a typical baseless Meta response. It's not worth my time anymore.


