    ///////////////////////////////////////////////////////////////////////////
    //
    //  System:    Simplygon
    //  File:      VisibilityWeightExample.cpp
    //  Language:  C++
    //
    //  Copyright (c) 2019 Microsoft. All rights reserved.
    //
    //  This is private property, and it is illegal to copy or distribute in
    //  any form, without written authorization by the copyright owner(s).
    //
    ///////////////////////////////////////////////////////////////////////////
    //
    //  #Description#
    //
    //  RunExample() will load a model and set up some basic reduction settings.
    //  It will then call VisibilitySettingsSetup() with the current iteration 
    //  index, which sets up the visibility settings for that iteration.
    //  
    //  VisibilitySettingsSetup() with "iteration":
    //
    //  0, will not use visibility in reducer or texture coordinate generator
    //
    //  1, will use visibility weights in both reducer and texture coordinate 
    //  generator
    //
    //  2, will generate a number of camera views located on a hemisphere above 
    //  the model and compute the visibility from them
    //
    //  3, will set up a camera path with an omnidirectional camera from which 
    //  the visibility is computed
    //
    //  4, will set up another camera from which the visibility is computed, 
    //  and cull triangles that aren't visible from it
    //
    //  5, will set up an orthographic camera that views the entire scene 
    //  from a set direction.
    //
    ///////////////////////////////////////////////////////////////////////////
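    
    //  Note: the helper functions InitExample(), DeinitExample(), GetAssetPath(),
    //  GetExecutablePath() and AddSimplygonTexture(), as well as the global Simplygon
    //  interface pointer "sg", are assumed to be provided by the shared Common/Example.h
    //  header included below.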
    
    #include "../Common/Example.h"
    
    void RunExample( const std::string& readFrom, const std::string& writeTo, bool useOccluder );
    void VisibilitySettingsSetup( spScene scene, spVisibilitySettings visibilitySettings, uint iteration );
    
    int main( int argc, char* argv[] )
        {
        try
        {
            InitExample();
    
            //Running visibility computations on the GPU is faster, but can generate slightly different results on different machines
            sg->SetGlobalSetting("AllowGPUAcceleration", false);
    
            std::string assetPath = GetAssetPath();
    
            //Run visibility example on a scene with a teapot inside an open box.
            RunExample(assetPath + "ObscuredTeapot/ObscuredTeapot.obj", "ObscuredTeapot_LOD", false);
    
            //Run visibility example on the teapot with the open box as an occluder.
            //Note that the result of the teapot reduction in this case will be slightly
            //different from the previous scene, since the open box, being an occluder,
            //is not included when averaging the visibility of the scene geometry.
            RunExample(assetPath + "ObscuredTeapot/Teapot.obj", "TeapotWithOccluder_LOD", true);
    
            DeinitExample();
        }
        catch (const std::exception& ex)
        {
            std::cerr << ex.what() << std::endl;
            return -1;
        }
    
        return 0;
        }
    
    
    void VisibilitySettingsSetup( spScene scene, spVisibilitySettings visibilitySettings, uint iteration )
        {
        switch( iteration )
            {
                case 0:
                    // Without using visibility weights.
                    // The default settings of visibilitySettings are used.
                    break;
                case 1:
                    // Use visibility weights both in reducer and texture coordinate generator
                    // The reduction will use visibility weights when processing.
                    // Areas that are more visible will be preserved better.
                    // If regular vertex weights exist as well, then both will be used
                    visibilitySettings->SetUseVisibilityWeightsInReducer( true );
    
                    // The UV generator will create charts based on the visibility weights.
                    // Areas that are more visible will have greater UV space.
                    // If regular vertex weights exist as well, then both will be used
                    visibilitySettings->SetUseVisibilityWeightsInTexcoordGenerator( true );
                    break;
                case 2:
                    // Set up a number of camera views located on a hemisphere above the model
                    {
                    spSceneCamera sceneCamera = sg->CreateSceneCamera();
                    sceneCamera->SetCustomSphereCameraPath(
                        3, //Fidelity
                        0.0f, //Pitch angle
                        0.0f, //Yaw angle
                        90.0f //Coverage (degrees)
                        );
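                    // A higher fidelity value generates more camera positions on the sphere (at the
                    // cost of a longer visibility computation); with the coverage limited to 90 degrees
                    // the cameras only cover the hemisphere above the model.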
                    if( sceneCamera->ValidateCamera() ) //If the camera is set up correctly
                        {
                        scene->GetRootNode()->AddChild( sceneCamera );
                        spSelectionSet selectionSet = sg->CreateSelectionSet();
                        selectionSet->AddItem( sceneCamera->GetNodeGUID() );
                        rid cameraSelectionSetID = scene->GetSelectionSetTable()->AddItem( selectionSet );
                        visibilitySettings->SetCameraSelectionSetID( cameraSelectionSetID );
                        }
    
                    visibilitySettings->SetUseVisibilityWeightsInReducer( true );
                    visibilitySettings->SetUseVisibilityWeightsInTexcoordGenerator( true );
                    }
    
                    break;
                case 3:
                    // Set up a camera path manually where an omnidirectional camera uses the same
                    // coordinate system as the model.
                    // One omnidirectional camera is placed inside the handle of the teapot and 
                    // computes the visibility in all directions.
                    {
                    //Create a camera
                    spSceneCamera sceneCamera = sg->CreateSceneCamera();
    
                    //Get the camera position
                    spRealArray cameraPositions = sceneCamera->GetCameraPositions();
    
                    //Set the tuple count to 1
                    cameraPositions->SetTupleCount( 1 );
    
                    //Set the camera position (inside the handle of the teapot)
                    real xyz[] = { -11.32f, 8.654f, 0.204f };
                    cameraPositions->SetTuple( 0, xyz );
    
                    // Have the camera view all directions around itself
                    sceneCamera->SetCameraType( SG_CAMERATYPE_OMNIDIRECTIONAL );
    
                    // Sets the camera coordinates to use the same coordinate system as the scene
                    sceneCamera->SetUseNormalizedCoordinates( false );
    
                    if( sceneCamera->ValidateCamera() ) //If the camera is set up correctly
                        {
                        scene->GetRootNode()->AddChild( sceneCamera );
                        spSelectionSet cameraSelectionSet = sg->CreateSelectionSet();
                        cameraSelectionSet->AddItem( sceneCamera->GetNodeGUID() );
                        rid selectionSetID = scene->GetSelectionSetTable()->AddSelectionSet( cameraSelectionSet );
                        visibilitySettings->SetCameraSelectionSetID( selectionSetID );
                        }
                    visibilitySettings->SetUseVisibilityWeightsInReducer( true );
                    visibilitySettings->SetUseVisibilityWeightsInTexcoordGenerator( true );
                    }
    
                    break;
                case 4:
                    // Set up a camera manually and cull triangles that are not visible.
                    // The camera uses a normalized (around the model) coordinate system.
                    {
                    //Create a camera
                    spSceneCamera sceneCamera = sg->CreateSceneCamera();
    
                    //Get the camera position and target position
                    spRealArray cameraPositions = sceneCamera->GetCameraPositions();
                    spRealArray targetPositions = sceneCamera->GetTargetPositions();
    
                    //Set the tuple count to 1
                    cameraPositions->SetTupleCount( 1 );
                    targetPositions->SetTupleCount( 1 );
    
                    // Place the camera at (0, 0, 3)
                    // and have it point at the origin (the center of the model)
                    real xyz[] = { 0.0f,0.0f,3.0f };
                    real target[] = { 0.0f,0.0f,0.0f };
                    cameraPositions->SetTuple( 0, xyz );
                    targetPositions->SetTuple( 0, target );
    
                    //Increase the resolution of the camera
                    int pixelResolution = 4096;
                    sceneCamera->SetPixelFieldOfView( sceneCamera->GetFieldOfView() / float( pixelResolution ) );
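                    // SetPixelFieldOfView() takes the angular size of a single pixel, so dividing the
                    // camera's field of view by 4096 makes the visibility computation sample this view
                    // at roughly 4096 pixels across.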
    
                    // Sets the camera coordinates to use normalized coordinates
                    sceneCamera->SetUseNormalizedCoordinates( true );
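                    // With normalized coordinates the position and target are interpreted relative to
                    // the scene bounds rather than in raw scene units, so (0, 0, 3) places the camera
                    // outside the model, looking back at its center.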
    
                    if( sceneCamera->ValidateCamera() ) //If the camera is set up correctly
                        {
                        scene->GetRootNode()->AddChild( sceneCamera );
                        spSelectionSet selectionSet = sg->CreateSelectionSet();
                        selectionSet->AddItem( sceneCamera->GetNodeGUID() );
                        rid cameraSelectionSetID = scene->GetSelectionSetTable()->AddItem( selectionSet );
                        visibilitySettings->SetCameraSelectionSetID( cameraSelectionSetID );
                        }
    
                    // Simply remove triangles that aren't visible
                    visibilitySettings->SetCullOccludedGeometry( true );
                    }
    
                    break;
                case 5:
                    //Set up an orthographic camera that will view the entire scene from a set direction
                    {
                    //Create a camera
                    spSceneCamera sceneCamera = sg->CreateSceneCamera();
    
                    //Make it orthographic
                    sceneCamera->SetCameraType( SG_CAMERATYPE_ORTHOGRAPHIC );
    
                    //Decide the size of the orthographic pixels
                    //This should correspond to the size of details in the scene that should be preserved
                    real smallSceneSize = scene->GetRadius() / 500.0f;
    
                    //We want to make sure there aren't small patches of non-visible triangles (surrounded by visible triangles) 
                    //that are culled from the scene. It is better to keep those non-visible triangles to get a good result
                    //from the reducer and the tex-coord generator
                    real smallArea = (scene->GetRadius() / 40.0f)*(scene->GetRadius() / 40.0f);
                    visibilitySettings->SetFillNonVisibleAreaThreshold( smallArea );
    
                    sceneCamera->SetOrthographicCameraPixelSize( smallSceneSize );
    
                    //Get the camera position and target position
                    spRealArray cameraPositions = sceneCamera->GetCameraPositions();
                    spRealArray targetPositions = sceneCamera->GetTargetPositions();
    
                    //Set the tuple count to 1
                    cameraPositions->SetTupleCount( 1 );
                    targetPositions->SetTupleCount( 1 );
    
                    //When you have an orthographic camera,
                    //the position and target are only used to determine the direction
                    //of the camera
                    real xyz[] = { 1.0f,1.0f,1.0f };
                    real target[] = { 0.0f,0.0f,0.0f };
                    cameraPositions->SetTuple( 0, xyz );
                    targetPositions->SetTuple( 0, target );
    
                    if( sceneCamera->ValidateCamera() ) //If the camera is set up correctly
                        {
                        scene->GetRootNode()->AddChild( sceneCamera );
                        spSelectionSet selectionSet = sg->CreateSelectionSet();
                        selectionSet->AddItem( sceneCamera->GetNodeGUID() );
                        rid cameraSelectionSetID = scene->GetSelectionSetTable()->AddItem( selectionSet );
                        visibilitySettings->SetCameraSelectionSetID( cameraSelectionSetID );
                        }
    
                    // Simply remove triangles that aren't visible
                    visibilitySettings->SetCullOccludedGeometry( true );
                    }
                    break;
                default:
                    break;
            }
        }
    
    void RunExample( const std::string& readFrom, const std::string& writeTo, bool useOccluder )
        {
        const char* outputFilenameDescription[] = { "regular", "weighted", "multiple_cameras_above_in_custom_sphere", "custom_point_camera_in_teapot_handle", "custom_camera_culled_triangles", "orthographic_camera" };
    
        // Run all iterations, showing different uses of the visibility weights
        for( uint i = 0; i < 6; ++i )
            {
            // Run reduction with some basic settings.
            // The triangle count will be reduced to 30% of the original input, and
            // depending on the visibility settings for the iteration, hidden triangles will
            // be more likely to be removed. The LOD is then stored to file.
    
            std::string outputGeometryFilename = GetExecutablePath() + writeTo + "_" + std::to_string( i ) + "_" + outputFilenameDescription[i] + ".obj";
            std::string outputDiffuseTextureFilename = GetExecutablePath() + writeTo + "_" + std::to_string( i ) + "_" + outputFilenameDescription[i] + ".png";
    
            // Load from file
            spWavefrontImporter objReader = sg->CreateWavefrontImporter();
            objReader->SetImportFilePath( readFrom.c_str() );
            if( !objReader->RunImport() )
                throw std::runtime_error("Failed to load input file!");
    
            spScene scene = objReader->GetScene();
            spMaterialTable materials = scene->GetMaterialTable();
            spTextureTable textures = scene->GetTextureTable();
    
            // Create the reduction-processor.
            spReductionProcessor red = sg->CreateReductionProcessor();
            red->SetScene( scene );
    
            // Get the Reduction Settings.
            spReductionSettings reductionSettings = red->GetReductionSettings();
    
            // Below in the "useOccluder" part we will add IGeometryData objects to the scene that will only be used to occlude the scene.
            // To process only the scene objects we currently have (i.e. to exclude the geometries that are occluders), 
            // we create a selection set with all the current meshes and tell the reducer to only reduce these objects.
            int processSelectionSetID = scene->SelectNodes( "ISceneMesh" ); //Selects all the ISceneMeshes in the scene and returns the index of the selection set
            reductionSettings->SetProcessSelectionSetID( processSelectionSetID );
    
            // Will reduce to 3/10 of the original triangle count.
            reductionSettings->SetTriangleRatio( 0.30f );
    
            // Just to show how generated tex-coords are affected by visibility
            spMappingImageSettings mappingImageSettings = red->GetMappingImageSettings();
            mappingImageSettings->SetGenerateTexCoords( true );
            mappingImageSettings->SetGenerateMappingImage( true );
    
            spVisibilitySettings visibilitySettings = red->GetVisibilitySettings();
    
            //Sets the visibility settings for each iteration
            VisibilitySettingsSetup( scene, visibilitySettings, i );
    
            if( useOccluder )
                {
                // Load occluder from file
                objReader = sg->CreateWavefrontImporter();
                objReader->SetImportFilePath( (GetAssetPath() + "ObscuredTeapot/occluder.obj").c_str() );
                if( !objReader->RunImport() )
                    throw std::runtime_error("Failed to load input file!");
    
                spScene occluderScene = objReader->GetScene();
    
                // Get the geometries from the scene
                spSelectionSet selectionSceneMeshes = occluderScene->GetSelectionSetTable()->GetSelectionSet( occluderScene->SelectNodes( "ISceneMesh" ) );
    
                // Add the geometries from the occluder scene as occluders to the scene we loaded before.
                // All occluder meshes are collected into a single selection set, so that every occluder
                // is taken into account by the visibility computation.
                spSelectionSet occluderSelectionSet = sg->CreateSelectionSet();
                for( uint meshIndex = 0; meshIndex < selectionSceneMeshes->GetItemCount(); ++meshIndex )
                    {
                    spGeometryData occluderGeometry = Cast<ISceneMesh>( occluderScene->GetNodeByGUID( selectionSceneMeshes->GetItem( meshIndex ) ) )->GetGeometry();

                    spSceneMesh occluderSceneMesh = scene->GetRootNode()->CreateChildMesh( occluderGeometry );
                    occluderSelectionSet->AddItem( occluderSceneMesh->GetNodeGUID() );
                    }
                rid occluderSelectionSetID = scene->GetSelectionSetTable()->AddSelectionSet( occluderSelectionSet );
                visibilitySettings->SetOccluderSelectionSetID( occluderSelectionSetID );
                }
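            // Note: the occluder geometry only affects the visibility computation. It is excluded from
            // the reduction by the process selection set created above, and from the exported file by
            // the selection set passed to the exporter below.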
    
            red->RunProcessing();
    
            spMappingImage mappingImage = red->GetMappingImage();
    
            spMaterialTable outputMaterials = sg->CreateMaterialTable();
            spTextureTable outputTextures = sg->CreateTextureTable();
    
            spMaterial outputMaterial = sg->CreateMaterial();
            outputMaterials->AddMaterial( outputMaterial );
            spColorCaster colorCaster = sg->CreateColorCaster();
            colorCaster->SetMappingImage( mappingImage );
            colorCaster->SetSourceMaterials( materials );
            colorCaster->SetSourceTextures( textures );
            colorCaster->SetColorType( SG_MATERIAL_CHANNEL_DIFFUSE );
            colorCaster->SetOutputChannels( 3 );
            colorCaster->SetOutputFilePath( outputDiffuseTextureFilename.c_str() );
            colorCaster->RunProcessing();
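            // The color caster uses the mapping image generated during reduction to bake the diffuse
            // channel of the original materials onto the texture coordinates of the LOD, writing the
            // result to outputDiffuseTextureFilename.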
    
            // Set material to point to created texture filename.
            AddSimplygonTexture( outputMaterial, outputTextures, SG_MATERIAL_CHANNEL_DIFFUSE, outputDiffuseTextureFilename.c_str() );
    
            scene->GetMaterialTable()->Copy( outputMaterials );
            scene->GetTextureTable()->Copy( outputTextures );
    
            // Store to file
            spWavefrontExporter objexp = sg->CreateWavefrontExporter();
            objexp->SetScene( scene );
            objexp->SetSelectionSet( processSelectionSetID ); //Only output the LOD meshes (not the occluders)
            objexp->SetExportFilePath( outputGeometryFilename.c_str() );
            objexp->RunExport();
            }
        }
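Each iteration writes a reduced mesh and a re-baked diffuse texture named after the iteration index and its visibility setup (for example ObscuredTeapot_LOD_1_weighted.obj and ObscuredTeapot_LOD_1_weighted.png), so the effect of the different visibility settings can be compared between the outputs.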
    