threejs example: GPU picking

Article link: https://threejs.org/manual/#zh/indexed-textures

Live example: https://threejs.org/manual/examples/picking-gpu.html

 

This is because JavaScript can't simply look at the textures and materials and work out whether parts of your objects are transparent or opaque.

The solution to these issues is GPU-based picking. Unfortunately, while it is conceptually simple, it is more complicated to use than the ray-casting approach.

To do GPU picking, we render every object off-screen, each in a unique color. We then look up the color of the pixel under the mouse position; that color tells us which object was picked.
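At its heart this is just a reversible mapping between an object id and a 24-bit RGB color. Here is a minimal sketch of that round trip in plain JavaScript (the id value is an arbitrary example; the full listing below does the same decoding when it reads the render target back):

// an id in the range 1..0xFFFFFF fits exactly into one 24-bit RGB color
const id = 257;
const r = ( id >> 16 ) & 0xFF; // 0
const g = ( id >> 8 ) & 0xFF;  // 1
const b = id & 0xFF;           // 1

// the pixel read back from the render target holds those same three bytes,
// so reassembling them recovers the id of the object under the mouse
const decoded = ( r << 16 ) | ( g << 8 ) | b; // 257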

This solves problems #2 and #3 above. Whether it solves problem #1, speed, depends on the use case. Every object has to be drawn twice: once for viewing and once for picking. There may be cleverer solutions that do both in a single pass, but we won't attempt them here.

One thing worth doing, though: since we only need to read one pixel for picking, we can set up the camera so that only that one pixel gets drawn. PerspectiveCamera.setViewOffset lets us tell THREE.js to compute a camera that renders just a tiny sub-rectangle of a larger view, which should save some time.
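Here is a minimal sketch of that optimization. mouseX, mouseY, pickingRenderTarget and pickingScene are placeholder names for this sketch; the full listing below wires the real values up.

// render only the single pixel under the mouse into a 1x1 render target
camera.setViewOffset(
	renderer.getContext().drawingBufferWidth,  // full width of the original view
	renderer.getContext().drawingBufferHeight, // full height of the original view
	mouseX, mouseY,                            // top-left corner of the sub-rectangle
	1, 1 );                                    // sub-rectangle width and height: one pixel

renderer.setRenderTarget( pickingRenderTarget ); // a 1x1 THREE.WebGLRenderTarget
renderer.render( pickingScene, camera );
renderer.setRenderTarget( null );

camera.clearViewOffset(); // go back to rendering the whole view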

To implement this kind of picking in THREE.js we currently need to create two scenes: one filled with the normal meshes, the other filled with meshes that use a "picking material". The picking scene's background is set to black, so a pick that hits nothing reads back as id 0, which maps to no object (the real ids start at 1).

 

<!-- Licensed under a BSD license. See license.html for license -->
<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
    <title>Three.js - Picking - GPU</title>
    <style>
    html, body {
        height: 100%;
        margin: 0;
    }
    #c {
        width: 100%;
        height: 100%;
        display: block;
    }
    </style>
  </head>
  <body>
    <canvas id="c"></canvas>
  </body>
<script type="importmap">
{
  "imports": {
    "three": "../../build/three.module.js"
  }
}
</script>

<script type="module">
import * as THREE from 'three';

function main() {

	const canvas = document.querySelector( '#c' );
	const renderer = new THREE.WebGLRenderer( { antialias: true, canvas } );

	const fov = 60;
	const aspect = 2; // the canvas default
	const near = 0.1;
	const far = 200;
	const camera = new THREE.PerspectiveCamera( fov, aspect, near, far );
	camera.position.z = 30;

	const scene = new THREE.Scene();
	scene.background = new THREE.Color( 'white' );
	const pickingScene = new THREE.Scene();
	pickingScene.background = new THREE.Color( 0 );

	// put the camera on a pole (parent it to an object)
	// so we can spin the pole to move the camera around the scene
	const cameraPole = new THREE.Object3D();
	scene.add( cameraPole );
	cameraPole.add( camera );

	{

		const color = 0xFFFFFF;
		const intensity = 3;
		const light = new THREE.DirectionalLight( color, intensity );
		light.position.set( - 1, 2, 4 );
		camera.add( light );

	}

	const boxWidth = 1;
	const boxHeight = 1;
	const boxDepth = 1;
	const geometry = new THREE.BoxGeometry( boxWidth, boxHeight, boxDepth );

	function rand( min, max ) {

		if ( max === undefined ) {

			max = min;
			min = 0;

		}

		return min + ( max - min ) * Math.random();

	}

	function randomColor() {

		return `hsl(${rand( 360 ) | 0}, ${rand( 50, 100 ) | 0}%, 50%)`;

	}

	const loader = new THREE.TextureLoader();
	const texture = loader.load( 'resources/images/frame.png' );

	const idToObject = {};
	const numObjects = 100;
	for ( let i = 0; i < numObjects; ++ i ) {

		const id = i + 1;
		const material = new THREE.MeshPhongMaterial( {
			color: randomColor(),
			map: texture,
			transparent: true,
			side: THREE.DoubleSide,
			alphaTest: 0.5,
		} );

		const cube = new THREE.Mesh( geometry, material );
		scene.add( cube );
		idToObject[ id ] = cube;

		cube.position.set( rand( - 20, 20 ), rand( - 20, 20 ), rand( - 20, 20 ) );
		cube.rotation.set( rand( Math.PI ), rand( Math.PI ), 0 );
		cube.scale.set( rand( 3, 6 ), rand( 3, 6 ), rand( 3, 6 ) );

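		// The picking material encodes this mesh's id in its emissive color:
		// setHex with THREE.NoColorSpace stores the raw id without any color-space
		// conversion, color and specular are black so lighting can't change the
		// value that gets written, NoBlending keeps alpha from blending it, and
		// reusing the same map/alphaTest means the texture's transparent holes
		// don't register as picks.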
		const pickingMaterial = new THREE.MeshPhongMaterial( {
			emissive: new THREE.Color().setHex( id, THREE.NoColorSpace ),
			color: new THREE.Color( 0, 0, 0 ),
			specular: new THREE.Color( 0, 0, 0 ),
			map: texture,
			transparent: true,
			side: THREE.DoubleSide,
			alphaTest: 0.5,
			blending: THREE.NoBlending,
		} );
		const pickingCube = new THREE.Mesh( geometry, pickingMaterial );
		pickingScene.add( pickingCube );
		pickingCube.position.copy( cube.position );
		pickingCube.rotation.copy( cube.rotation );
		pickingCube.scale.copy( cube.scale );

	}

	function resizeRendererToDisplaySize( renderer ) {

		const canvas = renderer.domElement;
		const width = canvas.clientWidth;
		const height = canvas.clientHeight;
		const needResize = canvas.width !== width || canvas.height !== height;
		if ( needResize ) {

			renderer.setSize( width, height, false );

		}

		return needResize;

	}

	class GPUPickHelper {

		constructor() {

			// create a 1x1 pixel render target
			this.pickingTexture = new THREE.WebGLRenderTarget( 1, 1 );
			this.pixelBuffer = new Uint8Array( 4 );
			this.pickedObject = null;
			this.pickedObjectSavedColor = 0;

		}
		pick( cssPosition, scene, camera, time ) {

			const { pickingTexture, pixelBuffer } = this;

			// restore the color if there is a picked object
			if ( this.pickedObject ) {

				this.pickedObject.material.emissive.setHex( this.pickedObjectSavedColor );
				this.pickedObject = undefined;

			}

			// set the view offset to represent just a single pixel under the mouse
			const pixelRatio = renderer.getPixelRatio();
			camera.setViewOffset(
				renderer.getContext().drawingBufferWidth, // full width
				renderer.getContext().drawingBufferHeight, // full height
				cssPosition.x * pixelRatio | 0, // rect x
				cssPosition.y * pixelRatio | 0, // rect y
				1, // rect width
				1, // rect height
			);
			// render the scene
			renderer.setRenderTarget( pickingTexture );
			renderer.render( scene, camera );
			renderer.setRenderTarget( null );
			// clear the view offset so rendering returns to normal
			camera.clearViewOffset();
			//read the pixel
			renderer.readRenderTargetPixels(
				pickingTexture,
				0, // x
				0, // y
				1, // width
				1, // height
				pixelBuffer );

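			// reassemble the 24-bit object id from the r, g, b bytes of the pixel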
			const id =
          ( pixelBuffer[ 0 ] << 16 ) |
          ( pixelBuffer[ 1 ] << 8 ) |
          ( pixelBuffer[ 2 ] );

			const intersectedObject = idToObject[ id ];
			if ( intersectedObject ) {

				// pick the first object. It's the closest one
				this.pickedObject = intersectedObject;
				// save its color
				this.pickedObjectSavedColor = this.pickedObject.material.emissive.getHex();
				// set its emissive color to flashing red/yellow
				this.pickedObject.material.emissive.setHex( ( time * 8 ) % 2 > 1 ? 0xFFFF00 : 0xFF0000 );

			}

		}

	}

	const pickPosition = { x: 0, y: 0 };
	const pickHelper = new GPUPickHelper();
	clearPickPosition();

	function render( time ) {

		time *= 0.001; // convert to seconds;

		if ( resizeRendererToDisplaySize( renderer ) ) {

			const canvas = renderer.domElement;
			camera.aspect = canvas.clientWidth / canvas.clientHeight;
			camera.updateProjectionMatrix();

		}

		cameraPole.rotation.y = time * .1;

		pickHelper.pick( pickPosition, pickingScene, camera, time );

		renderer.render( scene, camera );

		requestAnimationFrame( render );

	}

	requestAnimationFrame( render );

	function getCanvasRelativePosition( event ) {

		const rect = canvas.getBoundingClientRect();
		return {
			x: ( event.clientX - rect.left ) * canvas.width / rect.width,
			y: ( event.clientY - rect.top ) * canvas.height / rect.height,
		};

	}

	function setPickPosition( event ) {

		const pos = getCanvasRelativePosition( event );
		pickPosition.x = pos.x;
		pickPosition.y = pos.y;

	}

	function clearPickPosition() {

		// unlike the mouse which always has a position
		// if the user stops touching the screen we want
		// to stop picking. For now we just pick a value
		// unlikely to pick something
		pickPosition.x = - 100000;
		pickPosition.y = - 100000;

	}

	window.addEventListener( 'mousemove', setPickPosition );
	window.addEventListener( 'mouseout', clearPickPosition );
	window.addEventListener( 'mouseleave', clearPickPosition );

	window.addEventListener( 'touchstart', ( event ) => {

		// prevent the window from scrolling
		event.preventDefault();
		setPickPosition( event.touches[ 0 ] );

	}, { passive: false } );

	window.addEventListener( 'touchmove', ( event ) => {

		setPickPosition( event.touches[ 0 ] );

	} );

	window.addEventListener( 'touchend', clearPickPosition );

}

main();
</script>
</html>

 
