<body>
<canvas id="canvas"></canvas>
</body>
<script>
// three.js is assumed to be loaded beforehand, so the global THREE object is available here.
const canvas = document.querySelector('#canvas');
const renderer = new THREE.WebGLRenderer({canvas});
renderer.setSize( window.innerWidth, window.innerHeight );
// The renderer draws into the existing #canvas element, so it does not need to be appended to the page again.

const camera =
new THREE.PerspectiveCamera( 45, window.innerWidth / window.innerHeight, 1, 500 );
camera.position.set( 0, 0, 100 );
camera.lookAt( 0, 0, 0 );

const scene = new THREE.Scene();
const geometry = new THREE.BoxGeometry();
const material = new THREE.MeshBasicMaterial( { color: 0x00ff00 } );
const cube = new THREE.Mesh( geometry, material );
scene.add( cube );

renderer.render( scene, camera ); // render a single frame so the cube becomes visible
</script>
const material = new THREE.MeshBasicMaterial( { color: 0x00ff00 } )
// MeshDepthMaterial does not accept color or roughness options; MeshStandardMaterial supports both.
const material2 = new THREE.MeshStandardMaterial({color: 'red', roughness: 0.5})
const material3 = new THREE.MeshPhongMaterial({color: 0x00ff00, emissive: 0xFF0000})
// pinkMat is assumed to be a material defined elsewhere, e.g. a pink MeshBasicMaterial.
const ico = new THREE.Mesh(new THREE.IcosahedronGeometry(75, 1), pinkMat);
const torus = new THREE.TorusGeometry( 10, 3, 16, 100 );
const sphere = new THREE.SphereGeometry( 5, 32, 32 );
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader'
// GLTFLoader is a named export of the examples module, not part of the THREE namespace.
const loader = new GLTFLoader()
loader.load( 'path/to/model.glb', gltf => {
scene.add( gltf.scene )
}, undefined, error => {
console.error( error )
} )
[Figure: a) orthographic camera; b) perspective camera]
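The listings above only construct a perspective camera. For the orthographic case shown in the figure, a minimal sketch looks like the following; the frustum bounds below are illustrative values derived from the window size, not taken from the original listings.
// Orthographic camera sketch; left/right/top/bottom are assumed values, not from the original.
const width = window.innerWidth;
const height = window.innerHeight;
const orthoCamera = new THREE.OrthographicCamera(
  width / -2, width / 2,    // left, right
  height / 2, height / -2,  // top, bottom
  1, 500                    // near, far
);
orthoCamera.position.set( 0, 0, 100 );
orthoCamera.lookAt( 0, 0, 0 );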
const fov = 75
const aspect = 2
const near = 0.1
const far = 5
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far)
const scene = new THREE.Scene();
const geometry = new THREE.BoxGeometry();
const material = new THREE.MeshBasicMaterial( { color: 0x00ff00 } );
const cube = new THREE.Mesh( geometry, material );
scene.add( cube );
# OpenCV and NumPy are required by the calls below.
import cv2
import numpy as np

# Read source image.
im_src = cv2.imread('/src.png')
# Four corners of the book in the source image.
pts_src = np.array([[0, 0], [1000, 0], [1000, 359], [0, 359]])
print(im_src)
# Read destination image.
im_dst = cv2.imread('/dest.png')
# Four corners of the book in the destination image.
pts_dst = np.array([[430, 411], [857, 392], [849, 519], [427, 522]])
# Calculate the homography.
h, status = cv2.findHomography(pts_src, pts_dst)
# Warp the source image into the destination frame based on the homography.
im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]))
# Build an alpha mask: non-black pixels of the warped image become opaque.
tmp = cv2.cvtColor(im_out, cv2.COLOR_BGR2GRAY)
_, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
# Attach the mask as a fourth channel (OpenCV stores channels in BGR order).
b, g, r = cv2.split(im_out)
dst = cv2.merge([b, g, r, alpha])
# imshow needs a window name as its first argument.
cv2.imshow('object', dst)
cv2.waitKey(0)
cv2.imwrite("object.png", dst)
// 1. Time-driven animation: re-render on every animation frame.
function animate() {
requestAnimationFrame( animate );
cube.rotation.x += 0.01;
cube.rotation.y += 0.01;
renderer.render( scene, camera );
}
animate(); // start the render loop
// 2. Video-driven animation: the cube follows a position computed for each video frame.
// videoListener and its onNextFrame callback are assumed to come from the surrounding video-tracking code, not from three.js.
videoListener.onNextFrame = function(currentTime){
// The per-frame position is elided in the original listing.
const currentPosition = /* muted */
cube.position.x = currentPosition.x;
cube.position.y = currentPosition.y;
renderer.render( scene, camera );
}