<!-- AvatarAnimate / Avatar.html
     Imported from repository listing: arkleinberg — "Create Avatar.html",
     commit 76a6df1 (verified). Kept as a comment so the DOCTYPE below
     remains the first parsed declaration. -->
<!DOCTYPE html>
<html lang="en">
<head>
<!-- =============================================
Lip‑Sync Avatar
---------------------------------------------
This page displays a 3‑D
character in front of a looping background
texture and drives its mouth‑opening morph
target in real‑time, synchronised with speech
synthesis.
‑ Technologies used:
• three.js (WebGL) – core 3‑D engine
• GLTFLoader – load the avatar
• Web Speech API – text‑to‑speech
• Standard JS / CSS
✅ Place the following assets next to this file
├─ avatar.glb ← your character exported from
│ the video, with a morph target
│ named "viseme_aa" (or rename in
│ code below).
└─ bg.jpg ← background image (optional
video texture shown later)
============================================= -->
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Lip‑Sync Ark Avatar</title>
<!-- Page styling inspired by the glass‑look & gradients
of the uploaded examples → custom properties keep
things tidy. -->
<style>
/* NOTE(fix): the original stylesheet used U+2011 (non-breaking hyphen)
   inside property and variable names (e.g. `box‑sizing`, `z‑index`,
   `linear‑gradient`), which made those declarations invalid CSS and
   silently dropped. All names below use ASCII hyphens. */
:root {
  --accent1: #6965db;   /* primary violet  */
  --accent2: #3a86ff;   /* secondary blue  */
  --bg-dark: #0f0f1a;
  --text-lite: #e5e5f7;
}
* { box-sizing: border-box; margin: 0; padding: 0; }
html, body {
  height: 100%;
  overflow: hidden;
  font-family: 'Segoe UI', Tahoma, sans-serif;
  background: var(--bg-dark);
  color: var(--text-lite);
}
/* Three.js full-screen canvas.
   NOTE(fix): the markup's canvas id contains a U+2011 non-breaking
   hyphen, so the original `#threecanvas` selector never matched.
   An element selector is used instead — there is only one canvas. */
canvas { position: fixed; inset: 0; z-index: 1; }
/* UI overlay */
#ui {
  position: fixed;
  left: 0; right: 0; bottom: 2rem;
  display: flex;
  justify-content: center;
  gap: 1rem;
  z-index: 2;
}
button {
  padding: .8rem 1.6rem;
  border: none;
  border-radius: 40px;
  /* NOTE(fix): the original referenced --accent-1 / --accent-2, which
     were never defined; :root declares --accent1 / --accent2. */
  background: linear-gradient(100deg, var(--accent1), var(--accent2));
  color: #fff;
  font-size: 1rem;
  font-weight: 600;
  cursor: pointer;
  box-shadow: 0 4px 15px rgba(0,0,0,.25);
  transition: transform .2s;
}
button:hover { transform: translateY(-3px); }
</style>
</head>
<body>
<!-- Three.js draws here -->
<canvas id="three‑canvas"></canvas>
<!-- Simple UI -->
<div id="ui">
<button id="speakBtn">Say it 👉 “Hello I’m your personal assistant”</button>
</div>
<!-- Three.js & loader -->
<script src="https://cdn.jsdelivr.net/npm/[email protected]/build/three.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/examples/js/controls/OrbitControls.js"></script>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/examples/js/loaders/GLTFLoader.js"></script>
<script>
/* ==========================================================
   1. Renderer / scene / camera bootstrap
   ========================================================== */
// NOTE: the id string below contains U+2011 (non-breaking hyphen),
// matching the markup's id attribute exactly — do not "normalise"
// it to an ASCII dash or the lookup will return null.
const canvas = document.getElementById('three‑canvas');
const renderer = new THREE.WebGLRenderer({ canvas, antialias: true, alpha: true });
renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));

const scene = new THREE.Scene();

// 35° vertical FOV framed for a head-and-shoulders shot.
const camera = new THREE.PerspectiveCamera(35, window.innerWidth / window.innerHeight, 0.1, 100);
camera.position.set(0, 1.55, 3.5);

// Orbit controls are handy while tuning the camera — remove for production.
const controls = new THREE.OrbitControls(camera, canvas);
controls.enableDamping = true;

// Keep the projection matrix and drawing buffer in sync with the viewport.
const onResize = () => {
  camera.aspect = window.innerWidth / window.innerHeight;
  camera.updateProjectionMatrix();
  renderer.setSize(window.innerWidth, window.innerHeight);
};
window.addEventListener('resize', onResize);
onResize();
/* ==========================================================
   2. Backdrop — a textured plane parked behind the avatar.
      Swap the TextureLoader for a THREE.VideoTexture to get a
      moving background instead of a still image.
   ========================================================== */
const bgTex = new THREE.TextureLoader().load('bg.jpg', (tex) => { tex.encoding = THREE.sRGBEncoding; });
const bg = new THREE.Mesh(
  new THREE.PlaneGeometry(16, 9),
  new THREE.MeshBasicMaterial({ map: bgTex })
);
bg.position.z = -5;    // parked behind the character
bg.scale.set(2, 2, 1); // oversized so it always fills the frame
scene.add(bg);
/* ==========================================================
   3. Avatar loading — expects a mouth-open morph target
      ("viseme_aa" or one of the common aliases below). Only
      meshes that actually own the morph are tagged, so the
      render loop never writes a foreign index into an
      unrelated mesh's influences.
   ========================================================== */
let avatar, mouthIndex = null; // morph-target index shared with the render loop
const loader = new THREE.GLTFLoader();
loader.load('avatar.glb', gltf => {
  avatar = gltf.scene;
  avatar.traverse(obj => {
    if (obj.isMesh && obj.morphTargetDictionary) {
      const dict = obj.morphTargetDictionary;
      const possible = ['viseme_aa', 'mouthOpen', 'jawOpen', 'vrc.v_morph_aa'];
      // BUGFIX: the original set `isMouth` on every morph-bearing mesh
      // once mouthIndex was non-null, even when that mesh lacked the
      // target. Resolve the match per mesh and tag only real hits.
      const found = possible.find(key => key in dict);
      if (found !== undefined) {
        mouthIndex = dict[found];
        obj.userData.isMouth = true;
      }
    }
  });

  // Normalise size: scale so the character stands ~1.6 units tall.
  const box = new THREE.Box3().setFromObject(avatar);
  const size = new THREE.Vector3();
  box.getSize(size);
  avatar.scale.setScalar(1.6 / size.y);

  // Re-measure after scaling; centre on X/Z and drop the feet to y = 0.
  // BUGFIX: the original subtracted BOTH center.y and box.min.y on Y,
  // double-counting the vertical offset and sinking the model.
  box.setFromObject(avatar);
  const center = new THREE.Vector3();
  box.getCenter(center);
  avatar.position.x -= center.x;
  avatar.position.z -= center.z;
  avatar.position.y -= box.min.y;
  scene.add(avatar);
});
/* ==========================================================
   4. Lip-sync (lightweight).
      SpeechSynthesisUtterance fires an `onboundary` event at
      each word; every word kicks the mouth fully open and the
      render loop eases it shut again. For higher fidelity,
      plug in a full phoneme→viseme mapper (Google TTS marks,
      deepspeech, etc.).
   ========================================================== */
const mouthAnim = {
  strength: 0 // 0 = closed, 1 = fully open
};

/**
 * Speak `text` through the Web Speech API, pulsing the mouth
 * open at every word boundary.
 * @param {string} text - phrase handed to the synthesiser
 */
function speak(text) {
  if (!window.speechSynthesis) return alert('SpeechSynthesis unsupported');
  const utter = new SpeechSynthesisUtterance(text);
  utter.lang = 'en-US';
  utter.rate = 1;
  utter.pitch = 1;
  utter.onboundary = (evt) => {
    if (evt.name === 'word') {
      mouthAnim.strength = 1; // quick open kick
    }
  };
  window.speechSynthesis.speak(utter);
}

// Wire the single UI button to the demo phrase.
document.getElementById('speakBtn').addEventListener('click', () => {
  speak("Hello I'm your personal assistant");
});
/* ==========================================================
   5. Render loop — eases the mouth back shut each frame and
      redraws the scene (plus damped orbit-control updates).
   ========================================================== */
const clock = new THREE.Clock();

function tick() {
  requestAnimationFrame(tick);
  const dt = clock.getDelta();

  // Frame-rate-independent exponential ease back towards "closed".
  mouthAnim.strength = THREE.MathUtils.damp(mouthAnim.strength, 0, 5, dt);

  if (avatar && mouthIndex !== null) {
    avatar.traverse((obj) => {
      if (obj.userData.isMouth) {
        obj.morphTargetInfluences[mouthIndex] = mouthAnim.strength;
      }
    });
  }

  controls.update();
  renderer.render(scene, camera);
}
tick();
</script>
</body>
</html>