const video = document.getElementById("videoStream");

// getUserMedia constraints only understand media-track properties such as
// width and height; CSS properties like position or z-index are silently
// ignored here, so the positioning lives in the stylesheet instead.
const constraints = {
    video: {
        width: 1280,
        height: 780,
    }
};
navigator.mediaDevices.getUserMedia(constraints)
    .then(stream => video.srcObject = stream)
    .catch(err => console.error(err));

// Once frames are actually flowing, load the model and start the two loops.
video.addEventListener('playing', async () => {
    console.log("video is playing");
    await setupModel();
    requestAnimationFrame(createMask);
    requestAnimationFrame(drawMask);
});
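
// Optional sanity check (an added sketch, not part of the original code): the
// browser may grant a different resolution than requested, and
// MediaStreamTrack.getSettings() reports what the camera actually delivers.
// Call it from the 'playing' handler above if the mask ever looks misaligned.
function logTrackSettings() {
    const stream = video.srcObject;
    if (!stream) return;
    const { width, height } = stream.getVideoTracks()[0].getSettings();
    console.log(`camera is delivering ${width}x${height}`);
}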

let segmentation;
let segmenter;
let model;
let segmenterConfig;

async function setupModel(){
    model = bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation;
    segmenterConfig = {
        runtime: 'mediapipe', 
        solutionPath: 'https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation',
        modelType: 'landscape'
    };
    segmenter = await bodySegmentation.createSegmenter(model, segmenterConfig);
}
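
// Alternative setup (a sketch, not the original configuration): the same model
// also runs on the tfjs runtime, which skips the external MediaPipe solution
// files at some cost in speed. Note the tfjs runtime additionally expects the
// @tensorflow/tfjs-converter script, which index.html does not currently load.
// setupModelTfjs is a hypothetical name for this variant.
async function setupModelTfjs() {
    const model = bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation;
    segmenter = await bodySegmentation.createSegmenter(model, {
        runtime: 'tfjs',        // uses the WebGL backend loaded in index.html
        modelType: 'landscape'  // 'general' trades speed for accuracy
    });
}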

// Latest mask produced by createMask, consumed by drawMask.
let maskData;

async function createMask() {
    segmentation = await segmenter.segmentPeople(video);

    // Person pixels become transparent, everything else opaque white.
    const foregroundColor = {r: 0, g: 0, b: 0, a: 0};
    const backgroundColor = {r: 255, g: 255, b: 255, a: 255};

    // toBinaryMask already resolves to an ImageData, so it can be used directly.
    maskData = await bodySegmentation.toBinaryMask(
        segmentation,
        foregroundColor,
        backgroundColor);

    requestAnimationFrame(createMask);
}
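
// Variation (a sketch based on toBinaryMask's documented optional arguments):
// drawContour outlines the person and foregroundThreshold adjusts how
// confident a pixel must be to count as foreground. createSofterMask is a
// hypothetical helper, not part of the original code.
async function createSofterMask() {
    const people = await segmenter.segmentPeople(video);
    return bodySegmentation.toBinaryMask(
        people,
        {r: 0, g: 0, b: 0, a: 0},         // foreground: transparent person
        {r: 255, g: 255, b: 255, a: 255}, // background: opaque white
        true,                             // drawContour
        0.6);                             // foregroundThreshold (default 0.5)
}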

// Drawing the canvas
const spaceSRC = "images/img1.jpg";
const canvas = document.createElement("canvas");
canvas.width = constraints.video.width;
canvas.height = constraints.video.height;
canvas.style.position = "absolute";
canvas.style.top = "0";
canvas.style.left = "0";
canvas.style.zIndex = "1";
canvas.style.backgroundImage = `url('${spaceSRC}')`;
document.body.appendChild(canvas);
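
// Assumption (not in the original): without an explicit background-size the
// space image renders at its natural size, so "cover" scales it to fill the canvas.
canvas.style.backgroundSize = "cover";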

const ctx = canvas.getContext("2d");

function drawMask(){
    // The first few frames can arrive before segmentation has produced a mask.
    if (maskData) {
        ctx.putImageData(maskData, 0, 0);
    }
    requestAnimationFrame(drawMask);
}
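
// Alternative structure (a sketch, not the original approach): running
// segmentation and drawing in a single requestAnimationFrame loop keeps the
// mask in lockstep with the frame it was computed from and makes the maskData
// guard above unnecessary.
async function renderLoop() {
    const people = await segmenter.segmentPeople(video);
    const mask = await bodySegmentation.toBinaryMask(
        people,
        {r: 0, g: 0, b: 0, a: 0},
        {r: 255, g: 255, b: 255, a: 255});
    ctx.putImageData(mask, 0, 0);
    requestAnimationFrame(renderLoop);
}
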
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>AstroDancers5000</title>
    
    <!-- Require the peer dependencies. -->
    <script src="<https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation>"></script>
    <script src="<https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core>"></script>

    <!-- You must explicitly require a TF.js backend if you're not using the TF.js union bundle. -->
    <script src="<https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-webgl>"></script>
    <script src="<https://cdn.jsdelivr.net/npm/@tensorflow-models/body-segmentation>"></script>
    
    <script defer src="script.js"></script>
    <style>
        video, canvas {
            position: absolute;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            display: block;
            object-fit: cover;
        }
    </style>
</head>
<body>
    <video id="videoStream" autoplay></video>
</body>
</html>