The goal of computer vision
• To bridge the gap between pixels and “meaning”
npm install @tensorflow/tfjs-node
npm install @tensorflow-models/coco-ssd --save
// Request webcam access and attach the stream to the <video> element.
// Resolves once the video's metadata has loaded, so frame dimensions are
// available before detection starts.
const webCamPromise = navigator.mediaDevices
  .getUserMedia({
    audio: false,
    video: {
      facingMode: "user", // front-facing camera
    },
  })
  .then((stream) => {
    // Exposed globally so the stream can be inspected/stopped from elsewhere.
    window.stream = stream;
    this.videoRef.current.srcObject = stream;
    // Adapt the onloadedmetadata callback into a Promise (the legitimate
    // use of the Promise constructor: wrapping a callback API).
    return new Promise((resolve) => {
      this.videoRef.current.onloadedmetadata = () => {
        resolve();
      };
    });
  });
// Detect objects in the current video frame, draw the predictions, and
// schedule the next detection on the following animation frame.
// A .catch is added so a failed detect() is logged instead of leaving a
// silently-rejected promise (and an unexplained dead detection loop).
detectFrame = (video, model) => {
  model
    .detect(video)
    .then((predictions) => {
      this.renderPredictions(predictions);
      requestAnimationFrame(() => {
        this.detectFrame(video, model);
      });
    })
    .catch((e) => {
      console.log("Exception : ", e);
    });
};
// Kick off the COCO-SSD model download, then start the detection loop
// once both the model and the webcam stream are ready.
const modelPromise = cocoSsd.load();
Promise.all([modelPromise, webCamPromise])
  .then(([model]) => {
    this.detectFrame(this.videoRef.current, model);
  })
  .catch((error) => {
    console.error(error);
  });
// Draw each prediction's class label at the top-left corner of its
// bounding box. The fill color is identical for every label, so it is
// set once outside the loop instead of on every iteration.
ctx.fillStyle = "#000000";
predictions.forEach((prediction) => {
  // coco-ssd bbox format is [x, y, width, height] — only the origin is
  // needed to position the label text.
  const [x, y] = prediction.bbox;
  ctx.fillText(prediction.class, x, y);
});
npm install @tensorflow/tfjs-node
npm install @tensorflow-models/coco-ssd --save
npm install face-api.js
npm install p5
// Load the face-api.js models. The three downloads are independent of
// one another, so run them in parallel instead of awaiting each in turn.
await Promise.all([
  faceapi.loadSsdMobilenetv1Model(MODEL_URL),
  faceapi.loadAgeGenderModel(MODEL_URL),
  faceapi.loadFaceExpressionModel(MODEL_URL),
]);
// Detect every face in the capture element (referenced by its DOM id),
// augment the detections with age/gender and expression estimates, and
// hand the results to the rendering helper. Errors are logged rather
// than left as an unhandled promise rejection.
faceapi
  .detectAllFaces(capture.id())
  .withAgeAndGender()
  .withFaceExpressions()
  .then((data) => {
    showFaceDetectionData(data);
  })
  .catch((e) => {
    console.log("Exception : ", e);
  });
// Run COCO-SSD on the video element, but only once the capture has its
// metadata and the model has finished loading; otherwise do nothing.
if (capture.loadedmetadata && cocossdModel) {
  cocossdModel
    .detect(document.getElementById("video"))
    .then(showCocoSSDResults)
    .catch((e) => {
      console.log("Exception : ", e);
    });
}