/**
 * FaceAPI Demo for Browsers
 * Loaded via `webcam.html`
 */
import * as faceapi from '../dist/face-api.esm.js'; // use when in dev mode
// import * as faceapi from '@vladmandic/face-api'; // use when face-api is installed from npm
// configuration options
const modelPath = '../model/'; // path to model folder that will be loaded using http
// const modelPath = 'https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model/'; // alternative: load models from cdn instead
const minScore = 0.2; // minimum confidence score required for a detection to be returned
const maxResults = 5; // maximum number of detected faces to return
let optionsSSDMobileNet;
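// note: optionsSSDMobileNet stays undefined until setupFaceAPI() creates it after the models have loaded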
// helper function to pretty-print json object to string
function str(json) {
  let text = '<font color="lightblue">';
  text += json ? JSON.stringify(json).replace(/{|}|"|\[|\]/g, '').replace(/,/g, ', ') : '';
  text += '</font>';
  return text;
}
// helper function to print strings to html document as a log
function log(...txt) {
  console.log(...txt); // eslint-disable-line no-console
  const div = document.getElementById('log');
  if (div) div.innerHTML += `<br>${txt}`; // template literal coerces the args array to a comma-joined string
}
// helper function to draw detected faces
function drawFaces(canvas, data, fps) {
  const ctx = canvas.getContext('2d', { willReadFrequently: true });
  if (!ctx) return;
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  // draw title
  ctx.font = 'small-caps 20px "Segoe UI"';
  ctx.fillStyle = 'white';
  ctx.fillText(`FPS: ${fps}`, 10, 25);
  for (const person of data) {
    // draw box around each face
    ctx.lineWidth = 3;
    ctx.strokeStyle = 'deepskyblue';
    ctx.fillStyle = 'deepskyblue';
    ctx.globalAlpha = 0.6;
    ctx.beginPath();
    ctx.rect(person.detection.box.x, person.detection.box.y, person.detection.box.width, person.detection.box.height);
    ctx.stroke();
    ctx.globalAlpha = 1;
    // draw text labels: each label is drawn twice, black offset by 1px beneath lightblue, to create a readable drop shadow
    const expression = Object.entries(person.expressions).sort((a, b) => b[1] - a[1]); // sort expressions by probability, highest first
    ctx.fillStyle = 'black';
    ctx.fillText(`gender: ${Math.round(100 * person.genderProbability)}% ${person.gender}`, person.detection.box.x, person.detection.box.y - 59);
    ctx.fillText(`expression: ${Math.round(100 * expression[0][1])}% ${expression[0][0]}`, person.detection.box.x, person.detection.box.y - 41);
    ctx.fillText(`age: ${Math.round(person.age)} years`, person.detection.box.x, person.detection.box.y - 23);
    ctx.fillText(`roll:${person.angle.roll}° pitch:${person.angle.pitch}° yaw:${person.angle.yaw}°`, person.detection.box.x, person.detection.box.y - 5);
    ctx.fillStyle = 'lightblue';
    ctx.fillText(`gender: ${Math.round(100 * person.genderProbability)}% ${person.gender}`, person.detection.box.x, person.detection.box.y - 60);
    ctx.fillText(`expression: ${Math.round(100 * expression[0][1])}% ${expression[0][0]}`, person.detection.box.x, person.detection.box.y - 42);
    ctx.fillText(`age: ${Math.round(person.age)} years`, person.detection.box.x, person.detection.box.y - 24);
    ctx.fillText(`roll:${person.angle.roll}° pitch:${person.angle.pitch}° yaw:${person.angle.yaw}°`, person.detection.box.x, person.detection.box.y - 6);
    // draw face points for each face
    ctx.globalAlpha = 0.8;
    ctx.fillStyle = 'lightblue';
    const pointSize = 2;
    for (let i = 0; i < person.landmarks.positions.length; i++) {
      ctx.beginPath();
      ctx.arc(person.landmarks.positions[i].x, person.landmarks.positions[i].y, pointSize, 0, 2 * Math.PI);
      ctx.fill();
    }
  }
}
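// detection loop: detect all faces in the current video frame, draw the results, then schedule the next pass via requestAnimationFrame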
async function detectVideo(video, canvas) {
  if (!video || video.paused) return false;
  const t0 = performance.now();
  faceapi
    .detectAllFaces(video, optionsSSDMobileNet)
    .withFaceLandmarks()
    .withFaceExpressions()
    // .withFaceDescriptors()
    .withAgeAndGender()
    .then((result) => {
      const fps = 1000 / (performance.now() - t0);
      drawFaces(canvas, result, fps.toLocaleString());
      requestAnimationFrame(() => detectVideo(video, canvas)); // schedule the next pass without blocking
      return true;
    })
    .catch((err) => {
      log(`Detect Error: ${str(err)}`);
      return false;
    });
  return false; // detection runs asynchronously; the loop is kept alive from inside the then() callback
}
// initialize webcam, wire up the canvas click handler, and start the detection loop
async function setupCamera() {
  const video = document.getElementById('video');
  const canvas = document.getElementById('canvas');
  if (!video || !canvas) return null;
  log('Setting up camera');
  // setup webcam. note that navigator.mediaDevices requires that page is accessed via https
  if (!navigator.mediaDevices) {
    log('Camera Error: access not supported');
    return null;
  }
  let stream;
  const constraints = { audio: false, video: { facingMode: 'user', resizeMode: 'crop-and-scale' } };
  if (window.innerWidth > window.innerHeight) constraints.video.width = { ideal: window.innerWidth };
  else constraints.video.height = { ideal: window.innerHeight };
  try {
    stream = await navigator.mediaDevices.getUserMedia(constraints);
  } catch (err) {
    if (err.name === 'PermissionDeniedError' || err.name === 'NotAllowedError') log(`Camera Error: camera permission denied: ${err.message || err}`);
    else if (err.name === 'SourceUnavailableError' || err.name === 'NotReadableError') log(`Camera Error: camera not available: ${err.message || err}`);
    else log(`Camera Error: ${err.message || err}`);
    return null;
  }
  if (stream) {
    video.srcObject = stream;
  } else {
    log('Camera Error: stream empty');
    return null;
  }
  const track = stream.getVideoTracks()[0];
  const settings = track.getSettings();
  // strip device identifiers from settings before logging
  if (settings.deviceId) delete settings.deviceId;
  if (settings.groupId) delete settings.groupId;
  if (settings.aspectRatio) settings.aspectRatio = Math.trunc(100 * settings.aspectRatio) / 100; // round to two decimals for logging
  log(`Camera active: ${track.label}`);
  log(`Camera settings: ${str(settings)}`);
  canvas.addEventListener('click', () => {
    // toggle pause/resume on canvas click
    if (video && video.readyState >= 2) {
      if (video.paused) {
        video.play();
        detectVideo(video, canvas); // restart the detection loop since it exits while video is paused
      } else {
        video.pause();
      }
    }
    log(`Camera state: ${video.paused ? 'paused' : 'playing'}`);
  });
  return new Promise((resolve) => {
    video.onloadeddata = async () => {
      // size canvas to match the actual video resolution
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
      video.play();
      detectVideo(video, canvas);
      resolve(true);
    };
  });
}
async function setupFaceAPI() {
  // load face-api models
  // log('Models loading');
  // await faceapi.nets.tinyFaceDetector.load(modelPath); // using ssdMobilenetv1 instead
  await faceapi.nets.ssdMobilenetv1.load(modelPath);
  await faceapi.nets.ageGenderNet.load(modelPath);
  await faceapi.nets.faceLandmark68Net.load(modelPath);
  await faceapi.nets.faceRecognitionNet.load(modelPath);
  await faceapi.nets.faceExpressionNet.load(modelPath);
  optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence: minScore, maxResults });
  // check tf engine state
  log(`Models loaded: ${str(faceapi.tf.engine().state.numTensors)} tensors`);
}
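// note: the sequential awaits above could also be collapsed into a single parallel load, e.g.:
// await Promise.all([faceapi.nets.ssdMobilenetv1, faceapi.nets.ageGenderNet, faceapi.nets.faceLandmark68Net, faceapi.nets.faceRecognitionNet, faceapi.nets.faceExpressionNet].map((net) => net.load(modelPath)));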
async function main() {
  // initialize tfjs
  log('FaceAPI WebCam Test');
  // if you want to use the wasm backend, the location of the wasm binaries must be specified
  // await faceapi.tf?.setWasmPaths(`https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@${faceapi.tf.version_core}/dist/`);
  // await faceapi.tf?.setBackend('wasm');
  // log(`WASM SIMD: ${await faceapi.tf?.env().getAsync('WASM_HAS_SIMD_SUPPORT')} Threads: ${await faceapi.tf?.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT') ? 'Multi' : 'Single'}`);
  // default is webgl backend
  await faceapi.tf.setBackend('webgl');
  await faceapi.tf.ready();
  // tfjs optimizations
  if (faceapi.tf?.env().flagRegistry.CANVAS2D_WILL_READ_FREQUENTLY) faceapi.tf.env().set('CANVAS2D_WILL_READ_FREQUENTLY', true);
  if (faceapi.tf?.env().flagRegistry.WEBGL_EXP_CONV) faceapi.tf.env().set('WEBGL_EXP_CONV', true);
  // check version
  log(`Version: FaceAPI ${str(faceapi?.version || '(not loaded)')} TensorFlow/JS ${str(faceapi.tf?.version_core || '(not loaded)')} Backend: ${str(faceapi.tf?.getBackend() || '(not loaded)')}`);
  await setupFaceAPI();
  await setupCamera();
}
// start processing as soon as page is loaded
window.onload = main;