// node.js — FaceAPI NodeJS demo
  1. /**
  2. * FaceAPI Demo for NodeJS
  3. * - Uses external library [node-fetch](https://www.npmjs.com/package/node-fetch) to load images via http
  4. * - Loads image from provided param
  5. * - Outputs results to console
  6. */
const fs = require('fs');
const process = require('process');
const path = require('path');
const log = require('@vladmandic/pilogger'); // structured console logger used throughout this demo
const tf = require('@tensorflow/tfjs-node'); // in nodejs environments tfjs-node is required to be loaded before face-api
const faceapi = require('../dist/face-api.node.js'); // use this when using face-api in dev mode
// const faceapi = require('@vladmandic/face-api'); // use this when face-api is installed as module (majority of use cases)
const modelPathRoot = '../model'; // model weights folder, resolved relative to this script in main()
const imgPathRoot = './demo'; // modify to include your sample images
const minConfidence = 0.15; // minimum detection score for SSD MobileNet face detector
const maxResults = 5; // maximum number of faces returned per image
let optionsSSDMobileNet; // detector options; initialized in main() after models are loaded
let fetch; // dynamically imported later
  20. async function image(input) {
  21. // read input image file and create tensor to be used for processing
  22. let buffer;
  23. log.info('Loading image:', input);
  24. if (input.startsWith('http:') || input.startsWith('https:')) {
  25. const res = await fetch(input);
  26. if (res && res.ok) buffer = await res.buffer();
  27. else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
  28. } else {
  29. buffer = fs.readFileSync(input);
  30. }
  31. // decode image using tfjs-node so we don't need external depenencies
  32. // can also be done using canvas.js or some other 3rd party image library
  33. if (!buffer) return {};
  34. const tensor = tf.tidy(() => {
  35. const decode = faceapi.tf.node.decodeImage(buffer, 3);
  36. let expand;
  37. if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
  38. const channels = faceapi.tf.split(decode, 4, 2); // tf.split(tensor, 4, 2); // split rgba to channels
  39. const rgb = faceapi.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
  40. expand = faceapi.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
  41. } else {
  42. expand = faceapi.tf.expandDims(decode, 0);
  43. }
  44. const cast = faceapi.tf.cast(expand, 'float32');
  45. return cast;
  46. });
  47. return tensor;
  48. }
  49. async function detect(tensor) {
  50. try {
  51. const result = await faceapi
  52. .detectAllFaces(tensor, optionsSSDMobileNet)
  53. .withFaceLandmarks()
  54. .withFaceExpressions()
  55. .withFaceDescriptors()
  56. .withAgeAndGender();
  57. return result;
  58. } catch (err) {
  59. log.error('Caught error', err.message);
  60. return [];
  61. }
  62. }
  63. // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
  64. function detectPromise(tensor) {
  65. return new Promise((resolve) => faceapi
  66. .detectAllFaces(tensor, optionsSSDMobileNet)
  67. .withFaceLandmarks()
  68. .withFaceExpressions()
  69. .withFaceDescriptors()
  70. .withAgeAndGender()
  71. .then((res) => resolve(res))
  72. .catch((err) => {
  73. log.error('Caught error', err.message);
  74. resolve([]);
  75. }));
  76. }
  77. function print(face) {
  78. const expression = Object.entries(face.expressions).reduce((acc, val) => ((val[1] > acc[1]) ? val : acc), ['', 0]);
  79. const box = [face.alignedRect._box._x, face.alignedRect._box._y, face.alignedRect._box._width, face.alignedRect._box._height];
  80. const gender = `Gender: ${Math.round(100 * face.genderProbability)}% ${face.gender}`;
  81. log.data(`Detection confidence: ${Math.round(100 * face.detection._score)}% ${gender} Age: ${Math.round(10 * face.age) / 10} Expression: ${Math.round(100 * expression[1])}% ${expression[0]} Box: ${box.map((a) => Math.round(a))}`);
  82. }
  83. async function main() {
  84. log.header();
  85. log.info('FaceAPI single-process test');
  86. // eslint-disable-next-line node/no-extraneous-import
  87. fetch = (await import('node-fetch')).default; // eslint-disable-line node/no-missing-import
  88. await faceapi.tf.setBackend('tensorflow');
  89. await faceapi.tf.ready();
  90. log.state(`Version: TensorFlow/JS ${faceapi.tf?.version_core} FaceAPI ${faceapi.version} Backend: ${faceapi.tf?.getBackend()}`);
  91. log.info('Loading FaceAPI models');
  92. const modelPath = path.join(__dirname, modelPathRoot);
  93. await faceapi.nets.ssdMobilenetv1.loadFromDisk(modelPath);
  94. await faceapi.nets.ageGenderNet.loadFromDisk(modelPath);
  95. await faceapi.nets.faceLandmark68Net.loadFromDisk(modelPath);
  96. await faceapi.nets.faceRecognitionNet.loadFromDisk(modelPath);
  97. await faceapi.nets.faceExpressionNet.loadFromDisk(modelPath);
  98. optionsSSDMobileNet = new faceapi.SsdMobilenetv1Options({ minConfidence, maxResults });
  99. if (process.argv.length !== 4) {
  100. const t0 = process.hrtime.bigint();
  101. const dir = fs.readdirSync(imgPathRoot);
  102. for (const img of dir) {
  103. if (!img.toLocaleLowerCase().endsWith('.jpg')) continue;
  104. const tensor = await image(path.join(imgPathRoot, img));
  105. const result = await detect(tensor);
  106. log.data('Image:', img, 'Detected faces:', result.length);
  107. for (const face of result) print(face);
  108. tensor.dispose();
  109. }
  110. const t1 = process.hrtime.bigint();
  111. log.info('Processed', dir.length, 'images in', Math.trunc(Number((t1 - t0)) / 1000 / 1000), 'ms');
  112. } else {
  113. const param = process.argv[2];
  114. if (fs.existsSync(param) || param.startsWith('http:') || param.startsWith('https:')) {
  115. const tensor = await image(param);
  116. const result = await detect(tensor);
  117. // const result = await detectPromise(null);
  118. log.data('Image:', param, 'Detected faces:', result.length);
  119. for (const face of result) print(face);
  120. tensor.dispose();
  121. }
  122. }
  123. }
main(); // run the demo; unhandled rejections surface via node's default handler