Today I’ll show you how to create a small console application that will Detect and Blur Faces with .NET Core and Face API.

First be aware of the following prerequisites:

You will also need an Azure Cognitive Services Face API account and the correct set of access keys. (Start here: Subscribe in seconds if you need a Cognitive Service Account and here for the Documentation)

Now let’s start:

Create a folder for your new project


Open a command prompt and run

1mkdir projectoxford

Create the project


1cd projectoxford
2dotnet new

Create a settings file


Create an appsettings.json file to hold your Face API Key (remember to replace the values with those from your Cognitive Service account):

1{
2  "FaceAPIKey": "[Your key here]"
3}

Modify the project file


Modify the project.json to add the Microsoft.ProjectOxford.Face dependency and also specify that the appsettings.json file must be copied to the output (buildOptions section) so it becomes available to the application once you build it.

We’ll be needing ImageProcessorCore to process the image (System.Drawing is not available in .NET Core) and also the extensions and tools to work with configuration files and user secrets.

 1{
 2  "userSecretsId": "cmendible3-dotnetcore.samples-projectOxford",
 3  "version": "1.0.0-*",
 4  "buildOptions": {
 5    "debugType": "portable",
 6    "emitEntryPoint": true,
 7    "copyToOutput": { 
 8      "include": "appsettings.json"
 9    }
10  },
11  "dependencies": {
12    "Microsoft.Extensions.Configuration": "1.0.0",
13    "Microsoft.Extensions.Configuration.Json": "1.0.0",
14    "Microsoft.Extensions.Configuration.UserSecrets": "1.0.0",
15    "Microsoft.ProjectOxford.Face": "1.1.0",
16    "System.Runtime.Serialization.Primitives": "4.1.1",
17    "ImageProcessorCore": "1.0.0-alpha-966"
18  },
19  "tools": {
20    "Microsoft.Extensions.SecretManager.Tools": "1.0.0-*"
21  },
22  "frameworks": {
23    "netcoreapp1.0": {
24      "dependencies": {
25        "Microsoft.NETCore.App": {
26          "type": "platform",
27          "version": "1.0.0"
28        }
29      },
30      "imports": "dnxcore50"
31    }
32  }
33}

Add ImageProcessorCore package source


ImageProcessorCore is in the alpha stage and packages are available via MyGet, so add a NuGet.config file with the following content:

1<?xml version="1.0" encoding="utf-8"?>
2<configuration>
3  <packageSources>
4    <add key="imageprocessor" value="https://www.myget.org/F/imageprocessor/api/v3/index.json" protocolVersion="3" />
5  </packageSources>
6</configuration>

Restore packages


You just modified the project.json file with new dependencies and added the NuGet.config file so please restore the packages with the following command:

1dotnet restore

Modify Program.cs


Replace the contents of the Program.cs file with the following code:

  1namespace ConsoleApplication
  2{
  3    using System;
  4    using System.IO;
  5    using System.Linq;
  6    using System.Threading.Tasks;
  7    using ImageProcessorCore;
  8    using Microsoft.Extensions.Configuration;
  9    using Microsoft.ProjectOxford.Face;
 10    using Microsoft.ProjectOxford.Face.Contract;
 11
 12    public class Program
 13    {
 14        /// <summary>
 15        /// Let's detect and blur some faces!
 16        /// </summary>
 17        /// 
 18        public static void Main(string[] args)
 19        {
 20            // The name of the source image.
 21            const string sourceImage = "faces.jpg";
 22
 23            // The name of the destination image
 24            const string destinationImage = "detectedfaces.jpg";
 25
 26            // Get the configuration
 27            var configuration = BuildConfiguration();
 28
 29            // Detect the faces in the source file
 30            DetectFaces(sourceImage, configuration["FaceAPIKey"])
 31                .ContinueWith((task) =>
 32                {
 33                    // Save the result of the detection
 34                    var faceRects = task.Result;
 35
 36                    Console.WriteLine($"Detected {faceRects.Length} faces");
 37
 38                    // Blur the detected faces and save in another file
 39                    BlurFaces(faceRects, sourceImage, destinationImage);
 40
 41                    Console.WriteLine($"Done!!!");
 42                });
 43
 44            Console.ReadLine();
 45        }
 46
 47        /// <summary>
 48        /// Build the confguration
 49        /// </summary>
 50        /// <returns>Returns the configuration</returns>
 51        private static IConfigurationRoot BuildConfiguration()
 52        {
 53            // Enable to app to read json setting files
 54            var builder = new ConfigurationBuilder()
 55                .AddJsonFile("appsettings.json", optional: false, reloadOnChange: true);
 56
 57#if DEBUG
 58            // We use user secrets in Debug mode so API keys are not uploaded to source control 
 59            builder.AddUserSecrets("cmendible3-dotnetcore.samples-projectOxford");
 60#endif
 61
 62            return builder.Build();
 63        }
 64
 65        /// <summary>
 66        /// Blur the detected faces from de source image.
 67        /// </summary>
 68        /// 
 69        /// 
 70        /// 
 71        private static void BlurFaces(FaceRectangle[] faceRects, string sourceImage, string destinationImage)
 72        {
 73            if (File.Exists(destinationImage))
 74            {
 75                File.Delete(destinationImage);
 76            }
 77
 78            if (faceRects.Length > 0)
 79            {
 80                using (FileStream stream = File.OpenRead("faces.jpg"))
 81                using (FileStream output = File.OpenWrite(destinationImage))
 82                {
 83                    var image = new Image<Color, uint>(stream);
 84
 85                    // Blur every detected face
 86                    foreach (var faceRect in faceRects)
 87                    {
 88                        var rectangle = new Rectangle(
 89                            faceRect.Left,
 90                            faceRect.Top,
 91                            faceRect.Width,
 92                            faceRect.Height);
 93
 94                        image = image.BoxBlur(20, rectangle);
 95                    }
 96
 97                    image.SaveAsJpeg(output);
 98                }
 99            }
100
101        }
102
103        /// <summary>
104        /// Detect faces calling the Face API
105        /// </summary>
106        /// 
107        /// 
108        /// <returns>Detected faces rectangles</returns>
109        private static async Task<FaceRectangle[]> DetectFaces(string imageFilePath, string apiKey)
110        {
111            var faceServiceClient = new FaceServiceClient(apiKey);
112
113            try
114            {
115                using (Stream imageFileStream = File.OpenRead(imageFilePath))
116                {
117                    var faces = await faceServiceClient.DetectAsync(imageFileStream);
118                    var faceRects = faces.Select(face => face.FaceRectangle);
119                    return faceRects.ToArray();
120                }
121            }
122            catch (Exception)
123            {
124                return new FaceRectangle[0];
125            }
126        }
127    }
128}

Build


Build and run the application with the following command

1dotnet run

Expected results


Command line should read

1Detected 26 faces
2Done!!!

The new detectedfaces.jpg file should look like this:

You can get the code here: https://github.com/cmendible/dotnetcore.samples/tree/main/projectoxford

Hope it helps!