tf_lite_model binary: update to use XNNPack.

Create an XNNPack delegate (single-threaded in this example), apply it to
the interpreter via ModifyGraphWithDelegate, and make the failure messages
more specific.

Change-Id: Iae6dd9c9140fbd63b23d93f8e0bb4a1a1cc3a5e5
(cherry picked from commit 2b5d870e6bdc4420b378d7226c0b2e43269fb69c)
diff --git a/examples/tf_lite_model.cc b/examples/tf_lite_model.cc
index 8474ed6..1e41e67 100644
--- a/examples/tf_lite_model.cc
+++ b/examples/tf_lite_model.cc
@@ -195,6 +195,13 @@
                        tflite::ops::builtin::Register_SOFTMAX());
 }
 
+TfLiteDelegate *GetTfliteXnnpackDelegate(int num_threads) {
+  TfLiteXNNPackDelegateOptions xnnpack_options =
+      TfLiteXNNPackDelegateOptionsDefault();
+  xnnpack_options.num_threads = num_threads;
+  return TfLiteXNNPackDelegateCreate(&xnnpack_options);
+}
+
 }  // namespace
 
 int main() {
@@ -212,7 +219,14 @@
       tflite::DefaultErrorReporter());
 
   if (interpreter->AllocateTensors() != kTfLiteOk) {
-    reporter->Report("Failed");
+    reporter->Report("Failed at allocating tensors");
+    return EXIT_FAILURE;
+  }
+
+  // Use a single thread for this example; change the thread count as needed.
+  TfLiteDelegate *xnnpack_delegate = GetTfliteXnnpackDelegate(1);
+  if (interpreter->ModifyGraphWithDelegate(xnnpack_delegate) != kTfLiteOk) {
+    reporter->Report("Failed at modifying graph with XNNPack delegate");
     return EXIT_FAILURE;
   }
 
@@ -223,7 +237,7 @@
   }
   auto status = interpreter->Invoke();
   if (status != kTfLiteOk) {
-    reporter->Report("Failed");
+    reporter->Report("Failed at invoke");
     return EXIT_FAILURE;
   }
   float y0 = interpreter->typed_output_tensor<float>(0)[0];
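
Note on the delegate lifecycle: the hunks above create the XNNPack delegate
but never release it, and no hunk shows the xnnpack_delegate.h include (it may
already be present elsewhere in the file). The sketch below is illustrative
only and not part of this change; the model path "model.tflite", the use of
BuiltinOpResolver, and the explicit TfLiteXNNPackDelegateDelete cleanup are
placeholder assumptions made to keep the example self-contained.

// Minimal sketch of the XNNPack delegate lifecycle (not part of the patch).
#include <cstdio>
#include <cstdlib>
#include <memory>

#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

int main() {
  // "model.tflite" is a placeholder path, not the model used by this example.
  auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
  if (model == nullptr) return EXIT_FAILURE;

  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk) {
    return EXIT_FAILURE;
  }

  // Single-threaded, matching the example above; raise num_threads as needed.
  TfLiteXNNPackDelegateOptions options = TfLiteXNNPackDelegateOptionsDefault();
  options.num_threads = 1;
  TfLiteDelegate* xnnpack_delegate = TfLiteXNNPackDelegateCreate(&options);

  // Applying the delegate before AllocateTensors lets XNNPack claim supported
  // subgraphs up front; applying it afterwards (as the patch does) also works
  // because the graph is re-prepared.
  if (interpreter->ModifyGraphWithDelegate(xnnpack_delegate) != kTfLiteOk ||
      interpreter->AllocateTensors() != kTfLiteOk) {
    TfLiteXNNPackDelegateDelete(xnnpack_delegate);
    return EXIT_FAILURE;
  }

  if (interpreter->Invoke() != kTfLiteOk) {
    std::fprintf(stderr, "Failed to invoke the interpreter\n");
    interpreter.reset();
    TfLiteXNNPackDelegateDelete(xnnpack_delegate);
    return EXIT_FAILURE;
  }

  // Destroy the interpreter before deleting the delegate it references.
  interpreter.reset();
  TfLiteXNNPackDelegateDelete(xnnpack_delegate);
  return EXIT_SUCCESS;
}

The one ordering constraint worth remembering is the last step: every
interpreter that uses the delegate must be destroyed before
TfLiteXNNPackDelegateDelete is called.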