diff --git a/lib/config.dart b/lib/config.dart
index 79956bf..26961d3 100644
--- a/lib/config.dart
+++ b/lib/config.dart
@@ -1,6 +1,8 @@
 const String model = 'assets/datasets/model.tflite';
 const String label = 'assets/datasets/label.txt';
 const String inputType = 'decodedWav';
+const int numOfInferences = 1;
 const int sampleRate = 16000;
 const int recordingLength = 16000;
-const int bufferSize = 2000;
\ No newline at end of file
+const int bufferSize = 2000;
+const double detectionThreshold = 0.5;
\ No newline at end of file
diff --git a/lib/view/screens/content.dart b/lib/view/screens/content.dart
index dc66645..58f9889 100644
--- a/lib/view/screens/content.dart
+++ b/lib/view/screens/content.dart
@@ -7,7 +7,7 @@ import 'package:flutter/material.dart';
 import 'package:easy_learn/view/widgets/custom_button.dart';
 import 'package:easy_learn/view/widgets/score_board.dart';
 import 'package:easy_learn/view/widgets/list_contents_box.dart';
-import 'package:easy_learn/config.dart' show model, label, inputType, sampleRate, recordingLength, bufferSize;
+import 'package:easy_learn/config.dart' show model, label, inputType, numOfInferences, sampleRate, recordingLength, bufferSize, detectionThreshold;
 
 class Content extends StatefulWidget {
 
@@ -37,9 +37,11 @@ class _ContentState extends State<Content> {
   void getResult() {
     result = TfliteAudio.startAudioRecognition(
       inputType: inputType,
+      numOfInferences: numOfInferences,
       sampleRate: sampleRate,
       recordingLength: recordingLength,
       bufferSize: bufferSize,
+      detectionThreshold: detectionThreshold
     );
     result
         ?.listen(
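For reference, below is a minimal sketch of how the new `numOfInferences` and `detectionThreshold` constants are consumed on the listening side. The stream-based `startAudioRecognition` call and its named parameters come straight from the diff above; the `recognitionResult`/`inferenceTime` event keys follow the tflite_audio plugin's documented result stream, but the helper name `listenForWord` and the logging are illustrative assumptions, not code from this change.

```dart
import 'package:tflite_audio/tflite_audio.dart';

import 'package:easy_learn/config.dart';

/// Illustrative helper (not part of this change): runs one recognition pass
/// with the constants defined in lib/config.dart and logs each result.
void listenForWord() {
  final result = TfliteAudio.startAudioRecognition(
    inputType: inputType,
    numOfInferences: numOfInferences,       // 1 => a single recording/inference cycle
    sampleRate: sampleRate,
    recordingLength: recordingLength,
    bufferSize: bufferSize,
    detectionThreshold: detectionThreshold, // scores below 0.5 are not reported
  );

  result.listen((event) {
    // Assumed event keys from the tflite_audio result stream.
    final recognised = event['recognitionResult'];
    final inferenceTime = event['inferenceTime'];
    print('heard "$recognised" in ${inferenceTime}ms');
  }).onDone(() {
    // The stream closes once numOfInferences passes have completed.
    print('recognition finished');
  });
}
```

With `numOfInferences` set to 1 the plugin stops after a single recording/inference cycle, so `onDone` is a natural place to refresh the UI, which matches how `getResult()` in content.dart subscribes to the same stream.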