static const char *const TAG = "micro_wake_word";

namespace micro_wake_word {
// WakeWordModel::log_model_config()
ESP_LOGCONFIG(TAG, " - Wake Word: %s", this->wake_word_.c_str());

// VADModel::log_model_config()
ESP_LOGCONFIG(TAG, " - VAD Model");
// StreamingModel::load_model_(): arena allocation failure paths
ESP_LOGE(TAG, "Could not allocate the streaming model's tensor arena.");

ESP_LOGE(TAG, "Could not allocate the streaming model's variable tensor arena.");
this->ma_ = tflite::MicroAllocator::Create(this->var_arena_, STREAMING_MODEL_VARIABLE_ARENA_SIZE);
this->mrv_ = tflite::MicroResourceVariables::Create(this->ma_, 20);
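// mrv_ created above holds the streaming model's persistent state tensors
// between invocations; 20 is the maximum resource-variable count handed to
// tflite::MicroResourceVariables::Create().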
const tflite::Model *model = tflite::GetModel(this->model_start_);
if (model->version() != TFLITE_SCHEMA_VERSION) {
  ESP_LOGE(TAG, "Streaming model's schema is not supported");
  return false;
}
if (this->interpreter_->AllocateTensors() != kTfLiteOk) {
  ESP_LOGE(TAG, "Failed to allocate tensors for the streaming model");
  return false;
}
if ((input->dims->size != 3) || (input->dims->data[0] != 1) ||
    (input->dims->data[2] != PREPROCESSOR_FEATURE_SIZE)) {
  ESP_LOGE(TAG, "Streaming model's input tensor has improper dimensions.");
  return false;
}
if (input->type != kTfLiteInt8) {
  ESP_LOGE(TAG, "Streaming model tensor input is not int8.");
  return false;
}
if ((output->dims->size != 2) || (output->dims->data[0] != 1) || (output->dims->data[1] != 1)) {
  ESP_LOGE(TAG, "Streaming model tensor output dimension is not 1x1.");
  return false;
}
if (output->type != kTfLiteUInt8) {
  ESP_LOGE(TAG, "Streaming model tensor output is not uint8.");
  return false;
}
// StreamingModel::perform_streaming_inference()
uint8_t stride = this->interpreter_->input(0)->dims->data[1];

// Copy the newest feature slice into the input tensor at the current stride slot.
std::memmove((int8_t *) (tflite::GetTensorData<int8_t>(input)) + PREPROCESSOR_FEATURE_SIZE * this->current_stride_step_,
             features, PREPROCESSOR_FEATURE_SIZE);
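// current_stride_step_ selects which of the stride feature slices is written
// next; only once every slice is filled does the interpreter run (below).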
if (this->current_stride_step_ >= stride) {
  TfLiteStatus invoke_status = this->interpreter_->Invoke();
  if (invoke_status != kTfLiteOk) {
    ESP_LOGW(TAG, "Streaming interpreter invoke failed");
    return false;
  }
WakeWordModel::WakeWordModel(const std::string &id, const uint8_t *model_start, uint8_t default_probability_cutoff,
                             size_t sliding_window_average_size, const std::string &wake_word, size_t tensor_arena_size,
                             bool default_enabled, bool internal_only) {
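// An assumption based on the referenced helpers below: the constructor likely
// persists the enabled flag via global_preferences->make_preference(), keyed
// on fnv1_hash(id), into the pref_ member.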
// WakeWordModel::determine_detected(), early and final return paths:
return detection_event;

return detection_event;
VADModel::VADModel(const uint8_t *model_start, uint8_t default_probability_cutoff, size_t sliding_window_size,
                   size_t tensor_arena_size) {
// VADModel::determine_detected(), early and final return paths:
return detection_event;

return detection_event;
// StreamingModel::register_streaming_ops_(): every op used by the streaming
// model graph must be registered before the interpreter is built.
if (op_resolver.AddCallOnce() != kTfLiteOk) return false;
if (op_resolver.AddVarHandle() != kTfLiteOk) return false;
if (op_resolver.AddReshape() != kTfLiteOk) return false;
if (op_resolver.AddReadVariable() != kTfLiteOk) return false;
if (op_resolver.AddStridedSlice() != kTfLiteOk) return false;
if (op_resolver.AddConcatenation() != kTfLiteOk) return false;
if (op_resolver.AddAssignVariable() != kTfLiteOk) return false;
if (op_resolver.AddConv2D() != kTfLiteOk) return false;
if (op_resolver.AddMul() != kTfLiteOk) return false;
if (op_resolver.AddAdd() != kTfLiteOk) return false;
if (op_resolver.AddMean() != kTfLiteOk) return false;
if (op_resolver.AddFullyConnected() != kTfLiteOk) return false;
if (op_resolver.AddLogistic() != kTfLiteOk) return false;
if (op_resolver.AddQuantize() != kTfLiteOk) return false;
if (op_resolver.AddDepthwiseConv2D() != kTfLiteOk) return false;
if (op_resolver.AddAveragePool2D() != kTfLiteOk) return false;
if (op_resolver.AddMaxPool2D() != kTfLiteOk) return false;
if (op_resolver.AddPad() != kTfLiteOk) return false;
if (op_resolver.AddPack() != kTfLiteOk) return false;
if (op_resolver.AddSplitV() != kTfLiteOk) return false;

return true;
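A minimal sketch of how a resolver populated by register_streaming_ops_() is
typically handed to the TFLM interpreter inside load_model_(); the member names
mirror the declarations below, but tensor_arena_ and the exact call order are
assumptions:

  if (!this->register_streaming_ops_(this->streaming_op_resolver_))
    return false;
  // The interpreter borrows the tensor arena and resource variables for the
  // lifetime of the model.
  this->interpreter_ = make_unique<tflite::MicroInterpreter>(
      tflite::GetModel(this->model_start_), this->streaming_op_resolver_,
      this->tensor_arena_, this->tensor_arena_size_, this->mrv_);
  if (this->interpreter_->AllocateTensors() != kTfLiteOk)
    return false;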
Referenced declarations (streaming_model.h):

StreamingModel:
  bool load_model_();  // Allocates the tensor and variable arenas and sets up the model interpreter.
  void unload_model();  // Destroys the TFLite interpreter and frees the tensor and variable arenas' memory.
  bool perform_streaming_inference(const int8_t features[PREPROCESSOR_FEATURE_SIZE]);
  void reset_probabilities();  // Sets all recent_streaming_probabilities_ to 0 and resets the ignore window count.
  bool register_streaming_ops_(tflite::MicroMutableOpResolver<20> &op_resolver);  // Returns true if the streaming model's TensorFlow operations were registered successfully.
  const uint8_t *model_start_;
  std::unique_ptr<tflite::MicroInterpreter> interpreter_;
  tflite::MicroMutableOpResolver<20> streaming_op_resolver_;
  tflite::MicroAllocator *ma_;
  tflite::MicroResourceVariables *mrv_;
  uint8_t current_stride_step_;
  size_t sliding_window_size_;
  uint8_t default_probability_cutoff_;
  uint8_t probability_cutoff_;
  size_t tensor_arena_size_;
  std::vector<uint8_t> recent_streaming_probabilities_;
  bool unprocessed_probability_status_;

WakeWordModel:
  WakeWordModel(const std::string &id, const uint8_t *model_start, uint8_t default_probability_cutoff,
                size_t sliding_window_average_size, const std::string &wake_word, size_t tensor_arena_size,
                bool default_enabled, bool internal_only);  // Constructs a wake word model object.
  DetectionEvent determine_detected() override;  // Checks for the wake word by comparing the mean probability in the sliding window with the probability cutoff.
  void log_model_config() override;
  void enable() override;  // Enable the model and save to flash; the next perform_streaming_inference call will load it.
  void disable() override;  // Disable the model and save to flash; the next perform_streaming_inference call will unload it.
  ESPPreferenceObject pref_;

VADModel:
  VADModel(const uint8_t *model_start, uint8_t default_probability_cutoff, size_t sliding_window_size, size_t tensor_arena_size);
  DetectionEvent determine_detected() override;  // Checks for voice activity by comparing the max probability in the sliding window with the probability cutoff.
  void log_model_config() override;

Referenced helpers (esphome/core):
  virtual ESPPreferenceObject make_preference(size_t length, uint32_t type, bool in_flash) = 0;
  void deallocate(T *p, size_t n);  // Part of an STL allocator that uses SPI or internal RAM.
  uint32_t fnv1_hash(const std::string &str);  // Calculates the FNV-1 hash of str.
  ESPPreferences *global_preferences;
  std::unique_ptr<T> make_unique(Args &&...args);
DetectionEvent:
  uint8_t average_probability;
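A hypothetical caller sketch tying the declarations above together; the id,
cutoff, window size, and arena size values are illustrative assumptions, and
model_start / features are assumed to be provided elsewhere:

  WakeWordModel model("okay_nabu", model_start, /*default_probability_cutoff=*/128,
                      /*sliding_window_average_size=*/10, "okay nabu",
                      /*tensor_arena_size=*/45000, /*default_enabled=*/true,
                      /*internal_only=*/true);
  if (model.perform_streaming_inference(features)) {
    DetectionEvent event = model.determine_detected();
    ESP_LOGD(TAG, "Average probability: %u", (unsigned) event.average_probability);
  }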