#include <tensorflow/lite/core/c/common.h>
#include <tensorflow/lite/micro/micro_interpreter.h>
#include <tensorflow/lite/micro/micro_mutable_op_resolver.h>
namespace micro_wake_word {
static const uint8_t MIN_SLICES_BEFORE_DETECTION = 100;
static const uint32_t STREAMING_MODEL_VARIABLE_ARENA_SIZE = 1024;
tflite::MicroResourceVariables *mrv_{nullptr};
tflite::MicroAllocator *ma_{nullptr};
WakeWordModel(const std::string &id, const uint8_t *model_start, uint8_t default_probability_cutoff,
              size_t sliding_window_average_size, const std::string &wake_word, size_t tensor_arena_size,
              bool default_enabled, bool internal_only);
VADModel(const uint8_t *model_start, uint8_t default_probability_cutoff, size_t sliding_window_size,
         size_t tensor_arena_size);
virtual void disable()
Disable the model. The next performing_streaming_inference call will unload it.
tflite::MicroAllocator * ma_
virtual DetectionEvent determine_detected()=0
bool load_model_()
Allocates tensor and variable arenas and sets up the model interpreter.
uint8_t get_default_probability_cutoff() const
virtual void enable()
Enable the model. The next performing_streaming_inference call will load it.
uint8_t current_stride_step_
virtual void log_model_config()=0
size_t sliding_window_size_
std::unique_ptr< tflite::MicroInterpreter > interpreter_
uint8_t default_probability_cutoff_
bool get_unprocessed_probability_status() const
tflite::MicroMutableOpResolver< 20 > streaming_op_resolver_
bool register_streaming_ops_(tflite::MicroMutableOpResolver< 20 > &op_resolver)
Returns true if successfully registered the streaming model's TensorFlow operations.
void reset_probabilities()
Sets all recent_streaming_probabilities to 0 and resets the ignore window count.
size_t tensor_arena_size_
std::vector< uint8_t > recent_streaming_probabilities_
bool unprocessed_probability_status_
uint8_t get_probability_cutoff() const
tflite::MicroResourceVariables * mrv_
bool perform_streaming_inference(const int8_t features[PREPROCESSOR_FEATURE_SIZE])
uint8_t probability_cutoff_
void unload_model()
Destroys the TFLite interpreter and frees the tensor and variable arenas' memory.
void set_probability_cutoff(uint8_t probability_cutoff)
bool is_enabled() const
Return true if the model is enabled.
const uint8_t * model_start_
DetectionEvent determine_detected() override
Checks for voice activity by comparing the max probability in the sliding window with the probability cutoff.
VADModel(const uint8_t *model_start, uint8_t default_probability_cutoff, size_t sliding_window_size, size_t tensor_arena_size)
void log_model_config() override
void enable() override
Enable the model and save to flash. The next performing_streaming_inference call will load it.
const std::string & get_wake_word() const
DetectionEvent determine_detected() override
Checks for the wake word by comparing the mean probability in the sliding window with the probability cutoff.
void log_model_config() override
const std::string & get_id() const
const std::vector< std::string > & get_trained_languages() const
ESPPreferenceObject pref_
WakeWordModel(const std::string &id, const uint8_t *model_start, uint8_t default_probability_cutoff, size_t sliding_window_average_size, const std::string &wake_word, size_t tensor_arena_size, bool default_enabled, bool internal_only)
Constructs a wake word model object.
void add_trained_language(const std::string &language)
void disable() override
Disable the model and save to flash. The next performing_streaming_inference call will unload it.
std::vector< std::string > trained_languages_
Providing packet encoding functions for exchanging data with a remote host.
uint8_t average_probability