ESPHome 2026.3.0
Loading...
Searching...
No Matches
scheduler.h
Go to the documentation of this file.
1#pragma once
2
4#include <cstring>
5#include <string>
6#include <vector>
7#ifdef ESPHOME_THREAD_MULTI_ATOMICS
8#include <atomic>
9#endif
10
12#include "esphome/core/hal.h"
15
16namespace esphome {
17
// Forward declarations: keep this header light by not pulling in component.h here.
class Component;
struct RetryArgs;

// Forward declaration of retry_handler - needs to be non-static for friend declaration
// (Scheduler befriends it below so the retry mechanism can reach protected members).
void retry_handler(const std::shared_ptr<RetryArgs> &args);
23
/// Central ESPHome scheduler.
///
/// Owns every timeout/interval (and deprecated retry) item registered by
/// components. Items are keyed by (component, name-or-id, item type), where
/// the key may be a static C string, an FNV-1a hash of a runtime string, or a
/// numeric ID (see NameType). Execution time is tracked as a 48-bit
/// millisecond value (32-bit millis() plus a 16-bit rollover counter) so
/// scheduling stays correct across millis() rollover. Thread-safety is
/// platform dependent, selected by the ESPHOME_THREAD_* defines; each helper
/// documents its own locking requirements.
class Scheduler {
  // Allow retry_handler to access protected members for internal retry mechanism
  friend void ::esphome::retry_handler(const std::shared_ptr<RetryArgs> &args);
  // Allow DelayAction to call set_timer_common_ with skip_cancel=true for parallel script delays.
  // This is needed to fix issue #10264 where parallel scripts with delays interfere with each other.
  // We use friend instead of a public API because skip_cancel is dangerous - it can cause delays
  // to accumulate and overload the scheduler if misused.
  template<typename... Ts> friend class DelayAction;

 public:
  // std::string overload - deprecated, use const char* or uint32_t instead
  // Remove before 2026.7.0
  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  void set_timeout(Component *component, const std::string &name, uint32_t timeout, std::function<void()> &&func);

  // Schedule a one-shot timeout named by a static string (string literal / flash storage).
  // A later call with the same (component, name) replaces the pending timeout.
  void set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> &&func);
  // Schedule a one-shot timeout keyed by a component-level numeric ID.
  void set_timeout(Component *component, uint32_t id, uint32_t timeout, std::function<void()> &&func);
  // Schedule a one-shot timeout keyed by a core/internal numeric ID (separate ID namespace,
  // see NameType::NUMERIC_ID_INTERNAL - cannot collide with component-level IDs).
  void set_timeout(Component *component, InternalSchedulerID id, uint32_t timeout, std::function<void()> &&func) {
    this->set_timer_common_(component, SchedulerItem::TIMEOUT, NameType::NUMERIC_ID_INTERNAL, nullptr,
                            static_cast<uint32_t>(id), timeout, std::move(func));
  }

  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  bool cancel_timeout(Component *component, const std::string &name);
  // Cancel a pending timeout; returns true if a matching item was found and marked for removal.
  bool cancel_timeout(Component *component, const char *name);
  bool cancel_timeout(Component *component, uint32_t id);
  bool cancel_timeout(Component *component, InternalSchedulerID id) {
    return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
                              SchedulerItem::TIMEOUT);
  }

  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  void set_interval(Component *component, const std::string &name, uint32_t interval, std::function<void()> &&func);

  // Schedule a repeating interval named by a static string (string literal / flash storage).
  void set_interval(Component *component, const char *name, uint32_t interval, std::function<void()> &&func);
  // Schedule a repeating interval keyed by a component-level numeric ID.
  void set_interval(Component *component, uint32_t id, uint32_t interval, std::function<void()> &&func);
  // Schedule a repeating interval keyed by a core/internal numeric ID (separate ID namespace).
  void set_interval(Component *component, InternalSchedulerID id, uint32_t interval, std::function<void()> &&func) {
    this->set_timer_common_(component, SchedulerItem::INTERVAL, NameType::NUMERIC_ID_INTERNAL, nullptr,
                            static_cast<uint32_t>(id), interval, std::move(func));
  }

  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  bool cancel_interval(Component *component, const std::string &name);
  // Cancel a pending interval; returns true if a matching item was found and marked for removal.
  bool cancel_interval(Component *component, const char *name);
  bool cancel_interval(Component *component, uint32_t id);
  bool cancel_interval(Component *component, InternalSchedulerID id) {
    return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
                              SchedulerItem::INTERVAL);
  }

  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, const std::string &name, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, uint32_t id, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);

  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, const std::string &name);
  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, const char *name);
  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, uint32_t id);

  // Rollover-safe 64-bit milliseconds since boot (delegates to the free function esphome::millis_64()).
  uint64_t millis_64() { return esphome::millis_64(); }

  // Calculate when the next scheduled item should run.
  // @param now On ESP32, unused for 64-bit extension (native); on other platforms, extended to 64-bit via rollover.
  // Returns the time in milliseconds until the next scheduled item, or nullopt if no items.
  // This method performs cleanup of removed items before checking the schedule.
  // IMPORTANT: This method should only be called from the main thread (loop task).
  optional<uint32_t> next_schedule_in(uint32_t now);

  // Execute all scheduled items that are ready
  // @param now Fresh timestamp from millis() - must not be stale/cached
  void call(uint32_t now);

  // Merge items queued by the set_* methods (to_add_) into the active schedule.
  // See to_add_count_ below for the lock-free fast path used to skip this when empty.
  void process_to_add();

  // Name storage type discriminator for SchedulerItem
  // Used to distinguish between static strings, hashed strings, numeric IDs, and internal numeric IDs
  enum class NameType : uint8_t {
    STATIC_STRING = 0,       // const char* pointer to static/flash storage
    HASHED_STRING = 1,       // uint32_t FNV-1a hash of a runtime string
    NUMERIC_ID = 2,          // uint32_t numeric identifier (component-level)
    NUMERIC_ID_INTERNAL = 3  // uint32_t numeric identifier (core/internal, separate namespace)
  };

 protected:
  struct SchedulerItem {
    // Ordered by size to minimize padding
    Component *component;
    // Optimized name storage using tagged union - zero heap allocation
    union {
      const char *static_name;  // For STATIC_STRING (string literals, no allocation)
      uint32_t hash_or_id;      // For HASHED_STRING or NUMERIC_ID
    } name_;
    uint32_t interval;
    // Split time to handle millis() rollover. The scheduler combines the 32-bit millis()
    // with a 16-bit rollover counter to create a 48-bit time space (using 32+16 bits).
    // This is intentionally limited to 48 bits, not stored as a full 64-bit value.
    // With 49.7 days per 32-bit rollover, the 16-bit counter supports
    // 49.7 days × 65536 = ~8900 years. This ensures correct scheduling
    // even when devices run for months. Split into two fields for better memory
    // alignment on 32-bit systems.
    uint32_t next_execution_low_;  // Lower 32 bits of execution time (millis value)
    std::function<void()> callback;
    uint16_t next_execution_high_;  // Upper 16 bits (millis_major counter)

#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic uint8_t for lock-free access.
    // std::atomic<bool> is not used because GCC on Xtensa generates an indirect
    // function call for std::atomic<bool>::load() instead of inlining it.
    // std::atomic<uint8_t> inlines correctly on all platforms.
    std::atomic<uint8_t> remove{0};

    // Bit-packed fields (4 bits used, 4 bits padding in 1 byte)
    enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
    NameType name_type_ : 2;  // Discriminator for name_ union (0–3, see NameType enum)
    bool is_retry : 1;        // True if this is a retry timeout
    // 4 bits padding
#else
    // Single-threaded or multi-threaded without atomics: can pack all fields together
    // Bit-packed fields (5 bits used, 3 bits padding in 1 byte)
    enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
    bool remove : 1;
    NameType name_type_ : 2;  // Discriminator for name_ union (0–3, see NameType enum)
    bool is_retry : 1;        // True if this is a retry timeout
    // 3 bits padding
#endif

    // Constructor
    SchedulerItem()
        : component(nullptr),
          interval(0),
          next_execution_low_(0),
          next_execution_high_(0),
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
          // remove is initialized in the member declaration
          type(TIMEOUT),
          name_type_(NameType::STATIC_STRING),
          is_retry(false) {
#else
          type(TIMEOUT),
          remove(false),
          name_type_(NameType::STATIC_STRING),
          is_retry(false) {
#endif
      name_.static_name = nullptr;
    }

    // Destructor - no dynamic memory to clean up (callback's std::function handles its own)
    ~SchedulerItem() = default;

    // Delete copy operations to prevent accidental copies
    SchedulerItem(const SchedulerItem &) = delete;
    SchedulerItem &operator=(const SchedulerItem &) = delete;

    // Delete move operations: SchedulerItem objects are managed via raw pointers, never moved directly
    SchedulerItem(SchedulerItem &&) = delete;
    SchedulerItem &operator=(SchedulerItem &&) = delete;

    // Helper to get the static name (only valid for STATIC_STRING type)
    const char *get_name() const { return (name_type_ == NameType::STATIC_STRING) ? name_.static_name : nullptr; }

    // Helper to get the hash or numeric ID (only valid for HASHED_STRING or NUMERIC_ID types)
    uint32_t get_name_hash_or_id() const { return (name_type_ != NameType::STATIC_STRING) ? name_.hash_or_id : 0; }

    // Helper to get the name type
    NameType get_name_type() const { return name_type_; }

    // Set name storage: for STATIC_STRING stores the pointer, for all other types stores hash_or_id.
    // Both union members occupy the same offset, so only one store is needed.
    void set_name(NameType type, const char *static_name, uint32_t hash_or_id) {
      if (type == NameType::STATIC_STRING) {
        name_.static_name = static_name;
      } else {
        name_.hash_or_id = hash_or_id;
      }
      name_type_ = type;
    }

    // Heap ordering predicate for the scheduler's item heap (defined in the .cpp).
    static bool cmp(SchedulerItem *a, SchedulerItem *b);

    // Note: We use 48 bits total (32 + 16), stored in a 64-bit value for API compatibility.
    // The upper 16 bits of the 64-bit value are always zero, which is fine since
    // millis_major_ is also 16 bits and they must match.
    constexpr uint64_t get_next_execution() const {
      return (static_cast<uint64_t>(next_execution_high_) << 32) | next_execution_low_;
    }

    constexpr void set_next_execution(uint64_t value) {
      next_execution_low_ = static_cast<uint32_t>(value);
      // Cast to uint16_t intentionally truncates to lower 16 bits of the upper 32 bits.
      // This is correct because millis_major_ that creates these values is also 16 bits.
      next_execution_high_ = static_cast<uint16_t>(value >> 32);
    }
    // Human-readable item type, used for logging/debugging.
    constexpr const char *get_type_str() const { return (type == TIMEOUT) ? "timeout" : "interval"; }
    // Log string identifying the owning component, or "unknown" when there is none.
    const LogString *get_source() const { return component ? component->get_component_log_str() : LOG_STR("unknown"); }
  };

  // Common implementation for both timeout and interval
  // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
  void set_timer_common_(Component *component, SchedulerItem::Type type, NameType name_type, const char *static_name,
                         uint32_t hash_or_id, uint32_t delay, std::function<void()> &&func, bool is_retry = false,
                         bool skip_cancel = false);

  // Common implementation for retry - Remove before 2026.8.0
  // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
  void set_retry_common_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                         uint32_t initial_wait_time, uint8_t max_attempts, std::function<RetryResult(uint8_t)> func,
                         float backoff_increase_factor);
#pragma GCC diagnostic pop
  // Common implementation for cancel_retry
  bool cancel_retry_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);

  // Extend a 32-bit millis() value to 64-bit. Use when the caller already has a fresh now.
  // On platforms with native 64-bit time, ignores now and uses millis_64() directly.
  // On other platforms, extends now to 64-bit using rollover tracking.
  uint64_t millis_64_from_(uint32_t now) {
#ifdef USE_NATIVE_64BIT_TIME
    (void) now;
    return millis_64();
#else
    return Millis64Impl::compute(now);
#endif
  }
  // Cleanup logically deleted items from the scheduler
  // Returns true if items remain after cleanup
  // IMPORTANT: This method should only be called from the main thread (loop task).
  bool cleanup_();
  // Remove and return the front item from the heap as a raw pointer.
  // Caller takes ownership and must either recycle or delete the item.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  SchedulerItem *pop_raw_locked_();
  // Get or create a scheduler item from the pool
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  SchedulerItem *get_item_from_pool_locked_();

 private:
  // Helper to cancel items - must be called with lock held
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  bool cancel_item_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                           SchedulerItem::Type type, bool match_retry = false);

  // Common implementation for cancel operations - handles locking
  bool cancel_item_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                    SchedulerItem::Type type, bool match_retry = false);

  // Helper to check if two static string names match
  inline bool HOT names_match_static_(const char *name1, const char *name2) const {
    // Check pointer equality first (common for static strings), then string contents
    // The core ESPHome codebase uses static strings (const char*) for component names,
    // making pointer comparison effective. The std::string overloads exist only for
    // compatibility with external components but are rarely used in practice.
    return (name1 != nullptr && name2 != nullptr) && ((name1 == name2) || (strcmp(name1, name2) == 0));
  }

  // Helper function to check if item matches criteria for cancellation
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  // IMPORTANT: Must be called with scheduler lock held
  inline bool HOT matches_item_locked_(SchedulerItem *item, Component *component, NameType name_type,
                                       const char *static_name, uint32_t hash_or_id, SchedulerItem::Type type,
                                       bool match_retry, bool skip_removed = true) const {
    // THREAD SAFETY: Check for nullptr first to prevent LoadProhibited crashes. On multi-threaded
    // platforms, items can be nulled in defer_queue_ during processing.
    // Fixes: https://github.com/esphome/esphome/issues/11940
    if (item == nullptr)
      return false;
    if (item->component != component || item->type != type || (skip_removed && this->is_item_removed_locked_(item)) ||
        (match_retry && !item->is_retry)) {
      return false;
    }
    // Name type must match
    if (item->get_name_type() != name_type)
      return false;
    // For static strings, compare the string content; for hash/ID, compare the value
    if (name_type == NameType::STATIC_STRING) {
      return this->names_match_static_(item->get_name(), static_name);
    }
    return item->get_name_hash_or_id() == hash_or_id;
  }

  // Helper to execute a scheduler item
  uint32_t execute_item_(SchedulerItem *item, uint32_t now);

  // Helper to check if item should be skipped
  bool should_skip_item_(SchedulerItem *item) const {
    return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed());
  }

  // Helper to recycle a SchedulerItem back to the pool.
  // Takes a raw pointer — caller transfers ownership. The item is either added to the
  // pool or deleted if the pool is full.
  // IMPORTANT: Only call from main loop context! Recycling clears the callback,
  // so calling from another thread while the callback is executing causes use-after-free.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  void recycle_item_main_loop_(SchedulerItem *item);

  // Helper to perform full cleanup when too many items are cancelled
  void full_cleanup_removed_items_();

  // Helper to calculate random offset for interval timers - extracted to reduce code size of set_timer_common_
  // IMPORTANT: Must not be inlined - called only for intervals, keeping it out of the hot path saves flash.
  uint32_t __attribute__((noinline)) calculate_interval_offset_(uint32_t delay);

  // Helper to check if a retry was already cancelled - extracted to reduce code size of set_timer_common_
  // Remove before 2026.8.0 along with all retry code.
  // IMPORTANT: Must not be inlined - retry path is cold and deprecated.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  bool __attribute__((noinline))
  is_retry_cancelled_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);

#ifdef ESPHOME_DEBUG_SCHEDULER
  // Helper for debug logging in set_timer_common_ - extracted to reduce code size
  void debug_log_timer_(const SchedulerItem *item, NameType name_type, const char *static_name, uint32_t hash_or_id,
                        SchedulerItem::Type type, uint32_t delay, uint64_t now);
#endif /* ESPHOME_DEBUG_SCHEDULER */

#ifndef ESPHOME_THREAD_SINGLE
  // Helper to process defer queue - inline for performance in hot path
  inline void process_defer_queue_(uint32_t &now) {
    // Process defer queue first to guarantee FIFO execution order for deferred items.
    // Previously, defer() used the heap which gave undefined order for equal timestamps,
    // causing race conditions on multi-core systems (ESP32, BK7200).
    // With the defer queue:
    // - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
    // - Items execute in exact order they were deferred (FIFO guarantee)
    // - No deferred items exist in to_add_, so processing order doesn't affect correctness
    // Single-core platforms don't use this queue and fall back to the heap-based approach.
    //
    // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
    // processed here. They are skipped during execution by should_skip_item_().
    // This is intentional - no memory leak occurs.
    //
    // We use an index (defer_queue_front_) to track the read position instead of calling
    // erase() on every pop, which would be O(n). The queue is processed once per loop -
    // any items added during processing are left for the next loop iteration.

    // Fast path: nothing to process, avoid lock entirely.
    // Worst case is a one-loop-iteration delay before newly deferred items are processed.
    if (this->defer_empty_())
      return;

    // Merge lock acquisitions: instead of separate locks for move-out and recycle (2N+1 total),
    // recycle each item after re-acquiring the lock for the next iteration (N+1 total).
    // The lock is held across: recycle → loop condition → move-out, then released for execution.
    SchedulerItem *item;

    this->lock_.lock();
    // Reset counter and snapshot queue end under lock
    this->defer_count_clear_();
    size_t defer_queue_end = this->defer_queue_.size();
    if (this->defer_queue_front_ >= defer_queue_end) {
      this->lock_.unlock();
      return;
    }
    while (this->defer_queue_front_ < defer_queue_end) {
      // Take ownership of the item, leaving nullptr in the vector slot.
      // This is safe because:
      // 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
      // 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_locked_)
      // 3. The lock protects concurrent access, but the nullptr remains until cleanup
      item = this->defer_queue_[this->defer_queue_front_];
      this->defer_queue_[this->defer_queue_front_] = nullptr;
      this->defer_queue_front_++;
      this->lock_.unlock();

      // Execute callback without holding lock to prevent deadlocks
      // if the callback tries to call defer() again
      if (!this->should_skip_item_(item)) {
        now = this->execute_item_(item, now);
      }

      this->lock_.lock();
      this->recycle_item_main_loop_(item);
    }
    // Clean up the queue (lock already held from last recycle or initial acquisition)
    this->cleanup_defer_queue_locked_();
    this->lock_.unlock();
  }

  // Helper to cleanup defer_queue_ after processing.
  // Keeps the common clear() path inline, outlines the rare compaction to keep
  // cold code out of the hot instruction cache lines.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  inline void cleanup_defer_queue_locked_() {
    // Check if new items were added by producers during processing
    if (this->defer_queue_front_ >= this->defer_queue_.size()) {
      // Common case: no new items - clear everything
      this->defer_queue_.clear();
    } else {
      // Rare case: new items were added during processing - outlined to keep cold code
      // out of the hot instruction cache lines
      this->compact_defer_queue_locked_();
    }
    this->defer_queue_front_ = 0;
  }

  // Cold path for compacting defer_queue_ when new items were added during processing.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  // IMPORTANT: Must not be inlined - rare path, outlined to keep it out of the hot instruction cache lines.
  void __attribute__((noinline)) compact_defer_queue_locked_();
#endif /* not ESPHOME_THREAD_SINGLE */

  // Helper to check if item is marked for removal (platform-specific)
  // Returns true if item should be skipped, handles platform-specific synchronization
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
  // function.
  bool is_item_removed_(SchedulerItem *item) const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic load for lock-free access
    return item->remove.load(std::memory_order_acquire);
#else
    // Single-threaded (ESPHOME_THREAD_SINGLE) or
    // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct read
    // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
    return item->remove;
#endif
  }

  // Helper to check if item is marked for removal when lock is already held.
  // Uses relaxed ordering since the mutex provides all necessary synchronization.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  bool is_item_removed_locked_(SchedulerItem *item) const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Lock already held - relaxed is sufficient, mutex provides ordering
    return item->remove.load(std::memory_order_relaxed);
#else
    return item->remove;
#endif
  }

  // Helper to set item removal flag (platform-specific)
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
  // function. Uses memory_order_release when setting to true (for cancellation synchronization),
  // and memory_order_relaxed when setting to false (for initialization).
  void set_item_removed_(SchedulerItem *item, bool removed) {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic store with appropriate ordering
    // Release ordering when setting to true ensures cancellation is visible to other threads
    // Relaxed ordering when setting to false is sufficient for initialization
    item->remove.store(removed ? 1 : 0, removed ? std::memory_order_release : std::memory_order_relaxed);
#else
    // Single-threaded (ESPHOME_THREAD_SINGLE) or
    // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct write
    // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
    item->remove = removed;
#endif
  }

  // Helper to mark matching items in a container as removed
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  // Returns the number of items marked for removal
  // IMPORTANT: Must be called with scheduler lock held
  __attribute__((noinline)) size_t mark_matching_items_removed_locked_(std::vector<SchedulerItem *> &container,
                                                                       Component *component, NameType name_type,
                                                                       const char *static_name, uint32_t hash_or_id,
                                                                       SchedulerItem::Type type, bool match_retry) {
    size_t count = 0;
    for (auto *item : container) {
      if (this->matches_item_locked_(item, component, name_type, static_name, hash_or_id, type, match_retry)) {
        this->set_item_removed_(item, true);
        count++;
      }
    }
    return count;
  }

  // Guards items_, to_add_, defer_queue_ and the item pool (see per-method locking notes above).
  Mutex lock_;
  std::vector<SchedulerItem *> items_;   // Active schedule (heap-ordered via SchedulerItem::cmp)
  std::vector<SchedulerItem *> to_add_;  // Staging area drained by process_to_add()

#ifndef ESPHOME_THREAD_SINGLE
  // Fast-path counter for process_to_add() to skip taking the lock when there is
  // nothing to add. Uses std::atomic on platforms that support it, plain uint32_t
  // otherwise. On non-atomic platforms, callers must hold the scheduler lock when
  // mutating this counter. Not needed on single-threaded platforms where we can
  // check to_add_.empty() directly.
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  std::atomic<uint32_t> to_add_count_{0};
#else
  uint32_t to_add_count_{0};
#endif
#endif /* ESPHOME_THREAD_SINGLE */

  // Fast-path helper for process_to_add() to decide if it can try the lock-free path.
  // - On ESPHOME_THREAD_SINGLE: direct container check is safe (no concurrent writers).
  // - On ESPHOME_THREAD_MULTI_ATOMICS: performs a lock-free check via to_add_count_.
  // - On ESPHOME_THREAD_MULTI_NO_ATOMICS: always returns false to force the caller
  //   down the locked path; this is NOT a lock-free emptiness check on that platform.
  bool to_add_empty_() const {
#ifdef ESPHOME_THREAD_SINGLE
    return this->to_add_.empty();
#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
    return this->to_add_count_.load(std::memory_order_relaxed) == 0;
#else
    return false;
#endif
  }

  // Increment to_add_count_ (no-op on single-threaded platforms)
  void to_add_count_increment_() {
#ifdef ESPHOME_THREAD_SINGLE
    // No counter needed — to_add_empty_() checks the vector directly
#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
    this->to_add_count_.fetch_add(1, std::memory_order_relaxed);
#else
    this->to_add_count_++;
#endif
  }

  // Reset to_add_count_ (no-op on single-threaded platforms)
  void to_add_count_clear_() {
#ifdef ESPHOME_THREAD_SINGLE
    // No counter needed — to_add_empty_() checks the vector directly
#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
    this->to_add_count_.store(0, std::memory_order_relaxed);
#else
    this->to_add_count_ = 0;
#endif
  }

#ifndef ESPHOME_THREAD_SINGLE
  // Single-core platforms don't need the defer queue and save ~32 bytes of RAM
  // Using std::vector instead of std::deque avoids 512-byte chunked allocations
  // Index tracking avoids O(n) erase() calls when draining the queue each loop
  std::vector<SchedulerItem *> defer_queue_;  // FIFO queue for defer() calls
  size_t defer_queue_front_{0};  // Index of first valid item in defer_queue_ (tracks consumed items)

  // Fast-path counter for process_defer_queue_() to skip lock when nothing to process.
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  std::atomic<uint32_t> defer_count_{0};
#else
  uint32_t defer_count_{0};
#endif

  bool defer_empty_() const {
    // defer_queue_ only exists on multi-threaded platforms, so no ESPHOME_THREAD_SINGLE path
    // ESPHOME_THREAD_MULTI_NO_ATOMICS: always take the lock
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    return this->defer_count_.load(std::memory_order_relaxed) == 0;
#else
    return false;
#endif
  }

  // Increment defer_count_ (atomic where available; otherwise caller holds the lock).
  void defer_count_increment_() {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->defer_count_.fetch_add(1, std::memory_order_relaxed);
#else
    this->defer_count_++;
#endif
  }

  // Reset defer_count_ to zero (called under lock at the start of queue processing).
  void defer_count_clear_() {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->defer_count_.store(0, std::memory_order_relaxed);
#else
    this->defer_count_ = 0;
#endif
  }

#endif /* ESPHOME_THREAD_SINGLE */

  // Counter for items marked for removal. Incremented cross-thread in cancel_item_locked_().
  // On ESPHOME_THREAD_MULTI_ATOMICS this is read without a lock in the cleanup_() fast path;
  // on ESPHOME_THREAD_MULTI_NO_ATOMICS the fast path is disabled so cleanup_() always takes the lock.
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  std::atomic<uint32_t> to_remove_{0};
#else
  uint32_t to_remove_{0};
#endif

  // Lock-free check if there are items to remove (for fast-path in cleanup_)
  bool to_remove_empty_() const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    return this->to_remove_.load(std::memory_order_relaxed) == 0;
#elif defined(ESPHOME_THREAD_SINGLE)
    return this->to_remove_ == 0;
#else
    return false;  // Always take the lock path
#endif
  }

  // Add count newly-cancelled items to the removal counter.
  void to_remove_add_(uint32_t count) {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->to_remove_.fetch_add(count, std::memory_order_relaxed);
#else
    this->to_remove_ += count;
#endif
  }

  // Account for one removed item having been cleaned up.
  void to_remove_decrement_() {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->to_remove_.fetch_sub(1, std::memory_order_relaxed);
#else
    this->to_remove_--;
#endif
  }

  // Reset the removal counter (after a full cleanup pass).
  void to_remove_clear_() {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->to_remove_.store(0, std::memory_order_relaxed);
#else
    this->to_remove_ = 0;
#endif
  }

  // Current number of items marked for removal but not yet cleaned up.
  uint32_t to_remove_count_() const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    return this->to_remove_.load(std::memory_order_relaxed);
#else
    return this->to_remove_;
#endif
  }

  // Memory pool for recycling SchedulerItem objects to reduce heap churn.
  // Design decisions:
  // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items
  // - The vector grows dynamically up to MAX_POOL_SIZE (5) only when needed, saving memory on simple setups
  // - Pool size of 5 matches typical usage (2-4 timers) while keeping memory overhead low (~250 bytes on ESP32)
  // - The pool significantly reduces heap fragmentation which is critical because heap allocation/deallocation
  //   can stall the entire system, causing timing issues and dropped events for any components that need
  //   to synchronize between tasks (see https://github.com/esphome/backlog/issues/52)
  std::vector<SchedulerItem *> scheduler_item_pool_;

#ifdef ESPHOME_DEBUG_SCHEDULER
  // Leak detection: tracks total live SchedulerItem allocations.
  // Invariant: debug_live_items_ == items_.size() + to_add_.size() + defer_queue_.size() + scheduler_item_pool_.size()
  // Verified periodically in call() to catch leaks early.
  size_t debug_live_items_{0};

  // Verify the scheduler memory invariant: all allocated items are accounted for.
  // Returns true if no leak detected. Logs an error and asserts on failure.
  bool debug_verify_no_leak_() const;
#endif
};
697
698} // namespace esphome
struct @65::@66 __attribute__
const Component * component
Definition component.cpp:37
uint16_t type
Providing packet encoding functions for exchanging data with a remote host.
Definition a01nyub.cpp:7
void retry_handler(const std::shared_ptr< RetryArgs > &args)
const char int const __FlashStringHelper va_list args
Definition log.h:74
uint64_t HOT millis_64()
Definition core.cpp:27
struct ESPDEPRECATED("Use std::index_sequence instead. Removed in 2026.6.0", "2025.12.0") seq
Definition automation.h:26
static void uint32_t