ESPHome 2026.2.2
Loading...
Searching...
No Matches
scheduler.cpp
Go to the documentation of this file.
1#include "scheduler.h"
2
3#include "application.h"
5#include "esphome/core/hal.h"
7#include "esphome/core/log.h"
9#include <algorithm>
10#include <cinttypes>
11#include <cstring>
12#include <limits>
13
14namespace esphome {
15
16static const char *const TAG = "scheduler";
17
18// Memory pool configuration constants
19// Pool size of 5 matches typical usage patterns (2-4 active timers)
20// - Minimal memory overhead (~250 bytes on ESP32)
21// - Sufficient for most configs with a couple sensors/components
22// - Still prevents heap fragmentation and allocation stalls
23// - Complex setups with many timers will just allocate beyond the pool
24// See https://github.com/esphome/backlog/issues/52
25static constexpr size_t MAX_POOL_SIZE = 5;
26
27// Maximum number of logically deleted (cancelled) items before forcing cleanup.
28// Set to 5 to match the pool size - when we have as many cancelled items as our
29// pool can hold, it's time to clean up and recycle them.
30static constexpr uint32_t MAX_LOGICALLY_DELETED_ITEMS = 5;
31// Half the 32-bit range - used to detect rollovers vs normal time progression
32static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;
33// max delay to start an interval sequence
34static constexpr uint32_t MAX_INTERVAL_DELAY = 5000;
35
36#if defined(ESPHOME_LOG_HAS_VERBOSE) || defined(ESPHOME_DEBUG_SCHEDULER)
37// Helper struct for formatting scheduler item names consistently in logs
38// Uses a stack buffer to avoid heap allocation
39// Uses ESPHOME_snprintf_P/ESPHOME_PSTR for ESP8266 to keep format strings in flash
40struct SchedulerNameLog {
41 char buffer[20]; // Enough for "id:4294967295" or "hash:0xFFFFFFFF" or "(null)"
42
43 // Format a scheduler item name for logging
44 // Returns pointer to formatted string (either static_name or internal buffer)
45 const char *format(Scheduler::NameType name_type, const char *static_name, uint32_t hash_or_id) {
46 using NameType = Scheduler::NameType;
47 if (name_type == NameType::STATIC_STRING) {
48 if (static_name)
49 return static_name;
50 // Copy "(null)" to buffer to keep it in flash on ESP8266
51 ESPHOME_strncpy_P(buffer, ESPHOME_PSTR("(null)"), sizeof(buffer));
52 return buffer;
53 } else if (name_type == NameType::HASHED_STRING) {
54 ESPHOME_snprintf_P(buffer, sizeof(buffer), ESPHOME_PSTR("hash:0x%08" PRIX32), hash_or_id);
55 return buffer;
56 } else if (name_type == NameType::NUMERIC_ID) {
57 ESPHOME_snprintf_P(buffer, sizeof(buffer), ESPHOME_PSTR("id:%" PRIu32), hash_or_id);
58 return buffer;
59 } else { // NUMERIC_ID_INTERNAL
60 ESPHOME_snprintf_P(buffer, sizeof(buffer), ESPHOME_PSTR("iid:%" PRIu32), hash_or_id);
61 return buffer;
62 }
63 }
64};
65#endif
66
67// Uncomment to debug scheduler
68// #define ESPHOME_DEBUG_SCHEDULER
69
70#ifdef ESPHOME_DEBUG_SCHEDULER
71// Helper to validate that a pointer looks like it's in static memory
72static void validate_static_string(const char *name) {
73 if (name == nullptr)
74 return;
75
76 // This is a heuristic check - stack and heap pointers are typically
77 // much higher in memory than static data
78 uintptr_t addr = reinterpret_cast<uintptr_t>(name);
79
80 // Create a stack variable to compare against
81 int stack_var;
82 uintptr_t stack_addr = reinterpret_cast<uintptr_t>(&stack_var);
83
84 // If the string pointer is near our stack variable, it's likely on the stack
85 // Using 8KB range as ESP32 main task stack is typically 8192 bytes
86 if (addr > (stack_addr - 0x2000) && addr < (stack_addr + 0x2000)) {
87 ESP_LOGW(TAG,
88 "WARNING: Scheduler name '%s' at %p appears to be on the stack - this is unsafe!\n"
89 " Stack reference at %p",
90 name, name, &stack_var);
91 }
92
93 // Also check if it might be on the heap by seeing if it's in a very different range
94 // This is platform-specific but generally heap is allocated far from static memory
95 static const char *static_str = "test";
96 uintptr_t static_addr = reinterpret_cast<uintptr_t>(static_str);
97
98 // If the address is very far from known static memory, it might be heap
99 if (addr > static_addr + 0x100000 || (static_addr > 0x100000 && addr < static_addr - 0x100000)) {
100 ESP_LOGW(TAG, "WARNING: Scheduler name '%s' at %p might be on heap (static ref at %p)", name, name, static_str);
101 }
102}
103#endif /* ESPHOME_DEBUG_SCHEDULER */
104
105// A note on locking: the `lock_` lock protects the `items_` and `to_add_` containers. It must be taken when writing to
106// them (i.e. when adding/removing items, but not when changing items). As items are only deleted from the loop task,
107// iterating over them from the loop task is fine; but iterating from any other context requires the lock to be held to
108// avoid the main thread modifying the list while it is being accessed.
109
110// Calculate random offset for interval timers
111// Extracted from set_timer_common_ to reduce code size - float math + random_float()
112// only needed for intervals, not timeouts
113uint32_t Scheduler::calculate_interval_offset_(uint32_t delay) {
114 return static_cast<uint32_t>(std::min(delay / 2, MAX_INTERVAL_DELAY) * random_float());
115}
116
117// Check if a retry was already cancelled in items_ or to_add_
118// Extracted from set_timer_common_ to reduce code size - retry path is cold and deprecated
119// Remove before 2026.8.0 along with all retry code
120bool Scheduler::is_retry_cancelled_locked_(Component *component, NameType name_type, const char *static_name,
121 uint32_t hash_or_id) {
122 return has_cancelled_timeout_in_container_locked_(this->items_, component, name_type, static_name, hash_or_id,
123 /* match_retry= */ true) ||
124 has_cancelled_timeout_in_container_locked_(this->to_add_, component, name_type, static_name, hash_or_id,
125 /* match_retry= */ true);
126}
127
// Common implementation for both timeout and interval
// name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
//
// Flow: cancel-only fast path for SCHEDULER_DONT_RUN; otherwise grab a pooled
// item under lock_, fill it in, and append it to either defer_queue_ (defer()
// on multi-threaded platforms) or to_add_, cancelling any same-named item
// first unless skip_cancel is set. The timestamp is taken BEFORE the lock
// because millis_64_ may itself need to acquire lock_.
void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type type, NameType name_type,
                                      const char *static_name, uint32_t hash_or_id, uint32_t delay,
                                      std::function<void()> func, bool is_retry, bool skip_cancel) {
  if (delay == SCHEDULER_DONT_RUN) {
    // Still need to cancel existing timer if we have a name/id
    if (!skip_cancel) {
      LockGuard guard{this->lock_};
      this->cancel_item_locked_(component, name_type, static_name, hash_or_id, type);
    }
    return;
  }

  // Get fresh timestamp BEFORE taking lock - millis_64_ may need to acquire lock itself
  const uint64_t now = this->millis_64_(millis());

  // Take lock early to protect scheduler_item_pool_ access
  LockGuard guard{this->lock_};

  // Create and populate the scheduler item
  auto item = this->get_item_from_pool_locked_();
  item->component = component;
  item->set_name(name_type, static_name, hash_or_id);
  item->type = type;
  item->callback = std::move(func);
  // Reset remove flag - recycled items may have been cancelled (remove=true) in previous use
  this->set_item_removed_(item.get(), false);
  item->is_retry = is_retry;

  // Determine target container: defer_queue_ for deferred items, to_add_ for everything else.
  // Using a pointer lets both paths share the cancel + push_back epilogue.
  auto *target = &this->to_add_;

#ifndef ESPHOME_THREAD_SINGLE
  // Special handling for defer() (delay = 0, type = TIMEOUT)
  // Single-core platforms don't need thread-safe defer handling
  if (delay == 0 && type == SchedulerItem::TIMEOUT) {
    // Put in defer queue for guaranteed FIFO execution
    target = &this->defer_queue_;
  } else
#endif /* not ESPHOME_THREAD_SINGLE */
  {
    // Type-specific setup
    if (type == SchedulerItem::INTERVAL) {
      item->interval = delay;
      // first execution happens immediately after a random smallish offset
      uint32_t offset = this->calculate_interval_offset_(delay);
      item->set_next_execution(now + offset);
#ifdef ESPHOME_LOG_HAS_VERBOSE
      SchedulerNameLog name_log;
      ESP_LOGV(TAG, "Scheduler interval for %s is %" PRIu32 "ms, offset %" PRIu32 "ms",
               name_log.format(name_type, static_name, hash_or_id), delay, offset);
#endif
    } else {
      // Timeouts fire once: interval stays 0 so the run loop recycles them
      item->interval = 0;
      item->set_next_execution(now + delay);
    }

#ifdef ESPHOME_DEBUG_SCHEDULER
    this->debug_log_timer_(item.get(), name_type, static_name, hash_or_id, type, delay, now);
#endif /* ESPHOME_DEBUG_SCHEDULER */

    // For retries, check if there's a cancelled timeout first
    // Skip check for anonymous retries (STATIC_STRING with nullptr) - they can't be cancelled by name
    if (is_retry && (name_type != NameType::STATIC_STRING || static_name != nullptr) &&
        type == SchedulerItem::TIMEOUT &&
        this->is_retry_cancelled_locked_(component, name_type, static_name, hash_or_id)) {
      // Skip scheduling - the retry was cancelled
      // NOTE: the pooled item is dropped here (unique_ptr goes out of scope)
#ifdef ESPHOME_DEBUG_SCHEDULER
      SchedulerNameLog skip_name_log;
      ESP_LOGD(TAG, "Skipping retry '%s' - found cancelled item",
               skip_name_log.format(name_type, static_name, hash_or_id));
#endif
      return;
    }
  }

  // Common epilogue: atomic cancel-and-add (unless skip_cancel is true)
  if (!skip_cancel) {
    this->cancel_item_locked_(component, name_type, static_name, hash_or_id, type);
  }
  target->push_back(std::move(item));
}
212
// Public set_timeout overloads - thin forwarders to set_timer_common_.
// Name variant: static C string (caller guarantees the pointer stays valid).
void HOT Scheduler::set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::TIMEOUT, NameType::STATIC_STRING, name, 0, timeout,
                          std::move(func));
}

// Name variant: std::string, stored as an FNV-1a hash (string is not retained).
void HOT Scheduler::set_timeout(Component *component, const std::string &name, uint32_t timeout,
                                std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::TIMEOUT, NameType::HASHED_STRING, nullptr, fnv1a_hash(name),
                          timeout, std::move(func));
}
// Name variant: caller-supplied numeric id.
void HOT Scheduler::set_timeout(Component *component, uint32_t id, uint32_t timeout, std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::TIMEOUT, NameType::NUMERIC_ID, nullptr, id, timeout,
                          std::move(func));
}
// Public cancel_timeout overloads - forward to cancel_item_ with the matching
// name type. Return true if at least one item was marked for removal.
bool HOT Scheduler::cancel_timeout(Component *component, const std::string &name) {
  return this->cancel_item_(component, NameType::HASHED_STRING, nullptr, fnv1a_hash(name), SchedulerItem::TIMEOUT);
}
bool HOT Scheduler::cancel_timeout(Component *component, const char *name) {
  return this->cancel_item_(component, NameType::STATIC_STRING, name, 0, SchedulerItem::TIMEOUT);
}
bool HOT Scheduler::cancel_timeout(Component *component, uint32_t id) {
  return this->cancel_item_(component, NameType::NUMERIC_ID, nullptr, id, SchedulerItem::TIMEOUT);
}
// Public set_interval overloads - thin forwarders to set_timer_common_.
// Name variant: std::string, stored as an FNV-1a hash (string is not retained).
void HOT Scheduler::set_interval(Component *component, const std::string &name, uint32_t interval,
                                 std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::INTERVAL, NameType::HASHED_STRING, nullptr, fnv1a_hash(name),
                          interval, std::move(func));
}

// Name variant: static C string (caller guarantees the pointer stays valid).
void HOT Scheduler::set_interval(Component *component, const char *name, uint32_t interval,
                                 std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::INTERVAL, NameType::STATIC_STRING, name, 0, interval,
                          std::move(func));
}
// Name variant: caller-supplied numeric id.
void HOT Scheduler::set_interval(Component *component, uint32_t id, uint32_t interval, std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::INTERVAL, NameType::NUMERIC_ID, nullptr, id, interval,
                          std::move(func));
}
// Public cancel_interval overloads - forward to cancel_item_ with the matching
// name type. Return true if at least one item was marked for removal.
bool HOT Scheduler::cancel_interval(Component *component, const std::string &name) {
  return this->cancel_item_(component, NameType::HASHED_STRING, nullptr, fnv1a_hash(name), SchedulerItem::INTERVAL);
}
bool HOT Scheduler::cancel_interval(Component *component, const char *name) {
  return this->cancel_item_(component, NameType::STATIC_STRING, name, 0, SchedulerItem::INTERVAL);
}
bool HOT Scheduler::cancel_interval(Component *component, uint32_t id) {
  return this->cancel_item_(component, NameType::NUMERIC_ID, nullptr, id, SchedulerItem::INTERVAL);
}
260
261// Suppress deprecation warnings for RetryResult usage in the still-present (but deprecated) retry implementation.
262// Remove before 2026.8.0 along with all retry code.
263#pragma GCC diagnostic push
264#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
265
// Shared state for one logical retry sequence. Each scheduled attempt's lambda
// holds a shared_ptr to this struct, so it outlives any single SchedulerItem.
struct RetryArgs {
  // Ordered to minimize padding on 32-bit systems
  std::function<RetryResult(uint8_t)> func;  // user callback; receives remaining attempt count
  Component *component;                      // component the retry is attributed to
  Scheduler *scheduler;                      // scheduler used to queue the next attempt
  // Union for name storage - only one is used based on name_type
  union {
    const char *static_name;  // For STATIC_STRING
    uint32_t hash_or_id;      // For HASHED_STRING or NUMERIC_ID
  } name_;
  uint32_t current_interval;      // wait before the next attempt; grown by the backoff factor
  float backoff_increase_factor;  // multiplier applied to the interval after each re-schedule
  Scheduler::NameType name_type;  // Discriminator for name_ union
  uint8_t retry_countdown;        // remaining attempts; decremented in retry_handler
};
281
// Run one retry attempt; if the callback did not report DONE and attempts
// remain, schedule the next attempt after current_interval.
// NOTE(review): retry_countdown is unsigned, so the pre-decrement wraps to 255
// if it is ever 0 on entry - presumably max_attempts is always >= 1 at the
// call sites; confirm before relying on it.
void retry_handler(const std::shared_ptr<RetryArgs> &args) {
  RetryResult const retry_result = args->func(--args->retry_countdown);
  if (retry_result == RetryResult::DONE || args->retry_countdown <= 0)
    return;
  // second execution of `func` happens after `initial_wait_time`
  // args->name_ is owned by the shared_ptr<RetryArgs>
  // which is captured in the lambda and outlives the SchedulerItem
  const char *static_name = (args->name_type == Scheduler::NameType::STATIC_STRING) ? args->name_.static_name : nullptr;
  uint32_t hash_or_id = (args->name_type != Scheduler::NameType::STATIC_STRING) ? args->name_.hash_or_id : 0;
  args->scheduler->set_timer_common_(
      args->component, Scheduler::SchedulerItem::TIMEOUT, args->name_type, static_name, hash_or_id,
      args->current_interval, [args]() { retry_handler(args); },
      /* is_retry= */ true);
  // backoff_increase_factor applied to third & later executions
  // (the interval is grown AFTER scheduling, so the timeout queued above
  // still uses the pre-growth value)
  args->current_interval *= args->backoff_increase_factor;
}
298
// Common implementation for all set_retry overloads.
// Cancels any existing retry with the same name, validates the backoff factor,
// packages the retry state into a shared RetryArgs, and schedules the first
// attempt immediately (delay 0) via set_timer_common_ with is_retry=true.
void HOT Scheduler::set_retry_common_(Component *component, NameType name_type, const char *static_name,
                                      uint32_t hash_or_id, uint32_t initial_wait_time, uint8_t max_attempts,
                                      std::function<RetryResult(uint8_t)> func, float backoff_increase_factor) {
  // Cancel first so a SCHEDULER_DONT_RUN call still removes the old retry
  this->cancel_retry_(component, name_type, static_name, hash_or_id);

  if (initial_wait_time == SCHEDULER_DONT_RUN)
    return;

#ifdef ESPHOME_LOG_HAS_VERY_VERBOSE
  {
    SchedulerNameLog name_log;
    ESP_LOGVV(TAG, "set_retry(name='%s', initial_wait_time=%" PRIu32 ", max_attempts=%u, backoff_factor=%0.1f)",
              name_log.format(name_type, static_name, hash_or_id), initial_wait_time, max_attempts,
              backoff_increase_factor);
  }
#endif

  // Guard against zero/near-zero factors that would collapse the interval to 0
  if (backoff_increase_factor < 0.0001) {
    ESP_LOGE(TAG, "set_retry: backoff_factor %0.1f too small, using 1.0: %s", backoff_increase_factor,
             (name_type == NameType::STATIC_STRING && static_name) ? static_name : "");
    backoff_increase_factor = 1;
  }

  // Retry state shared across attempts via the shared_ptr captured below
  auto args = std::make_shared<RetryArgs>();
  args->func = std::move(func);
  args->component = component;
  args->scheduler = this;
  args->name_type = name_type;
  if (name_type == NameType::STATIC_STRING) {
    args->name_.static_name = static_name;
  } else {
    args->name_.hash_or_id = hash_or_id;
  }
  args->current_interval = initial_wait_time;
  args->backoff_increase_factor = backoff_increase_factor;
  args->retry_countdown = max_attempts;

  // First execution of `func` immediately - use set_timer_common_ with is_retry=true
  this->set_timer_common_(
      component, SchedulerItem::TIMEOUT, name_type, static_name, hash_or_id, 0, [args]() { retry_handler(args); },
      /* is_retry= */ true);
}
341
// Public retry API - thin forwarders mapping each name flavor onto
// set_retry_common_ / cancel_item_ (retries are stored as TIMEOUT items
// with is_retry=true).

// Name variant: static C string (caller guarantees the pointer stays valid).
void HOT Scheduler::set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
                              std::function<RetryResult(uint8_t)> func, float backoff_increase_factor) {
  this->set_retry_common_(component, NameType::STATIC_STRING, name, 0, initial_wait_time, max_attempts, std::move(func),
                          backoff_increase_factor);
}

// Shared cancel helper: retries are TIMEOUT items matched with match_retry=true.
bool HOT Scheduler::cancel_retry_(Component *component, NameType name_type, const char *static_name,
                                  uint32_t hash_or_id) {
  return this->cancel_item_(component, name_type, static_name, hash_or_id, SchedulerItem::TIMEOUT,
                            /* match_retry= */ true);
}
bool HOT Scheduler::cancel_retry(Component *component, const char *name) {
  return this->cancel_retry_(component, NameType::STATIC_STRING, name, 0);
}

// Name variant: std::string, stored as an FNV-1a hash (string is not retained).
void HOT Scheduler::set_retry(Component *component, const std::string &name, uint32_t initial_wait_time,
                              uint8_t max_attempts, std::function<RetryResult(uint8_t)> func,
                              float backoff_increase_factor) {
  this->set_retry_common_(component, NameType::HASHED_STRING, nullptr, fnv1a_hash(name), initial_wait_time,
                          max_attempts, std::move(func), backoff_increase_factor);
}

bool HOT Scheduler::cancel_retry(Component *component, const std::string &name) {
  return this->cancel_retry_(component, NameType::HASHED_STRING, nullptr, fnv1a_hash(name));
}

// Name variant: caller-supplied numeric id.
void HOT Scheduler::set_retry(Component *component, uint32_t id, uint32_t initial_wait_time, uint8_t max_attempts,
                              std::function<RetryResult(uint8_t)> func, float backoff_increase_factor) {
  this->set_retry_common_(component, NameType::NUMERIC_ID, nullptr, id, initial_wait_time, max_attempts,
                          std::move(func), backoff_increase_factor);
}

bool HOT Scheduler::cancel_retry(Component *component, uint32_t id) {
  return this->cancel_retry_(component, NameType::NUMERIC_ID, nullptr, id);
}
377
378#pragma GCC diagnostic pop // End suppression of deprecated RetryResult warnings
379
380optional<uint32_t> HOT Scheduler::next_schedule_in(uint32_t now) {
381 // IMPORTANT: This method should only be called from the main thread (loop task).
382 // It performs cleanup and accesses items_[0] without holding a lock, which is only
383 // safe when called from the main thread. Other threads must not call this method.
384
385 // If no items, return empty optional
386 if (this->cleanup_() == 0)
387 return {};
388
389 auto &item = this->items_[0];
390 // Convert the fresh timestamp from caller (usually Application::loop()) to 64-bit
391 const auto now_64 = this->millis_64_(now); // 'now' from parameter - fresh from caller
392 const uint64_t next_exec = item->get_next_execution();
393 if (next_exec < now_64)
394 return 0;
395 return next_exec - now_64;
396}
397
// Full O(n) sweep: compact items_ in place, recycling every logically-deleted
// item, then rebuild the heap and reset the deleted-item counter.
void Scheduler::full_cleanup_removed_items_() {
  // We hold the lock for the entire cleanup operation because:
  // 1. We're rebuilding the entire items_ list, so we need exclusive access throughout
  // 2. Other threads must see either the old state or the new state, not intermediate states
  // 3. The operation is already expensive (O(n)), so lock overhead is negligible
  // 4. No operations inside can block or take other locks, so no deadlock risk
  LockGuard guard{this->lock_};

  // Compact in-place: move valid items forward, recycle removed ones
  // (classic two-index stable compaction; kept items keep their relative order)
  size_t write = 0;
  for (size_t read = 0; read < this->items_.size(); ++read) {
    if (!is_item_removed_(this->items_[read].get())) {
      if (write != read) {
        this->items_[write] = std::move(this->items_[read]);
      }
      ++write;
    } else {
      this->recycle_item_main_loop_(std::move(this->items_[read]));
    }
  }
  // Drop the moved-from tail left behind by the compaction
  this->items_.erase(this->items_.begin() + write, this->items_.end());
  // Rebuild the heap structure since items are no longer in heap order
  std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
  this->to_remove_ = 0;
}
423
// Main scheduler tick, invoked from Application::loop() with a fresh millis().
// Drains the defer queue (multi-threaded builds), promotes pending items,
// cleans up cancelled items, then runs every item whose deadline has passed.
// Items are popped from the heap only AFTER their callback returns so that a
// cancel issued from inside the callback is still observed.
void HOT Scheduler::call(uint32_t now) {
#ifndef ESPHOME_THREAD_SINGLE
  this->process_defer_queue_(now);
#endif /* not ESPHOME_THREAD_SINGLE */

  // Convert the fresh timestamp from main loop to 64-bit for scheduler operations
  const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from Application::loop()
  this->process_to_add();

  // Track if any items were added to to_add_ during this call (intervals or from callbacks)
  bool has_added_items = false;

#ifdef ESPHOME_DEBUG_SCHEDULER
  // Periodic debug dump (every ~2s): drains the heap item by item for logging,
  // then restores it and rebuilds the heap order.
  static uint64_t last_print = 0;

  if (now_64 - last_print > 2000) {
    last_print = now_64;
    std::vector<std::unique_ptr<SchedulerItem>> old_items;
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed);
    const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed);
    ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(),
             this->scheduler_item_pool_.size(), now_64, major_dbg, last_dbg);
#else /* not ESPHOME_THREAD_MULTI_ATOMICS */
    ESP_LOGD(TAG, "Items: count=%zu, pool=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(),
             this->scheduler_item_pool_.size(), now_64, this->millis_major_, this->last_millis_);
#endif /* else ESPHOME_THREAD_MULTI_ATOMICS */
    // Cleanup before debug output
    this->cleanup_();
    while (!this->items_.empty()) {
      std::unique_ptr<SchedulerItem> item;
      {
        LockGuard guard{this->lock_};
        item = this->pop_raw_locked_();
      }

      SchedulerNameLog name_log;
      bool is_cancelled = is_item_removed_(item.get());
      ESP_LOGD(TAG, " %s '%s/%s' interval=%" PRIu32 " next_execution in %" PRIu64 "ms at %" PRIu64 "%s",
               item->get_type_str(), LOG_STR_ARG(item->get_source()),
               name_log.format(item->get_name_type(), item->get_name(), item->get_name_hash_or_id()), item->interval,
               item->get_next_execution() - now_64, item->get_next_execution(), is_cancelled ? " [CANCELLED]" : "");

      old_items.push_back(std::move(item));
    }
    ESP_LOGD(TAG, "\n");

    {
      LockGuard guard{this->lock_};
      this->items_ = std::move(old_items);
      // Rebuild heap after moving items back
      std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
    }
  }
#endif /* ESPHOME_DEBUG_SCHEDULER */

  // Cleanup removed items before processing
  // First try to clean items from the top of the heap (fast path)
  this->cleanup_();

  // If we still have too many cancelled items, do a full cleanup
  // This only happens if cancelled items are stuck in the middle/bottom of the heap
  if (this->to_remove_ >= MAX_LOGICALLY_DELETED_ITEMS) {
    this->full_cleanup_removed_items_();
  }
  while (!this->items_.empty()) {
    // Don't copy-by value yet
    auto &item = this->items_[0];
    if (item->get_next_execution() > now_64) {
      // Not reached timeout yet, done for this call
      break;
    }
    // Don't run on failed components
    // (item was not cancelled, so to_remove_ is not decremented here)
    if (item->component != nullptr && item->component->is_failed()) {
      LockGuard guard{this->lock_};
      this->recycle_item_main_loop_(this->pop_raw_locked_());
      continue;
    }

    // Check if item is marked for removal
    // This handles two cases:
    // 1. Item was marked for removal after cleanup_() but before we got here
    // 2. Item is marked for removal but wasn't at the front of the heap during cleanup_()
#ifdef ESPHOME_THREAD_MULTI_NO_ATOMICS
    // Multi-threaded platforms without atomics: must take lock to safely read remove flag
    {
      LockGuard guard{this->lock_};
      if (is_item_removed_(item.get())) {
        this->recycle_item_main_loop_(this->pop_raw_locked_());
        this->to_remove_--;
        continue;
      }
    }
#else
    // Single-threaded or multi-threaded with atomics: can check without lock
    if (is_item_removed_(item.get())) {
      LockGuard guard{this->lock_};
      this->recycle_item_main_loop_(this->pop_raw_locked_());
      this->to_remove_--;
      continue;
    }
#endif

#ifdef ESPHOME_DEBUG_SCHEDULER
    {
      SchedulerNameLog name_log;
      ESP_LOGV(TAG, "Running %s '%s/%s' with interval=%" PRIu32 " next_execution=%" PRIu64 " (now=%" PRIu64 ")",
               item->get_type_str(), LOG_STR_ARG(item->get_source()),
               name_log.format(item->get_name_type(), item->get_name(), item->get_name_hash_or_id()), item->interval,
               item->get_next_execution(), now_64);
    }
#endif /* ESPHOME_DEBUG_SCHEDULER */

    // Warning: During callback(), a lot of stuff can happen, including:
    //  - timeouts/intervals get added, potentially invalidating vector pointers
    //  - timeouts/intervals get cancelled
    now = this->execute_item_(item.get(), now);

    // Lock is held for the rest of this iteration (pop + requeue/recycle)
    LockGuard guard{this->lock_};

    // Only pop after function call, this ensures we were reachable
    // during the function call and know if we were cancelled.
    auto executed_item = this->pop_raw_locked_();

    if (executed_item->remove) {
      // We were removed/cancelled in the function call, recycle and continue
      this->to_remove_--;
      this->recycle_item_main_loop_(std::move(executed_item));
      continue;
    }

    if (executed_item->type == SchedulerItem::INTERVAL) {
      executed_item->set_next_execution(now_64 + executed_item->interval);
      // Add new item directly to to_add_
      // since we have the lock held
      this->to_add_.push_back(std::move(executed_item));
    } else {
      // Timeout completed - recycle it
      this->recycle_item_main_loop_(std::move(executed_item));
    }

    has_added_items |= !this->to_add_.empty();
  }

  // Promote anything queued during this call (re-armed intervals, callbacks
  // that scheduled new timers) so it is visible for the next tick
  if (has_added_items) {
    this->process_to_add();
  }
}
572void HOT Scheduler::process_to_add() {
573 LockGuard guard{this->lock_};
574 for (auto &it : this->to_add_) {
575 if (is_item_removed_(it.get())) {
576 // Recycle cancelled items
577 this->recycle_item_main_loop_(std::move(it));
578 continue;
579 }
580
581 this->items_.push_back(std::move(it));
582 std::push_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
583 }
584 this->to_add_.clear();
585}
586size_t HOT Scheduler::cleanup_() {
587 // Fast path: if nothing to remove, just return the current size
588 // Reading to_remove_ without lock is safe because:
589 // 1. We only call this from the main thread during call()
590 // 2. If it's 0, there's definitely nothing to cleanup
591 // 3. If it becomes non-zero after we check, cleanup will happen on the next loop iteration
592 // 4. Not all platforms support atomics, so we accept this race in favor of performance
593 // 5. The worst case is a one-loop-iteration delay in cleanup, which is harmless
594 if (this->to_remove_ == 0)
595 return this->items_.size();
596
597 // We must hold the lock for the entire cleanup operation because:
598 // 1. We're modifying items_ (via pop_raw_locked_) which requires exclusive access
599 // 2. We're decrementing to_remove_ which is also modified by other threads
600 // (though all modifications are already under lock)
601 // 3. Other threads read items_ when searching for items to cancel in cancel_item_locked_()
602 // 4. We need a consistent view of items_ and to_remove_ throughout the operation
603 // Without the lock, we could access items_ while another thread is reading it,
604 // leading to race conditions
605 LockGuard guard{this->lock_};
606 while (!this->items_.empty()) {
607 auto &item = this->items_[0];
608 if (!item->remove)
609 break;
610 this->to_remove_--;
611 this->recycle_item_main_loop_(this->pop_raw_locked_());
612 }
613 return this->items_.size();
614}
615std::unique_ptr<Scheduler::SchedulerItem> HOT Scheduler::pop_raw_locked_() {
616 std::pop_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
617
618 // Move the item out before popping - this is the item that was at the front of the heap
619 auto item = std::move(this->items_.back());
620
621 this->items_.pop_back();
622 return item;
623}
624
// Helper to execute a scheduler item
// Sets the current component (for logging context), runs the callback under a
// blocking-time guard, and returns the guard's updated timestamp so the caller
// doesn't keep using a 'now' that went stale during a long callback.
uint32_t HOT Scheduler::execute_item_(SchedulerItem *item, uint32_t now) {
  App.set_current_component(item->component);
  // Guard must outlive the callback; finish() is called after it returns
  WarnIfComponentBlockingGuard guard{item->component, now};
  item->callback();
  return guard.finish();
}
632
// Common implementation for cancel operations - handles locking
// Thin wrapper: acquires lock_ and delegates to cancel_item_locked_.
// Returns true if at least one matching item was marked for removal.
bool HOT Scheduler::cancel_item_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                                 SchedulerItem::Type type, bool match_retry) {
  LockGuard guard{this->lock_};
  return this->cancel_item_locked_(component, name_type, static_name, hash_or_id, type, match_retry);
}
639
// Helper to cancel items - must be called with lock held
// name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
// Searches all three containers (defer_queue_, items_, to_add_) and marks
// matches as removed; returns true if anything was marked.
bool HOT Scheduler::cancel_item_locked_(Component *component, NameType name_type, const char *static_name,
                                        uint32_t hash_or_id, SchedulerItem::Type type, bool match_retry) {
  // Early return if static string name is invalid
  if (name_type == NameType::STATIC_STRING && static_name == nullptr) {
    return false;
  }

  size_t total_cancelled = 0;

#ifndef ESPHOME_THREAD_SINGLE
  // Mark items in defer queue as cancelled (they'll be skipped when processed)
  // Only timeouts can live in the defer queue (defer() is a 0ms timeout)
  if (type == SchedulerItem::TIMEOUT) {
    total_cancelled += this->mark_matching_items_removed_locked_(this->defer_queue_, component, name_type, static_name,
                                                                 hash_or_id, type, match_retry);
  }
#endif /* not ESPHOME_THREAD_SINGLE */

  // Cancel items in the main heap
  // We only mark items for removal here - never recycle directly.
  // The main loop may be executing an item's callback right now, and recycling
  // would destroy the callback while it's running (use-after-free).
  // Only the main loop in call() should recycle items after execution completes.
  if (!this->items_.empty()) {
    size_t heap_cancelled = this->mark_matching_items_removed_locked_(this->items_, component, name_type, static_name,
                                                                      hash_or_id, type, match_retry);
    total_cancelled += heap_cancelled;
    // to_remove_ only counts heap entries - it drives cleanup_() of items_
    this->to_remove_ += heap_cancelled;
  }

  // Cancel items in to_add_
  total_cancelled += this->mark_matching_items_removed_locked_(this->to_add_, component, name_type, static_name,
                                                               hash_or_id, type, match_retry);

  return total_cancelled > 0;
}
677
// Widen a 32-bit millis() reading into a 64-bit monotonic timestamp.
// Tracks 32-bit rollovers (~49.7 days) in millis_major_ and combines it with
// the low 32-bit reading: result = (millis_major_ << 32) + now.
// @param now  A freshly sampled millis() value (see freshness note below).
// @return 64-bit millisecond timestamp that survives 32-bit rollover.
uint64_t Scheduler::millis_64_(uint32_t now) {
  // THREAD SAFETY NOTE:
  // This function has three implementations, based on the precompiler flags
  // - ESPHOME_THREAD_SINGLE - Runs on single-threaded platforms (ESP8266, RP2040, etc.)
  // - ESPHOME_THREAD_MULTI_NO_ATOMICS - Runs on multi-threaded platforms without atomics (LibreTiny BK72xx)
  // - ESPHOME_THREAD_MULTI_ATOMICS - Runs on multi-threaded platforms with atomics (ESP32, HOST, LibreTiny
  // RTL87xx/LN882x, etc.)
  //
  // Make sure all changes are synchronized if you edit this function.
  //
  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
  // handles out-of-order timestamps between threads, but minimizing time differences
  // helps maintain accuracy.
  //

#ifdef ESPHOME_THREAD_SINGLE
  // This is the single core implementation.
  //
  // Single-core platforms have no concurrency, so this is a simple implementation
  // that just tracks 32-bit rollover (every 49.7 days) without any locking or atomics.

  uint16_t major = this->millis_major_;
  uint32_t last = this->last_millis_;

  // Check for rollover
  // A large backwards jump (> half the 32-bit range) distinguishes a true
  // wraparound from an ordinary stale/out-of-order reading.
  if (now < last && (last - now) > HALF_MAX_UINT32) {
    this->millis_major_++;
    major++;
    this->last_millis_ = now;
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
  } else if (now > last) {
    // Only update if time moved forward
    this->last_millis_ = now;
  }

  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_NO_ATOMICS)
  // This is the multi core no atomics implementation.
  //
  // Without atomics, this implementation uses locks more aggressively:
  // 1. Always locks when near the rollover boundary (within 10 seconds)
  // 2. Always locks when detecting a large backwards jump
  // 3. Updates without lock in normal forward progression (accepting minor races)
  // This is less efficient but necessary without atomic operations.
  uint16_t major = this->millis_major_;
  uint32_t last = this->last_millis_;

  // Define a safe window around the rollover point (10 seconds)
  // This covers any reasonable scheduler delays or thread preemption
  static const uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds

  // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
  bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);

  if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
    // Near rollover or detected a rollover - need lock for safety
    LockGuard guard{this->lock_};
    // Re-read with lock held
    last = this->last_millis_;

    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // True rollover detected (happens every ~49.7 days)
      this->millis_major_++;
      major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
      ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
    }
    // Update last_millis_ while holding lock
    this->last_millis_ = now;
  } else if (now > last) {
    // Normal case: Not near rollover and time moved forward
    // Update without lock. While this may cause minor races (microseconds of
    // backwards time movement), they're acceptable because:
    // 1. The scheduler operates at millisecond resolution, not microsecond
    // 2. We've already prevented the critical rollover race condition
    // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
    this->last_millis_ = now;
  }
  // If now <= last and we're not near rollover, don't update
  // This minimizes backwards time movement

  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
  // This is the multi core with atomics implementation.
  //
  // Uses atomic operations with acquire/release semantics to ensure coherent
  // reads of millis_major_ and last_millis_ across cores. Features:
  // 1. Epoch-coherency retry loop to handle concurrent updates
  // 2. Lock only taken for actual rollover detection and update
  // 3. Lock-free CAS updates for normal forward time progression
  // 4. Memory ordering ensures cores see consistent time values

  for (;;) {
    uint16_t major = this->millis_major_.load(std::memory_order_acquire);

    /*
     * Acquire so that if we later decide **not** to take the lock we still
     * observe a `millis_major_` value coherent with the loaded `last_millis_`.
     * The acquire load ensures any later read of `millis_major_` sees its
     * corresponding increment.
     */
    uint32_t last = this->last_millis_.load(std::memory_order_acquire);

    // If we might be near a rollover (large backwards jump), take the lock for the entire operation
    // This ensures rollover detection and last_millis_ update are atomic together
    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // Potential rollover - need lock for atomic rollover detection + update
      LockGuard guard{this->lock_};
      // Re-read with lock held; mutex already provides ordering
      last = this->last_millis_.load(std::memory_order_relaxed);

      if (now < last && (last - now) > HALF_MAX_UINT32) {
        // True rollover detected (happens every ~49.7 days)
        this->millis_major_.fetch_add(1, std::memory_order_relaxed);
        major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
        ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
      }
      /*
       * Update last_millis_ while holding the lock to prevent races
       * Publish the new low-word *after* bumping `millis_major_` (done above)
       * so readers never see a mismatched pair.
       */
      this->last_millis_.store(now, std::memory_order_release);
    } else {
      // Normal case: Try lock-free update, but only allow forward movement within same epoch
      // This prevents accidentally moving backwards across a rollover boundary
      while (now > last && (now - last) < HALF_MAX_UINT32) {
        if (this->last_millis_.compare_exchange_weak(last, now,
                                                     std::memory_order_release,    // success
                                                     std::memory_order_relaxed)) {  // failure
          break;
        }
        // CAS failure means no data was published; relaxed is fine
        // last is automatically updated by compare_exchange_weak if it fails
      }
    }
    // Epoch coherency check: if millis_major_ changed while we worked, another
    // thread advanced the epoch under us - retry so `major` and `now` stay a
    // matched pair; otherwise the combined value below would be inconsistent.
    uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
    if (major_end == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
  // Unreachable - the loop always returns when major_end == major
  __builtin_unreachable();

#else
#error \
    "No platform threading model defined. One of ESPHOME_THREAD_SINGLE, ESPHOME_THREAD_MULTI_NO_ATOMICS, or ESPHOME_THREAD_MULTI_ATOMICS must be defined."
#endif
}
835
836bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
837 const std::unique_ptr<SchedulerItem> &b) {
838 // High bits are almost always equal (change only on 32-bit rollover ~49 days)
839 // Optimize for common case: check low bits first when high bits are equal
840 return (a->next_execution_high_ == b->next_execution_high_) ? (a->next_execution_low_ > b->next_execution_low_)
841 : (a->next_execution_high_ > b->next_execution_high_);
842}
843
844// Recycle a SchedulerItem back to the pool for reuse.
845// IMPORTANT: Caller must hold the scheduler lock before calling this function.
846// This protects scheduler_item_pool_ from concurrent access by other threads
847// that may be acquiring items from the pool in set_timer_common_().
848void Scheduler::recycle_item_main_loop_(std::unique_ptr<SchedulerItem> item) {
849 if (!item)
850 return;
851
852 if (this->scheduler_item_pool_.size() < MAX_POOL_SIZE) {
853 // Clear callback to release captured resources
854 item->callback = nullptr;
855 this->scheduler_item_pool_.push_back(std::move(item));
856#ifdef ESPHOME_DEBUG_SCHEDULER
857 ESP_LOGD(TAG, "Recycled item to pool (pool size now: %zu)", this->scheduler_item_pool_.size());
858#endif
859 } else {
860#ifdef ESPHOME_DEBUG_SCHEDULER
861 ESP_LOGD(TAG, "Pool full (size: %zu), deleting item", this->scheduler_item_pool_.size());
862#endif
863 }
864 // else: unique_ptr will delete the item when it goes out of scope
865}
866
#ifdef ESPHOME_DEBUG_SCHEDULER
// Log a set_timeout()/set_interval() registration (debug builds only).
// @param item        Item being registered; its source and next execution time are logged.
// @param name_type   How the item is named (static string vs. numeric hash/id) - drives name formatting.
// @param static_name Literal name used when name_type is STATIC_STRING; may be nullptr otherwise.
// @param hash_or_id  Numeric name (hash or id) used when there is no static name.
// @param type        TIMEOUT or INTERVAL; intervals additionally log their first-run offset.
// @param delay       Requested delay/interval in milliseconds.
// @param now         Current 64-bit scheduler time, used to compute the interval's start offset.
void Scheduler::debug_log_timer_(const SchedulerItem *item, NameType name_type, const char *static_name,
                                 uint32_t hash_or_id, SchedulerItem::Type type, uint32_t delay, uint64_t now) {
  // Validate static strings in debug mode
  if (name_type == NameType::STATIC_STRING && static_name != nullptr) {
    validate_static_string(static_name);
  }

  // Debug logging
  // SchedulerNameLog formats the name into a stack buffer - no heap allocation.
  SchedulerNameLog name_log;
  const char *type_str = (type == SchedulerItem::TIMEOUT) ? "timeout" : "interval";
  if (type == SchedulerItem::TIMEOUT) {
    ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ")", type_str, LOG_STR_ARG(item->get_source()),
             name_log.format(name_type, static_name, hash_or_id), type_str, delay);
  } else {
    // Intervals also report the offset until their first execution
    // (next execution time minus the current 64-bit scheduler time).
    ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ", offset=%" PRIu32 ")", type_str, LOG_STR_ARG(item->get_source()),
             name_log.format(name_type, static_name, hash_or_id), type_str, delay,
             static_cast<uint32_t>(item->get_next_execution() - now));
  }
}
#endif /* ESPHOME_DEBUG_SCHEDULER */
888
889// Helper to get or create a scheduler item from the pool
890// IMPORTANT: Caller must hold the scheduler lock before calling this function.
891std::unique_ptr<Scheduler::SchedulerItem> Scheduler::get_item_from_pool_locked_() {
892 std::unique_ptr<SchedulerItem> item;
893 if (!this->scheduler_item_pool_.empty()) {
894 item = std::move(this->scheduler_item_pool_.back());
895 this->scheduler_item_pool_.pop_back();
896#ifdef ESPHOME_DEBUG_SCHEDULER
897 ESP_LOGD(TAG, "Reused item from pool (pool size now: %zu)", this->scheduler_item_pool_.size());
898#endif
899 } else {
900 item = make_unique<SchedulerItem>();
901#ifdef ESPHOME_DEBUG_SCHEDULER
902 ESP_LOGD(TAG, "Allocated new item (pool empty)");
903#endif
904 }
905 return item;
906}
907
908} // namespace esphome
void set_current_component(Component *component)
ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.", "2026.2.0") void set_retry(const std uint32_t uint8_t std::function< RetryResult(uint8_t)> float backoff_increase_factor
Definition component.h:373
const Component * component
Definition component.cpp:37
uint16_t type
const char *const TAG
Definition spi.cpp:7
Providing packet encoding functions for exchanging data with a remote host.
Definition a01nyub.cpp:7
float random_float()
Return a random float between 0 and 1.
Definition helpers.cpp:159
void retry_handler(const std::shared_ptr< RetryArgs > &args)
void IRAM_ATTR HOT delay(uint32_t ms)
Definition core.cpp:26
uint32_t IRAM_ATTR HOT millis()
Definition core.cpp:25
Application App
Global storage of Application pointer - only one Application can exist.
constexpr uint32_t fnv1a_hash(const char *str)
Calculate a FNV-1a hash of str.
Definition helpers.h:512