ESPHome 2025.8.4
scheduler.cpp
#include "scheduler.h"

#include "application.h"
#include "esphome/core/defines.h"
#include "esphome/core/hal.h"
#include "esphome/core/helpers.h"
#include "esphome/core/log.h"
#include <algorithm>
#include <cinttypes>
#include <cstring>
#include <limits>

namespace esphome {

static const char *const TAG = "scheduler";

static const uint32_t MAX_LOGICALLY_DELETED_ITEMS = 10;
// Half the 32-bit range - used to detect rollovers vs normal time progression
static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;
// Maximum random delay before the first execution of an interval
static constexpr uint32_t MAX_INTERVAL_DELAY = 5000;

// Uncomment to debug scheduler
// #define ESPHOME_DEBUG_SCHEDULER

#ifdef ESPHOME_DEBUG_SCHEDULER
// Helper to validate that a pointer looks like it's in static memory
static void validate_static_string(const char *name) {
  if (name == nullptr)
    return;

  // This is a heuristic check - stack and heap pointers are typically
  // much higher in memory than static data
  uintptr_t addr = reinterpret_cast<uintptr_t>(name);

  // Create a stack variable to compare against
  int stack_var;
  uintptr_t stack_addr = reinterpret_cast<uintptr_t>(&stack_var);

  // If the string pointer is near our stack variable, it's likely on the stack
  // Using 8KB range as ESP32 main task stack is typically 8192 bytes
  if (addr > (stack_addr - 0x2000) && addr < (stack_addr + 0x2000)) {
    ESP_LOGW(TAG,
             "WARNING: Scheduler name '%s' at %p appears to be on the stack - this is unsafe!\n"
             " Stack reference at %p",
             name, name, &stack_var);
  }

  // Also check if it might be on the heap by seeing if it's in a very different range
  // This is platform-specific but generally heap is allocated far from static memory
  static const char *static_str = "test";
  uintptr_t static_addr = reinterpret_cast<uintptr_t>(static_str);

  // If the address is very far from known static memory, it might be heap
  if (addr > static_addr + 0x100000 || (static_addr > 0x100000 && addr < static_addr - 0x100000)) {
    ESP_LOGW(TAG, "WARNING: Scheduler name '%s' at %p might be on heap (static ref at %p)", name, name, static_str);
  }
}
#endif /* ESPHOME_DEBUG_SCHEDULER */

// A note on locking: the `lock_` lock protects the `items_` and `to_add_` containers. It must be taken when writing to
// them (i.e. when adding/removing items, but not when changing items). As items are only deleted from the loop task,
// iterating over them from the loop task is fine; but iterating from any other context requires the lock to be held to
// avoid the main thread modifying the list while it is being accessed.

// Common implementation for both timeout and interval
void HOT Scheduler::set_timer_common_(Component *component, SchedulerItem::Type type, bool is_static_string,
                                      const void *name_ptr, uint32_t delay, std::function<void()> func, bool is_retry,
                                      bool skip_cancel) {
  // Get the name as const char*
  const char *name_cstr = this->get_name_cstr_(is_static_string, name_ptr);

  if (delay == SCHEDULER_DONT_RUN) {
    // Still need to cancel the existing timer if the name is not empty
    if (!skip_cancel) {
      LockGuard guard{this->lock_};
      this->cancel_item_locked_(component, name_cstr, type);
    }
    return;
  }

  // Create and populate the scheduler item
  auto item = make_unique<SchedulerItem>();
  item->component = component;
  item->set_name(name_cstr, !is_static_string);
  item->type = type;
  item->callback = std::move(func);
  // Initialize remove to false (though it should already be from the constructor)
  // Not using the mark_item_removed_ helper since we're setting to false, not true
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  item->remove.store(false, std::memory_order_relaxed);
#else
  item->remove = false;
#endif
  item->is_retry = is_retry;

#ifndef ESPHOME_THREAD_SINGLE
  // Special handling for defer() (delay = 0, type = TIMEOUT)
  // Single-core platforms don't need thread-safe defer handling
  if (delay == 0 && type == SchedulerItem::TIMEOUT) {
    // Put in the defer queue for guaranteed FIFO execution
    LockGuard guard{this->lock_};
    if (!skip_cancel) {
      this->cancel_item_locked_(component, name_cstr, type);
    }
    this->defer_queue_.push_back(std::move(item));
    return;
  }
#endif /* not ESPHOME_THREAD_SINGLE */

  // Get a fresh timestamp for the new timer/interval - ensures accurate scheduling
  const auto now = this->millis_64_(millis());  // Fresh millis() call

  // Type-specific setup
  if (type == SchedulerItem::INTERVAL) {
    item->interval = delay;
    // First execution happens after a small random offset (0 to min(interval/2, 5 s))
    // rather than after a full interval
    uint32_t offset = (uint32_t) (std::min(delay / 2, MAX_INTERVAL_DELAY) * random_float());
    item->next_execution_ = now + offset;
    ESP_LOGV(TAG, "Scheduler interval for %s is %" PRIu32 "ms, offset %" PRIu32 "ms", name_cstr ? name_cstr : "", delay,
             offset);
  } else {
    item->interval = 0;
    item->next_execution_ = now + delay;
  }
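
  // Worked example (a sketch): delay = 60000 gives offset =
  // min(30000, 5000) * random_float(), so the first run lands within 0-5 s and
  // subsequent runs follow every 60 s; delay = 4000 gives a first run within 0-2 s.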

#ifdef ESPHOME_DEBUG_SCHEDULER
  // Validate static strings in debug mode
  if (is_static_string && name_cstr != nullptr) {
    validate_static_string(name_cstr);
  }

  // Debug logging
  const char *type_str = (type == SchedulerItem::TIMEOUT) ? "timeout" : "interval";
  if (type == SchedulerItem::TIMEOUT) {
    ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ")", type_str, item->get_source(),
             name_cstr ? name_cstr : "(null)", type_str, delay);
  } else {
    ESP_LOGD(TAG, "set_%s(name='%s/%s', %s=%" PRIu32 ", offset=%" PRIu32 ")", type_str, item->get_source(),
             name_cstr ? name_cstr : "(null)", type_str, delay, static_cast<uint32_t>(item->next_execution_ - now));
  }
#endif /* ESPHOME_DEBUG_SCHEDULER */

  LockGuard guard{this->lock_};

  // For retries, check if there's a cancelled timeout first
  if (is_retry && name_cstr != nullptr && type == SchedulerItem::TIMEOUT &&
      (has_cancelled_timeout_in_container_(this->items_, component, name_cstr, /* match_retry= */ true) ||
       has_cancelled_timeout_in_container_(this->to_add_, component, name_cstr, /* match_retry= */ true))) {
    // Skip scheduling - the retry was cancelled
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Skipping retry '%s' - found cancelled item", name_cstr);
#endif
    return;
  }

  // If a name is provided, do an atomic cancel-and-add (unless skip_cancel is true)
  // Cancel existing items
  if (!skip_cancel) {
    this->cancel_item_locked_(component, name_cstr, type);
  }
  // Add the new item directly to to_add_, since we already hold the lock
  this->to_add_.push_back(std::move(item));
}

void HOT Scheduler::set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::TIMEOUT, true, name, timeout, std::move(func));
}

void HOT Scheduler::set_timeout(Component *component, const std::string &name, uint32_t timeout,
                                std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::TIMEOUT, false, &name, timeout, std::move(func));
}
bool HOT Scheduler::cancel_timeout(Component *component, const std::string &name) {
  return this->cancel_item_(component, false, &name, SchedulerItem::TIMEOUT);
}
bool HOT Scheduler::cancel_timeout(Component *component, const char *name) {
  return this->cancel_item_(component, true, name, SchedulerItem::TIMEOUT);
}
void HOT Scheduler::set_interval(Component *component, const std::string &name, uint32_t interval,
                                 std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::INTERVAL, false, &name, interval, std::move(func));
}

void HOT Scheduler::set_interval(Component *component, const char *name, uint32_t interval,
                                 std::function<void()> func) {
  this->set_timer_common_(component, SchedulerItem::INTERVAL, true, name, interval, std::move(func));
}
bool HOT Scheduler::cancel_interval(Component *component, const std::string &name) {
  return this->cancel_item_(component, false, &name, SchedulerItem::INTERVAL);
}
bool HOT Scheduler::cancel_interval(Component *component, const char *name) {
  return this->cancel_item_(component, true, name, SchedulerItem::INTERVAL);
}
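
// Example usage (a sketch; component code normally goes through the
// Component::set_timeout()/set_interval() wrappers, which forward here with
// `this` as the component):
//
//   App.scheduler.set_timeout(this, "reconnect", 5000, [this]() {
//     // runs once, ~5 s from now; replaces any pending "reconnect" timeout
//   });
//   App.scheduler.set_interval(this, "poll", 1000, [this]() {
//     // runs every ~1 s until cancelled
//   });
//   App.scheduler.cancel_timeout(this, "reconnect");  // true if something was cancelled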

struct RetryArgs {
  std::function<RetryResult(uint8_t)> func;
  uint8_t retry_countdown;
  uint32_t current_interval;
  Component *component;
  std::string name;  // Keep as std::string since retry uses it dynamically
  float backoff_increase_factor;
  Scheduler *scheduler;
};

void retry_handler(const std::shared_ptr<RetryArgs> &args) {
  RetryResult const retry_result = args->func(--args->retry_countdown);
  if (retry_result == RetryResult::DONE || args->retry_countdown <= 0)
    return;
  // second execution of `func` happens after `initial_wait_time`
  args->scheduler->set_timer_common_(
      args->component, Scheduler::SchedulerItem::TIMEOUT, false, &args->name, args->current_interval,
      [args]() { retry_handler(args); }, /* is_retry= */ true);
  // backoff_increase_factor applied to third & later executions
  args->current_interval *= args->backoff_increase_factor;
}
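
// Worked example (a sketch): set_retry(initial_wait_time=100, max_attempts=3,
// backoff_increase_factor=2.0) calls func right away; if func keeps returning
// RetryResult::RETRY, it runs again ~100 ms later, then ~200 ms after that,
// and then stops because retry_countdown has reached 0.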

void HOT Scheduler::set_retry_common_(Component *component, bool is_static_string, const void *name_ptr,
                                      uint32_t initial_wait_time, uint8_t max_attempts,
                                      std::function<RetryResult(uint8_t)> func, float backoff_increase_factor) {
  const char *name_cstr = this->get_name_cstr_(is_static_string, name_ptr);

  if (name_cstr != nullptr)
    this->cancel_retry(component, name_cstr);

  if (initial_wait_time == SCHEDULER_DONT_RUN)
    return;

  ESP_LOGVV(TAG, "set_retry(name='%s', initial_wait_time=%" PRIu32 ", max_attempts=%u, backoff_factor=%0.1f)",
            name_cstr ? name_cstr : "", initial_wait_time, max_attempts, backoff_increase_factor);

  if (backoff_increase_factor < 0.0001) {
    ESP_LOGE(TAG, "backoff_factor %0.1f too small, using 1.0: %s", backoff_increase_factor, name_cstr ? name_cstr : "");
    backoff_increase_factor = 1;
  }

  auto args = std::make_shared<RetryArgs>();
  args->func = std::move(func);
  args->retry_countdown = max_attempts;
  args->current_interval = initial_wait_time;
  args->component = component;
  args->name = name_cstr ? name_cstr : "";  // Convert to std::string for RetryArgs
  args->backoff_increase_factor = backoff_increase_factor;
  args->scheduler = this;

  // First execution of `func` happens immediately - use set_timer_common_ with is_retry=true
  this->set_timer_common_(
      component, SchedulerItem::TIMEOUT, false, &args->name, 0, [args]() { retry_handler(args); },
      /* is_retry= */ true);
}

void HOT Scheduler::set_retry(Component *component, const std::string &name, uint32_t initial_wait_time,
                              uint8_t max_attempts, std::function<RetryResult(uint8_t)> func,
                              float backoff_increase_factor) {
  this->set_retry_common_(component, false, &name, initial_wait_time, max_attempts, std::move(func),
                          backoff_increase_factor);
}

void HOT Scheduler::set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
                              std::function<RetryResult(uint8_t)> func, float backoff_increase_factor) {
  this->set_retry_common_(component, true, name, initial_wait_time, max_attempts, std::move(func),
                          backoff_increase_factor);
}
bool HOT Scheduler::cancel_retry(Component *component, const std::string &name) {
  return this->cancel_retry(component, name.c_str());
}

bool HOT Scheduler::cancel_retry(Component *component, const char *name) {
  // Cancel timeouts that have the is_retry flag set
  LockGuard guard{this->lock_};
  return this->cancel_item_locked_(component, name, SchedulerItem::TIMEOUT, /* match_retry= */ true);
}
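
// Example usage (a sketch; the handshake logic is hypothetical):
//
//   App.scheduler.set_retry(this, "handshake", 100, 5,
//       [this](uint8_t attempts_remaining) -> RetryResult {
//         return this->try_handshake_() ? RetryResult::DONE : RetryResult::RETRY;
//       },
//       2.0f);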

optional<uint32_t> HOT Scheduler::next_schedule_in(uint32_t now) {
  // IMPORTANT: This method should only be called from the main thread (loop task).
  // It performs cleanup and accesses items_[0] without holding a lock, which is only
  // safe when called from the main thread. Other threads must not call this method.

  // If there are no items, return an empty optional
  if (this->cleanup_() == 0)
    return {};

  auto &item = this->items_[0];
  // Convert the fresh timestamp from the caller (usually Application::loop()) to 64-bit
  const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from caller
  if (item->next_execution_ < now_64)
    return 0;
  return item->next_execution_ - now_64;
}
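
// Example (a sketch): callers such as Application::loop() can use the returned
// value to decide how long the loop may idle before the next scheduled item is
// due; an empty optional means nothing is currently scheduled.
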
void HOT Scheduler::call(uint32_t now) {
#ifndef ESPHOME_THREAD_SINGLE
  // Process the defer queue first to guarantee FIFO execution order for deferred items.
  // Previously, defer() used the heap, which gave undefined order for equal timestamps,
  // causing race conditions on multi-core systems (ESP32, BK7200).
  // With the defer queue:
  // - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
  // - Items execute in the exact order they were deferred (FIFO guarantee)
  // - No deferred items exist in to_add_, so processing order doesn't affect correctness
  // Single-core platforms don't use this queue and fall back to the heap-based approach.
  //
  // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
  // processed here. They are removed from the queue normally via pop_front() but skipped
  // during execution by should_skip_item_(). This is intentional - no memory leak occurs.
  while (!this->defer_queue_.empty()) {
    // The outer check is done without a lock for performance. If the queue
    // appears non-empty, we lock and process an item. We don't need to check
    // empty() again inside the lock because only this thread can remove items.
    std::unique_ptr<SchedulerItem> item;
    {
      LockGuard lock(this->lock_);
      item = std::move(this->defer_queue_.front());
      this->defer_queue_.pop_front();
    }

    // Execute the callback without holding the lock to prevent deadlocks
    // if the callback tries to call defer() again
    if (!this->should_skip_item_(item.get())) {
      this->execute_item_(item.get(), now);
    }
  }
#endif /* not ESPHOME_THREAD_SINGLE */
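
  // Example (a sketch; `this` here would be some Component elsewhere in the
  // codebase): defer() is just a 0 ms timeout, so on multi-threaded platforms
  // both of these land in defer_queue_ and run in FIFO order on the next call():
  //
  //   App.scheduler.set_timeout(this, (const char *) nullptr, 0, []() { ESP_LOGD(TAG, "first"); });
  //   App.scheduler.set_timeout(this, (const char *) nullptr, 0, []() { ESP_LOGD(TAG, "second"); });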

  // Convert the fresh timestamp from the main loop to 64-bit for scheduler operations
  const auto now_64 = this->millis_64_(now);  // 'now' from parameter - fresh from Application::loop()
  this->process_to_add();

#ifdef ESPHOME_DEBUG_SCHEDULER
  static uint64_t last_print = 0;

  if (now_64 - last_print > 2000) {
    last_print = now_64;
    std::vector<std::unique_ptr<SchedulerItem>> old_items;
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    const auto last_dbg = this->last_millis_.load(std::memory_order_relaxed);
    const auto major_dbg = this->millis_major_.load(std::memory_order_relaxed);
    ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
             major_dbg, last_dbg);
#else /* not ESPHOME_THREAD_MULTI_ATOMICS */
    ESP_LOGD(TAG, "Items: count=%zu, now=%" PRIu64 " (%" PRIu16 ", %" PRIu32 ")", this->items_.size(), now_64,
             this->millis_major_, this->last_millis_);
#endif /* else ESPHOME_THREAD_MULTI_ATOMICS */
    // Cleanup before debug output
    this->cleanup_();
    while (!this->items_.empty()) {
      std::unique_ptr<SchedulerItem> item;
      {
        LockGuard guard{this->lock_};
        item = std::move(this->items_[0]);
        this->pop_raw_();
      }

      const char *name = item->get_name();
      ESP_LOGD(TAG, " %s '%s/%s' interval=%" PRIu32 " next_execution in %" PRIu64 "ms at %" PRIu64,
               item->get_type_str(), item->get_source(), name ? name : "(null)", item->interval,
               item->next_execution_ - now_64, item->next_execution_);

      old_items.push_back(std::move(item));
    }
    ESP_LOGD(TAG, "\n");

    {
      LockGuard guard{this->lock_};
      this->items_ = std::move(old_items);
      // Rebuild the heap after moving the items back
      std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
    }
  }
#endif /* ESPHOME_DEBUG_SCHEDULER */

  // If we have too many logically deleted items, rebuild the heap without them
  if (this->to_remove_ > MAX_LOGICALLY_DELETED_ITEMS) {
    // We hold the lock for the entire cleanup operation because:
    // 1. We're rebuilding the entire items_ list, so we need exclusive access throughout
    // 2. Other threads must see either the old state or the new state, not intermediate states
    // 3. The operation is already expensive (O(n)), so lock overhead is negligible
    // 4. No operations inside can block or take other locks, so no deadlock risk
    LockGuard guard{this->lock_};

    std::vector<std::unique_ptr<SchedulerItem>> valid_items;

    // Move all non-removed items to valid_items
    for (auto &item : this->items_) {
      if (!item->remove) {
        valid_items.push_back(std::move(item));
      }
    }

    // Replace items_ with the filtered list
    this->items_ = std::move(valid_items);
    // Rebuild the heap structure since the items are no longer in heap order
    std::make_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
    this->to_remove_ = 0;
  }

  // Clean up removed items before processing
  this->cleanup_();
  while (!this->items_.empty()) {
    // use scoping to limit the visibility of the `item` variable
    {
      // Don't copy by value yet
      auto &item = this->items_[0];
      if (item->next_execution_ > now_64) {
        // Hasn't reached its timeout yet; done for this call
        break;
      }
      // Don't run on failed components
      if (item->component != nullptr && item->component->is_failed()) {
        LockGuard guard{this->lock_};
        this->pop_raw_();
        continue;
      }

      // Check if the item is marked for removal
      // This handles two cases:
      // 1. The item was marked for removal after cleanup_() but before we got here
      // 2. The item is marked for removal but wasn't at the front of the heap during cleanup_()
#ifdef ESPHOME_THREAD_MULTI_NO_ATOMICS
      // Multi-threaded platforms without atomics: must take the lock to safely read the remove flag
      {
        LockGuard guard{this->lock_};
        if (is_item_removed_(item.get())) {
          this->pop_raw_();
          this->to_remove_--;
          continue;
        }
      }
#else
      // Single-threaded or multi-threaded with atomics: can check without the lock
      if (is_item_removed_(item.get())) {
        LockGuard guard{this->lock_};
        this->pop_raw_();
        this->to_remove_--;
        continue;
      }
#endif

#ifdef ESPHOME_DEBUG_SCHEDULER
      const char *item_name = item->get_name();
      ESP_LOGV(TAG, "Running %s '%s/%s' with interval=%" PRIu32 " next_execution=%" PRIu64 " (now=%" PRIu64 ")",
               item->get_type_str(), item->get_source(), item_name ? item_name : "(null)", item->interval,
               item->next_execution_, now_64);
#endif /* ESPHOME_DEBUG_SCHEDULER */

      // Warning: During callback(), a lot of things can happen, including:
      // - timeouts/intervals get added, potentially invalidating vector pointers
      // - timeouts/intervals get cancelled
      this->execute_item_(item.get(), now);
    }

    {
      LockGuard guard{this->lock_};

      // New scope; the item from before might have been moved in the vector
      auto item = std::move(this->items_[0]);
      // Only pop after the function call; this ensures we were reachable
      // during the function call and know if we were cancelled.
      this->pop_raw_();

      if (item->remove) {
        // We were removed/cancelled during the function call, stop
        this->to_remove_--;
        continue;
      }

      if (item->type == SchedulerItem::INTERVAL) {
        item->next_execution_ = now_64 + item->interval;
        // Add the new item directly to to_add_, since we already hold the lock
        this->to_add_.push_back(std::move(item));
      }
    }
  }

  this->process_to_add();
}
void HOT Scheduler::process_to_add() {
  LockGuard guard{this->lock_};
  for (auto &it : this->to_add_) {
    if (it->remove) {
      continue;
    }

    this->items_.push_back(std::move(it));
    std::push_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
  }
  this->to_add_.clear();
}
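
// Note: std::push_heap is O(log n) per item, so merging k pending items costs
// O(k log n) while keeping items_[0] the soonest-due entry at all times.
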
size_t HOT Scheduler::cleanup_() {
  // Fast path: if there's nothing to remove, just return the current size
  // Reading to_remove_ without the lock is safe because:
  // 1. We only call this from the main thread during call()
  // 2. If it's 0, there's definitely nothing to clean up
  // 3. If it becomes non-zero after we check, cleanup will happen on the next loop iteration
  // 4. Not all platforms support atomics, so we accept this race in favor of performance
  // 5. The worst case is a one-loop-iteration delay in cleanup, which is harmless
  if (this->to_remove_ == 0)
    return this->items_.size();

  // We must hold the lock for the entire cleanup operation because:
  // 1. We're modifying items_ (via pop_raw_) which requires exclusive access
  // 2. We're decrementing to_remove_ which is also modified by other threads
  //    (though all modifications are already under lock)
  // 3. Other threads read items_ when searching for items to cancel in cancel_item_locked_()
  // 4. We need a consistent view of items_ and to_remove_ throughout the operation
  // Without the lock, we could access items_ while another thread is reading it,
  // leading to race conditions
  LockGuard guard{this->lock_};
  while (!this->items_.empty()) {
    auto &item = this->items_[0];
    if (!item->remove)
      break;
    this->to_remove_--;
    this->pop_raw_();
  }
  return this->items_.size();
}
void HOT Scheduler::pop_raw_() {
  std::pop_heap(this->items_.begin(), this->items_.end(), SchedulerItem::cmp);
  this->items_.pop_back();
}

// Helper to execute a scheduler item
void HOT Scheduler::execute_item_(SchedulerItem *item, uint32_t now) {
  App.set_current_component(item->component);
  WarnIfComponentBlockingGuard guard{item->component, now};
  item->callback();
  guard.finish();
}

// Common implementation for cancel operations
bool HOT Scheduler::cancel_item_(Component *component, bool is_static_string, const void *name_ptr,
                                 SchedulerItem::Type type) {
  // Get the name as const char*
  const char *name_cstr = this->get_name_cstr_(is_static_string, name_ptr);

  // Obtain the lock because this function iterates and can be called from a non-loop task context
  LockGuard guard{this->lock_};
  return this->cancel_item_locked_(component, name_cstr, type);
}

// Helper to cancel items by name - must be called with the lock held
bool HOT Scheduler::cancel_item_locked_(Component *component, const char *name_cstr, SchedulerItem::Type type,
                                        bool match_retry) {
  // Early return if the name is invalid - no items to cancel
  if (name_cstr == nullptr) {
    return false;
  }

  size_t total_cancelled = 0;

  // Check all containers for matching items
#ifndef ESPHOME_THREAD_SINGLE
  // Only check the defer queue for timeouts (intervals never go there)
  if (type == SchedulerItem::TIMEOUT) {
    for (auto &item : this->defer_queue_) {
      if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
        this->mark_item_removed_(item.get());
        total_cancelled++;
      }
    }
  }
#endif /* not ESPHOME_THREAD_SINGLE */

  // Cancel items in the main heap
  for (auto &item : this->items_) {
    if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
      this->mark_item_removed_(item.get());
      total_cancelled++;
      this->to_remove_++;  // Track removals for heap items
    }
  }

  // Cancel items in to_add_
  for (auto &item : this->to_add_) {
    if (this->matches_item_(item, component, name_cstr, type, match_retry)) {
      this->mark_item_removed_(item.get());
      total_cancelled++;
      // Don't track removals for to_add_ items
    }
  }

  return total_cancelled > 0;
}

uint64_t Scheduler::millis_64_(uint32_t now) {
  // THREAD SAFETY NOTE:
  // This function has three implementations, selected by precompiler flags:
  // - ESPHOME_THREAD_SINGLE - runs on single-threaded platforms (ESP8266, RP2040, etc.)
  // - ESPHOME_THREAD_MULTI_NO_ATOMICS - runs on multi-threaded platforms without atomics (LibreTiny)
  // - ESPHOME_THREAD_MULTI_ATOMICS - runs on multi-threaded platforms with atomics (ESP32, HOST, etc.)
  //
  // Make sure all changes are synchronized if you edit this function.
  //
  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
  // handles out-of-order timestamps between threads, but minimizing time differences
  // helps maintain accuracy.
  //

#ifdef ESPHOME_THREAD_SINGLE
  // This is the single-core implementation.
  //
  // Single-core platforms have no concurrency, so this is a simple implementation
  // that just tracks 32-bit rollover (every 49.7 days) without any locking or atomics.

  uint16_t major = this->millis_major_;
  uint32_t last = this->last_millis_;

  // Check for rollover
  if (now < last && (last - now) > HALF_MAX_UINT32) {
    this->millis_major_++;
    major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
    ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
  }

  // Only update if time moved forward
  if (now > last) {
    this->last_millis_ = now;
  }

  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_NO_ATOMICS)
  // This is the multi-core, no-atomics implementation.
  //
  // Without atomics, this implementation uses locks more aggressively:
  // 1. Always locks when near the rollover boundary (within 10 seconds)
  // 2. Always locks when detecting a large backwards jump
  // 3. Updates without the lock in normal forward progression (accepting minor races)
  // This is less efficient but necessary without atomic operations.
  uint16_t major = this->millis_major_;
  uint32_t last = this->last_millis_;

  // Define a safe window around the rollover point (10 seconds)
  // This covers any reasonable scheduler delays or thread preemption
  static const uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds

  // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
  bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);

  if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
    // Near rollover or detected a rollover - need the lock for safety
    LockGuard guard{this->lock_};
    // Re-read with the lock held
    last = this->last_millis_;

    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // True rollover detected (happens every ~49.7 days)
      this->millis_major_++;
      major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
      ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
    }
    // Update last_millis_ while holding the lock
    this->last_millis_ = now;
  } else if (now > last) {
    // Normal case: not near rollover and time moved forward
    // Update without the lock. While this may cause minor races (microseconds of
    // backwards time movement), they're acceptable because:
    // 1. The scheduler operates at millisecond resolution, not microsecond
    // 2. We've already prevented the critical rollover race condition
    // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
    this->last_millis_ = now;
  }
  // If now <= last and we're not near rollover, don't update
  // This minimizes backwards time movement

  // Combine major (high 32 bits) and now (low 32 bits) into 64-bit time
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
  // This is the multi-core, with-atomics implementation.
  //
  // Uses atomic operations with acquire/release semantics to ensure coherent
  // reads of millis_major_ and last_millis_ across cores. Features:
  // 1. Epoch-coherency retry loop to handle concurrent updates
  // 2. Lock only taken for actual rollover detection and update
  // 3. Lock-free CAS updates for normal forward time progression
  // 4. Memory ordering ensures cores see consistent time values

  for (;;) {
    uint16_t major = this->millis_major_.load(std::memory_order_acquire);

    /*
     * Acquire so that if we later decide **not** to take the lock we still
     * observe a `millis_major_` value coherent with the loaded `last_millis_`.
     * The acquire load ensures any later read of `millis_major_` sees its
     * corresponding increment.
     */
    uint32_t last = this->last_millis_.load(std::memory_order_acquire);

    // If we might be near a rollover (large backwards jump), take the lock for the entire operation
    // This ensures rollover detection and the last_millis_ update are atomic together
    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // Potential rollover - need the lock for atomic rollover detection + update
      LockGuard guard{this->lock_};
      // Re-read with the lock held; the mutex already provides ordering
      last = this->last_millis_.load(std::memory_order_relaxed);

      if (now < last && (last - now) > HALF_MAX_UINT32) {
        // True rollover detected (happens every ~49.7 days)
        this->millis_major_.fetch_add(1, std::memory_order_relaxed);
        major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
        ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
      }
      /*
       * Update last_millis_ while holding the lock to prevent races.
       * Publish the new low word *after* bumping `millis_major_` (done above)
       * so readers never see a mismatched pair.
       */
      this->last_millis_.store(now, std::memory_order_release);
    } else {
      // Normal case: try a lock-free update, but only allow forward movement within the same epoch
      // This prevents accidentally moving backwards across a rollover boundary
      while (now > last && (now - last) < HALF_MAX_UINT32) {
        if (this->last_millis_.compare_exchange_weak(last, now,
                                                     std::memory_order_release,    // success
                                                     std::memory_order_relaxed)) {  // failure
          break;
        }
        // CAS failure means no data was published; relaxed is fine
        // `last` is automatically updated by compare_exchange_weak if it fails
      }
    }
    uint16_t major_end = this->millis_major_.load(std::memory_order_relaxed);
    if (major_end == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
  // Unreachable - the loop always returns when major_end == major
  __builtin_unreachable();

#else
#error \
    "No platform threading model defined. One of ESPHOME_THREAD_SINGLE, ESPHOME_THREAD_MULTI_NO_ATOMICS, or ESPHOME_THREAD_MULTI_ATOMICS must be defined."
#endif
}
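
// Worked example (a sketch): after one 32-bit rollover, millis_major_ == 1, so
// a raw now == 5000 maps to (1ULL << 32) + 5000 = 4294972296 ms. The 64-bit
// timestamps therefore keep increasing monotonically across the ~49.7-day wrap.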

bool HOT Scheduler::SchedulerItem::cmp(const std::unique_ptr<SchedulerItem> &a,
                                       const std::unique_ptr<SchedulerItem> &b) {
  return a->next_execution_ > b->next_execution_;
}
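
// Note: std::make_heap/push_heap/pop_heap build a max-heap with respect to cmp,
// so comparing with `>` yields a min-heap on next_execution_: items_[0] is
// always the soonest-due item.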

}  // namespace esphome