Fixes a regression introduced in 3af006efb39ad0b7aa7c0401c93329b654bca617 where WFE never blocked in the `mp_wfe_or_timeout()` function and would busy-wait instead. This increases power consumption measurably. The root cause is that `mp_wfe_or_timeout()` calls soft timer functions that (after the regression) call `recursive_mutex_enter()` and `recursive_mutex_exit()`. The exit calls `lock_internal_spin_unlock_with_notify()`, and the default pico-sdk implementation of this macro issues a SEV which negates the WFE that follows it, meaning the CPU never suspends. See https://forums.raspberrypi.com/viewtopic.php?p=2233908 for more details. The fix in this commit adds a custom "nowait" variant mutex that doesn't do WFE/SEV, and uses this one for PendSV. This will use more power when there's contention for the PendSV mutex as the other core will spin, but this shouldn't happen very often. This work was funded through GitHub Sponsors. Signed-off-by: Angus Gratton <angus@redyak.com.au>
/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
|
|
|
|
#include "mutex_extra.h"
|
|
|
|
// These functions are taken from lib/pico-sdk/src/common/pico_sync/mutex.c and modified
// so that they atomically obtain the mutex and disable interrupts.
|
|
|
|
// Acquire the recursive mutex and return with interrupts disabled.
// Based on pico-sdk recursive_mutex_enter_blocking(), but instead of
// restoring the interrupt state saved by spin_lock_blocking(), it is
// returned to the caller — so the caller ends up holding the mutex with
// interrupts masked.  Pass the returned value to
// recursive_mutex_exit_and_restore_interrupts() when releasing.
uint32_t __time_critical_func(recursive_mutex_enter_blocking_and_disable_interrupts)(recursive_mutex_t * mtx) {
    lock_owner_id_t caller = lock_get_caller_owner_id();
    do {
        // spin_lock_blocking() disables interrupts and returns the prior state.
        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
        if (mtx->owner == caller || !lock_is_owner_id_valid(mtx->owner)) {
            // Unowned, or already owned by this caller (recursive entry).
            mtx->owner = caller;
            uint __unused total = ++mtx->enter_count;
            // Release the spin lock WITHOUT restoring interrupts: leaving
            // them disabled is this function's contract.
            spin_unlock_unsafe(mtx->core.spin_lock);
            assert(total); // check for overflow
            return save;
        }
        // Held by another owner: drop the spin lock, restore interrupts and
        // wait (WFE) for a release notification, then retry.
        lock_internal_spin_unlock_with_wait(&mtx->core, save);
    } while (true);
}
|
|
|
|
// Release the recursive mutex and restore the interrupt state previously
// returned by recursive_mutex_enter_blocking_and_disable_interrupts().
// Based on pico-sdk recursive_mutex_exit(), except `save` is supplied by
// the caller rather than captured here.
void __time_critical_func(recursive_mutex_exit_and_restore_interrupts)(recursive_mutex_t * mtx, uint32_t save) {
    // "Unsafe" lock: no interrupt save/restore — interrupts are already
    // disabled from the matching enter call.
    spin_lock_unsafe_blocking(mtx->core.spin_lock);
    assert(lock_is_owner_id_valid(mtx->owner));
    assert(mtx->enter_count);
    if (!--mtx->enter_count) {
        // Outermost exit of the recursion: mark the mutex unowned.
        mtx->owner = LOCK_INVALID_OWNER_ID;
    }
    // Release the spin lock, restore `save`, and notify (SEV) any core
    // waiting in lock_internal_spin_unlock_with_wait().
    lock_internal_spin_unlock_with_notify(&mtx->core, save);
}
|
|
|
|
// Acquire the "nowait" recursive mutex by busy-waiting.  This variant
// deliberately never executes WFE while waiting (and its exit never issues
// SEV), so it cannot interfere with WFE-based sleeping elsewhere, e.g.
// mp_wfe_or_timeout().
void __time_critical_func(recursive_mutex_nowait_enter_blocking)(recursive_mutex_nowait_t * mtx) {
    for (;;) {
        if (recursive_mutex_try_enter(&mtx->mutex, NULL)) {
            return;
        }
        // Spin until the other core releases the mutex.
        tight_loop_contents();
    }
}
|
|
|
|
// Release the "nowait" recursive mutex.  Unlike the standard pico-sdk exit
// path, no SEV is issued here, so a WFE executed elsewhere (e.g. in
// mp_wfe_or_timeout()) is not spuriously woken.  The matching enter
// busy-waits instead of using WFE, so no notification is needed.
void __time_critical_func(recursive_mutex_nowait_exit)(recursive_mutex_nowait_t * wrapper) {
    recursive_mutex_t *mtx = &wrapper->mutex;
    // Rest of this function is a copy of recursive_mutex_exit(), with
    // lock_internal_spin_unlock_with_notify() removed.
    uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
    assert(lock_is_owner_id_valid(mtx->owner));
    assert(mtx->enter_count);
    if (!--mtx->enter_count) {
        // Outermost exit of the recursion: mark the mutex unowned.
        mtx->owner = LOCK_INVALID_OWNER_ID;
    }
    // Plain spin_unlock: restores the saved interrupt state but does NOT SEV.
    spin_unlock(mtx->core.spin_lock, save);
}
|