mp_thread_begin_atomic_section() is expected to be recursive (i.e. for nested machine.disable_irq() calls, or if Python code calls disable_irq() and then the Python runtime calls mp_handle_pending() which also enters an atomic section to check the scheduler state). On rp2 when not using core1 the atomic sections are recursive. However when core1 was active (i.e. _thread) then there was a bug that caused the core to live-lock if an atomic section recursed. Adds a test case specifically for mutual exclusion and recursive atomic sections when using two threads. Without this fix the test immediately hangs on rp2. This work was funded through GitHub Sponsors. Signed-off-by: Angus Gratton <angus@redyak.com.au>
36 lines
1.3 KiB
C
36 lines
1.3 KiB
C
/*
|
|
* Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
|
|
*
|
|
* SPDX-License-Identifier: BSD-3-Clause
|
|
*/
|
|
|
|
#include "mutex_extra.h"
|
|
|
|
// These functions are taken from lib/pico-sdk/src/common/pico_sync/mutex.c and modified
|
|
// so that they atomically obtain the mutex and disable interrupts.
|
|
|
|
// Acquire the recursive mutex, blocking until it is available, and return
// with interrupts disabled on the calling core.
//
// Returns the interrupt state ("save" value) captured when the spin lock was
// taken; the caller must pass this same value to the matching
// recursive_mutex_exit_and_restore_interrupts() call so interrupts are
// restored only when the mutex is released.
//
// Unlike the stock pico-sdk recursive_mutex_enter_blocking(), the successful
// path releases the spin lock with spin_unlock_unsafe() — which does NOT
// restore the saved interrupt state — so interrupts stay disabled for the
// whole time the mutex is held. Recursion is supported: re-entry by the
// current owner just increments enter_count.
uint32_t __time_critical_func(recursive_mutex_enter_blocking_and_disable_interrupts)(recursive_mutex_t * mtx) {
    lock_owner_id_t caller = lock_get_caller_owner_id();
    do {
        // Take the spin lock; 'save' records the interrupt state prior to
        // disabling interrupts.
        uint32_t save = spin_lock_blocking(mtx->core.spin_lock);
        if (mtx->owner == caller || !lock_is_owner_id_valid(mtx->owner)) {
            // Mutex is free, or already held by this caller (recursive entry).
            mtx->owner = caller;
            uint __unused total = ++mtx->enter_count;
            // Release the spin lock but deliberately leave interrupts disabled.
            spin_unlock_unsafe(mtx->core.spin_lock);
            assert(total); // check for overflow
            return save;
        }
        // Mutex held by a different owner: restore interrupts, wait for a
        // notify event from the releasing core, then retry.
        lock_internal_spin_unlock_with_wait(&mtx->core, save);
    } while (true);
}
|
|
|
|
// Release one recursion level of the mutex and restore the interrupt state.
//
// 'save' must be the value returned by the matching call to
// recursive_mutex_enter_blocking_and_disable_interrupts(). Ownership is only
// cleared when the outermost (final) recursion level is released; the final
// unlock-with-notify restores the saved interrupt state and wakes any core
// waiting inside the enter function.
void __time_critical_func(recursive_mutex_exit_and_restore_interrupts)(recursive_mutex_t * mtx, uint32_t save) {
    spin_lock_unsafe_blocking(mtx->core.spin_lock);
    assert(lock_is_owner_id_valid(mtx->owner));
    assert(mtx->enter_count);
    if (!--mtx->enter_count) {
        // Outermost exit: mark the mutex free for other owners.
        mtx->owner = LOCK_INVALID_OWNER_ID;
    }
    // Release the spin lock, restore the saved interrupt state, and send an
    // event so waiters blocked in the enter function re-check the mutex.
    lock_internal_spin_unlock_with_notify(&mtx->core, save);
}
|