diff --git a/core/include/mutex.h b/core/include/mutex.h
index 633934a39..0e516bb0d 100644
--- a/core/include/mutex.h
+++ b/core/include/mutex.h
@@ -22,6 +22,7 @@
 #define MUTEX_H_
 
 #include "priority_queue.h"
+#include "atomic.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -37,7 +38,7 @@ typedef struct mutex_t {
      * never be changed by the user.**
      * @internal
      */
-    unsigned int val;
+    atomic_int_t val;
     /**
      * @brief The process waiting queue of the mutex. **Must never be changed
      * by the user.**
@@ -50,7 +51,7 @@ typedef struct mutex_t {
  * @brief Static initializer for mutex_t.
  * @details This initializer is preferable to mutex_init().
  */
-#define MUTEX_INIT { 0, PRIORITY_QUEUE_INIT }
+#define MUTEX_INIT { ATOMIC_INIT(0), PRIORITY_QUEUE_INIT }
 
 /**
  * @brief Initializes a mutex object.
diff --git a/core/mutex.c b/core/mutex.c
index 970b1d967..7d8dcb513 100644
--- a/core/mutex.c
+++ b/core/mutex.c
@@ -14,6 +14,7 @@
  * @brief Kernel mutex implementation
  *
  * @author Kaspar Schleiser
+ * @author Joakim Gebart
  *
  * @}
  */
@@ -37,15 +38,15 @@ static void mutex_wait(struct mutex_t *mutex);
 
 int mutex_trylock(struct mutex_t *mutex)
 {
-    DEBUG("%s: trylocking to get mutex. val: %u\n", sched_active_thread->name, mutex->val);
-    return (atomic_set_return(&mutex->val, 1) == 0);
+    DEBUG("%s: trylocking to get mutex. val: %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
+    return atomic_set_to_one(&mutex->val);
 }
 
 void mutex_lock(struct mutex_t *mutex)
 {
-    DEBUG("%s: trying to get mutex. val: %u\n", sched_active_thread->name, mutex->val);
+    DEBUG("%s: trying to get mutex. val: %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
 
-    if (atomic_set_return(&mutex->val, 1) != 0) {
+    if (atomic_set_to_one(&mutex->val) == 0) {
         /* mutex was locked. */
         mutex_wait(mutex);
     }
@@ -54,12 +55,11 @@ void mutex_lock(struct mutex_t *mutex)
 static void mutex_wait(struct mutex_t *mutex)
 {
     unsigned irqstate = disableIRQ();
-    DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, mutex->val);
+    DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
 
-    if (mutex->val == 0) {
+    if (atomic_set_to_one(&mutex->val)) {
         /* somebody released the mutex. return. */
-        mutex->val = 1;
-        DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, mutex->val);
+        DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val));
         restoreIRQ(irqstate);
         return;
     }
@@ -84,10 +84,10 @@ static void mutex_wait(struct mutex_t *mutex)
 
 void mutex_unlock(struct mutex_t *mutex)
 {
-    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid "\n", sched_active_thread->name, mutex->val, sched_active_pid);
+    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid "\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
     unsigned irqstate = disableIRQ();
 
-    if (mutex->val != 0) {
+    if (ATOMIC_VALUE(mutex->val) != 0) {
         priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
         if (next) {
             tcb_t *process = (tcb_t *) next->data;
@@ -97,7 +97,7 @@ void mutex_unlock(struct mutex_t *mutex)
             sched_switch(process->priority);
         }
         else {
-            mutex->val = 0;
+            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
         }
     }
 
@@ -106,10 +106,10 @@ void mutex_unlock(struct mutex_t *mutex)
 
 void mutex_unlock_and_sleep(struct mutex_t *mutex)
 {
-    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, mutex->val, sched_active_pid);
+    DEBUG("%s: unlocking mutex. val: %u pid: %" PRIkernel_pid ", and taking a nap\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val), sched_active_pid);
     unsigned irqstate = disableIRQ();
 
-    if (mutex->val != 0) {
+    if (ATOMIC_VALUE(mutex->val) != 0) {
         priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
         if (next) {
             tcb_t *process = (tcb_t *) next->data;
@@ -117,7 +117,7 @@ void mutex_unlock_and_sleep(struct mutex_t *mutex)
             sched_set_status(process, STATUS_PENDING);
         }
         else {
-            mutex->val = 0;
+            ATOMIC_VALUE(mutex->val) = 0; /* This is safe, interrupts are disabled */
         }
     }
 
     DEBUG("%s: going to sleep.\n", sched_active_thread->name);