(ARM v7) Tracing the semaphore and mutex code

First, the semaphore acquire path. down() takes the semaphore's internal spinlock, then either decrements the count (fast path, resource available) or calls __down() to put the caller to sleep:

void down(struct semaphore *sem)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&sem->lock, flags);   /* protects count and wait_list */
    if (likely(sem->count > 0))
        sem->count--;                           /* fast path: resource available */
    else
        __down(sem);                            /* slow path: sleep until up() wakes us */
    raw_spin_unlock_irqrestore(&sem->lock, flags);
}

__down() simply forwards to the common sleeping routine, with an uninterruptible task state and no timeout:

static noinline void __sched __down(struct semaphore *sem)
{
    __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

__down_common() queues the current task on the semaphore's wait list and loops: it sleeps in schedule_timeout() until up() marks the waiter as woken, the timeout expires, or a signal arrives. It is entered with sem->lock held and interrupts disabled (taken in down()), which is why it drops and re-takes the lock around the sleep:

static inline int __sched __down_common(struct semaphore *sem, long state,
                                long timeout)
{
    struct task_struct *task = current;
    struct semaphore_waiter waiter;             /* lives on this task's kernel stack */

    list_add_tail(&waiter.list, &sem->wait_list);
    waiter.task = task;
    waiter.up = false;

    for (;;) {
        if (signal_pending_state(state, task))
            goto interrupted;
        if (unlikely(timeout <= 0))
            goto timed_out;
        __set_task_state(task, state);
        raw_spin_unlock_irq(&sem->lock);        /* drop the lock taken in down() */
        timeout = schedule_timeout(timeout);
        raw_spin_lock_irq(&sem->lock);
        if (waiter.up)                          /* set by __up() when we are woken */
            return 0;
    }

 timed_out:
    list_del(&waiter.list);
    return -ETIME;

 interrupted:
    list_del(&waiter.list);
    return -EINTR;
}
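
For context, here is a minimal usage sketch (not from the original trace; my_sem and my_op are made-up names). down_interruptible() ends up in __down_common() with TASK_INTERRUPTIBLE, which is where the -EINTR return above comes from:

#include <linux/semaphore.h>

static struct semaphore my_sem;             /* hypothetical: protects one shared resource */

static int my_init(void)
{
    sema_init(&my_sem, 1);                  /* count = 1: behaves like a sleeping lock */
    return 0;
}

static int my_op(void)
{
    if (down_interruptible(&my_sem))        /* may return -EINTR if a signal arrives */
        return -EINTR;

    /* ... critical section ... */

    up(&my_sem);                            /* wakes the first waiter queued by __down_common() */
    return 0;
}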



Now the mutex side. mutex_lock() runs the might_sleep() debug hook, tries the atomic fastpath, and only falls into the slowpath on contention:

void __sched mutex_lock(struct mutex *lock)
{
    might_sleep();
    /*
     * The locking fastpath is the 1->0 transition from
     * 'unlocked' into 'locked' state.
     */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
    mutex_set_owner(lock);
}

With the sleep-in-atomic debug checks (CONFIG_DEBUG_ATOMIC_SLEEP) and CONFIG_PREEMPT_VOLUNTARY disabled, might_sleep() compiles away to nothing:

# define might_sleep() do { might_resched(); } while (0)
# define might_resched() do { } while (0)

The fastpath decrements lock->count with atomic_dec_return(). An unlocked mutex has count == 1, so a result of 0 means the lock was taken cleanly; a negative result means someone else already holds it, and fail_fn (here __mutex_lock_slowpath) is called:

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    if (unlikely(atomic_dec_return(count) < 0))     /* 1 -> 0: acquired; < 0: contended */
        fail_fn(count);
}

atomic_dec_return() is just atomic_sub_return(1, v); on ARM that function is generated by the ATOMIC_OP_RETURN() macro, whose invocation takes three arguments, matching the definition that follows:

#define atomic_dec_return(v)        atomic_sub_return(1, (v))
ATOMIC_OP_RETURN(sub, -=, sub)
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                \
static inline int atomic_##op##_return(int i, atomic_t *v)        \
{                                    \
    unsigned long tmp;                        \
    int result;                            \
                                    \
    smp_mb();                            \
    prefetchw(&v->counter);                        \
                                    \
    __asm__ __volatile__("@ atomic_" #op "_return\n"        \
"1:    ldrex    %0, [%3]\n"        /* result = v->counter (exclusive load)        */    \
"    " #asm_op "    %0, %0, %4\n"        /* result = result <op> i                      */    \
"    strex    %1, %0, [%3]\n"        /* try to store result back; tmp = 0 on success */    \
"    teq    %1, #0\n"            /* did the exclusive store succeed?            */    \
"    bne    1b"                /* no: another CPU got in between, retry       */    \
    : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)        \
    : "r" (&v->counter), "Ir" (i)                    \
    : "cc");                            \
                                    \
    smp_mb();                            \
                                    \
    return result;                            \
}
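
The ldrex/strex pair forms a retry loop: if any other CPU writes v->counter between the exclusive load and the exclusive store, strex fails and the sequence repeats. As a rough user-space analogy (my sketch, not kernel code), the GCC/Clang __atomic builtins give the same "subtract and return the new value" semantics, and on ARMv7 the compiler typically emits a similar ldrex/strex loop for them:

#include <stdio.h>

static int count = 1;                       /* plays the role of mutex->count: 1 == unlocked */

/* Same effect as atomic_dec_return(): atomically subtract 1 and return the new value. */
static int my_dec_return(int *v)
{
    return __atomic_sub_fetch(v, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
    if (my_dec_return(&count) < 0)
        printf("contended: the kernel would call fail_fn (the slowpath)\n");
    else
        printf("fastpath: count went 1 -> 0, lock acquired\n");
    return 0;
}
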
If the fastpath loses the race, __mutex_lock_slowpath() recovers the struct mutex from its count field and calls the common slowpath, which (after possibly spinning on the current owner) queues the task on the mutex's wait list and sleeps uninterruptibly until the lock is released:

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);

    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
                NULL, _RET_IP_, NULL, 0);
}
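
To close the mutex side, a minimal usage sketch (again not from the original trace; my_lock and my_count are made-up names):

#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);               /* count starts at 1, i.e. unlocked */
static int my_count;

static void my_update(void)
{
    mutex_lock(&my_lock);                   /* usually just one atomic_dec_return(); slowpath only on contention */
    my_count++;
    mutex_unlock(&my_lock);                 /* 0 -> 1 transition; wakes a waiter if the slowpath queued one */
}

Unlike the semaphore above, a mutex has a single owner and must be unlocked by the same task that locked it.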