patch-2.4.4 linux/arch/ia64/kernel/semaphore.c
- Lines: 181
- Date: Tue Apr 17 17:19:24 2001
- Orig file: v2.4.3/linux/arch/ia64/kernel/semaphore.c
- Orig date: Sat Nov 11 19:02:40 2000
diff -u --recursive --new-file v2.4.3/linux/arch/ia64/kernel/semaphore.c linux/arch/ia64/kernel/semaphore.c
@@ -155,180 +155,3 @@
 	spin_unlock_irqrestore(&semaphore_lock, flags);
 	return 1;
 }
-
-/*
- * Helper routines for rw semaphores. These could be optimized some
- * more, but since they're off the critical path, I prefer clarity for
- * now...
- */
-
-/*
- * This gets called if we failed to acquire the lock, but we're biased
- * to acquire the lock by virtue of causing the count to change from 0
- * to -1. Being biased, we sleep and attempt to grab the lock until
- * we succeed. When this function returns, we own the lock.
- */
-static inline void
-down_read_failed_biased (struct rw_semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-
-        add_wait_queue(&sem->wait, &wait);  /* put ourselves at the head of the list */
-
-        for (;;) {
-                if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
-                        break;
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (!sem->read_bias_granted)
-                        schedule();
-        }
-        remove_wait_queue(&sem->wait, &wait);
-        tsk->state = TASK_RUNNING;
-}
-
-/*
- * This gets called if we failed to acquire the lock and we are not
- * biased to acquire the lock. We undo the decrement that was
- * done earlier, go to sleep, and then attempt to re-acquire the
- * lock afterwards.
- */
-static inline void
-down_read_failed (struct rw_semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-
-        /*
-         * Undo the decrement we did in down_read() and check if we
-         * need to wake up someone.
-         */
-        __up_read(sem);
-
-        add_wait_queue(&sem->wait, &wait);
-        while (sem->count < 0) {
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (sem->count >= 0)
-                        break;
-                schedule();
-        }
-        remove_wait_queue(&sem->wait, &wait);
-        tsk->state = TASK_RUNNING;
-}
-
-/*
- * Wait for the lock to become unbiased. Readers are non-exclusive.
- */
-void
-__down_read_failed (struct rw_semaphore *sem, long count)
-{
-        while (1) {
-                if (count == -1) {
-                        down_read_failed_biased(sem);
-                        return;
-                }
-                /* unbiased */
-                down_read_failed(sem);
-
-                count = ia64_fetch_and_add(-1, &sem->count);
-                if (count >= 0)
-                        return;
-        }
-}
-
-static inline void
-down_write_failed_biased (struct rw_semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-
-        /* put ourselves at the end of the list */
-        add_wait_queue_exclusive(&sem->write_bias_wait, &wait);
-
-        for (;;) {
-                if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
-                        break;
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (!sem->write_bias_granted)
-                        schedule();
-        }
-
-        remove_wait_queue(&sem->write_bias_wait, &wait);
-        tsk->state = TASK_RUNNING;
-
-        /*
-         * If the lock is currently unbiased, awaken the sleepers
-         * FIXME: this wakes up the readers early in a bit of a
-         * stampede -> bad!
-         */
-        if (sem->count >= 0)
-                wake_up(&sem->wait);
-}
-
-
-static inline void
-down_write_failed (struct rw_semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-
-        __up_write(sem);  /* this takes care of granting the lock */
-
-        add_wait_queue_exclusive(&sem->wait, &wait);
-
-        while (sem->count < 0) {
-                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-                if (sem->count >= 0)
-                        break;  /* we must attempt to acquire or bias the lock */
-                schedule();
-        }
-
-        remove_wait_queue(&sem->wait, &wait);
-        tsk->state = TASK_RUNNING;
-}
-
-
-/*
- * Wait for the lock to become unbiased. Since we're a writer, we'll
- * make ourselves exclusive.
- */
-void
-__down_write_failed (struct rw_semaphore *sem, long count)
-{
-        long old_count;
-
-        while (1) {
-                if (count == -RW_LOCK_BIAS) {
-                        down_write_failed_biased(sem);
-                        return;
-                }
-                down_write_failed(sem);
-
-                do {
-                        old_count = sem->count;
-                        count = old_count - RW_LOCK_BIAS;
-                } while (cmpxchg_acq(&sem->count, old_count, count) != old_count);
-
-                if (count == 0)
-                        return;
-        }
-}
-
-void
-__rwsem_wake (struct rw_semaphore *sem, long count)
-{
-        wait_queue_head_t *wq;
-
-        if (count == 0) {
-                /* wake a writer */
-                if (xchg(&sem->write_bias_granted, 1))
-                        BUG();
-                wq = &sem->write_bias_wait;
-        } else {
-                /* wake reader(s) */
-                if (xchg(&sem->read_bias_granted, 1))
-                        BUG();
-                wq = &sem->wait;
-        }
-        wake_up(wq);  /* wake up everyone on the wait queue */
-}