patch-2.4.4 linux/arch/sh/kernel/semaphore.c
- Lines: 163
- Date: Tue Apr 17 17:19:25 2001
- Orig file: v2.4.3/linux/arch/sh/kernel/semaphore.c
- Orig date: Sat Nov 11 19:02:40 2000
diff -u --recursive --new-file v2.4.3/linux/arch/sh/kernel/semaphore.c linux/arch/sh/kernel/semaphore.c
@@ -135,162 +135,3 @@
{
return waking_non_zero_trylock(sem);
}
-
-/* Called when someone has done an up that transitioned from
- * negative to non-negative, meaning that the lock has been
- * granted to whomever owned the bias.
- */
-struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem)
-{
- if (xchg(&sem->read_bias_granted, 1))
- BUG();
- wake_up(&sem->wait);
- return sem;
-}
-
-struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem)
-{
- if (xchg(&sem->write_bias_granted, 1))
- BUG();
- wake_up(&sem->write_bias_wait);
- return sem;
-}
-
-struct rw_semaphore * __rwsem_wake(struct rw_semaphore *sem)
-{
- if (atomic_read(&sem->count) == 0)
- return rwsem_wake_writer(sem);
- else
- return rwsem_wake_readers(sem);
-}
-
-struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */
-
- for (;;) {
- if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
- break;
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (!sem->read_bias_granted)
- schedule();
- }
-
- remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
-
- return sem;
-}
-
-struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */
-
- for (;;) {
- if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
- break;
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (!sem->write_bias_granted)
- schedule();
- }
-
- remove_wait_queue(&sem->write_bias_wait, &wait);
- tsk->state = TASK_RUNNING;
-
- /* if the lock is currently unbiased, awaken the sleepers
- * FIXME: this wakes up the readers early in a bit of a
- * stampede -> bad!
- */
- if (atomic_read(&sem->count) >= 0)
- wake_up(&sem->wait);
-
- return sem;
-}
-
-/* Wait for the lock to become unbiased. Readers
- * are non-exclusive. =)
- */
-struct rw_semaphore *down_read_failed(struct rw_semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- __up_read(sem); /* this takes care of granting the lock */
-
- add_wait_queue(&sem->wait, &wait);
-
- while (atomic_read(&sem->count) < 0) {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&sem->count) >= 0)
- break;
- schedule();
- }
-
- remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
-
- return sem;
-}
-
-/* Wait for the lock to become unbiased. Since we're
- * a writer, we'll make ourselves exclusive.
- */
-struct rw_semaphore *down_write_failed(struct rw_semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- __up_write(sem); /* this takes care of granting the lock */
-
- add_wait_queue_exclusive(&sem->wait, &wait);
-
- while (atomic_read(&sem->count) < 0) {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (atomic_read(&sem->count) >= 0)
- break; /* we must attempt to acquire or bias the lock */
- schedule();
- }
-
- remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
-
- return sem;
-}
-
-struct rw_semaphore *__down_read(struct rw_semaphore *sem, int carry)
-{
- if (carry) {
- int saved, new;
-
- do {
- down_read_failed(sem);
- saved = atomic_read(&sem->count);
- if ((new = atomic_dec_return(&sem->count)) >= 0)
- return sem;
- } while (!(new < 0 && saved >=0));
- }
-
- return down_read_failed_biased(sem);
-}
-
-struct rw_semaphore *__down_write(struct rw_semaphore *sem, int carry)
-{
- if (carry) {
- int saved, new;
-
- do {
- down_write_failed(sem);
- saved = atomic_read(&sem->count);
- if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count) ) == 0)
- return sem;
- } while (!(new < 0 && saved >=0));
- }
-
- return down_write_failed_biased(sem);
-}
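The hunk above removes the SuperH copy of the bias-based rw_semaphore slow paths (rwsem_wake_readers(), rwsem_wake_writer(), __rwsem_wake() and the down_*_failed helpers). As a reading aid only, the following is a minimal userspace sketch of the wake-side decision the removed __rwsem_wake() made: a count of exactly zero meant a single writer owned the bias, anything else meant readers. The mock_rwsem structure and the string return value are illustrative assumptions for this sketch, not the kernel API.

/* Illustrative sketch only: a simplified stand-in for the removed
 * __rwsem_wake() decision.  mock_rwsem is an assumption made for this
 * example, not the kernel's struct rw_semaphore. */
#include <stdio.h>

struct mock_rwsem {
	int count;               /* semaphore count after the final up() */
	int read_bias_granted;   /* would be set by rwsem_wake_readers() */
	int write_bias_granted;  /* would be set by rwsem_wake_writer() */
};

/* Mirrors the branch in the removed __rwsem_wake(): count == 0 means the
 * bias was owned by one writer, any other value means readers own it. */
static const char *mock_rwsem_wake(struct mock_rwsem *sem)
{
	if (sem->count == 0) {
		sem->write_bias_granted = 1;   /* grant to the waiting writer */
		return "wake the writer";
	}
	sem->read_bias_granted = 1;            /* grant to the waiting readers */
	return "wake the readers";
}

int main(void)
{
	struct mock_rwsem w = { .count = 0 };  /* writer owned the bias */
	struct mock_rwsem r = { .count = 3 };  /* readers owned the bias */

	printf("count=%d -> %s\n", w.count, mock_rwsem_wake(&w));
	printf("count=%d -> %s\n", r.count, mock_rwsem_wake(&r));
	return 0;
}

Compiled with any C compiler, this prints which wait queue each state would wake, which is the same branch the removed kernel code took before this patch.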