patch-2.4.20 linux-2.4.20/net/sunrpc/sched.c
- Lines: 387
- Date: Thu Nov 28 15:53:16 2002
- Orig file: linux-2.4.19/net/sunrpc/sched.c
- Orig date: Fri Aug 2 17:39:46 2002
diff -urN linux-2.4.19/net/sunrpc/sched.c linux-2.4.20/net/sunrpc/sched.c
@@ -41,23 +41,23 @@
* handler, or while executing another RPC task, it is put on
* schedq, and rpciod is woken up.
*/
-static struct rpc_wait_queue schedq = RPC_INIT_WAITQ("schedq");
+static RPC_WAITQ(schedq, "schedq");
/*
* RPC tasks that create another task (e.g. for contacting the portmapper)
* will wait on this queue for their child's completion
*/
-static struct rpc_wait_queue childq = RPC_INIT_WAITQ("childq");
+static RPC_WAITQ(childq, "childq");
/*
* RPC tasks sit here while waiting for conditions to improve.
*/
-static struct rpc_wait_queue delay_queue = RPC_INIT_WAITQ("delayq");
+static RPC_WAITQ(delay_queue, "delayq");
/*
* All RPC tasks are linked into this list
*/
-static struct rpc_task * all_tasks;
+static LIST_HEAD(all_tasks);
/*
* rpciod-related stuff
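The static initializers above move from hand-rolled struct assignment to the LIST_HEAD() and RPC_WAITQ() macros. LIST_HEAD() is the stock linux/list.h idiom; RPC_WAITQ() is its sunrpc counterpart, whose body below is an assumption reconstructed from the usage in this patch rather than a quote of the 2.4.20 header:

	/* From include/linux/list.h (2.4): an empty circular doubly-linked
	 * list is a head whose next and prev point at itself. */
	#define LIST_HEAD_INIT(name) { &(name), &(name) }
	#define LIST_HEAD(name) \
		struct list_head name = LIST_HEAD_INIT(name)

	/* Assumed sunrpc counterpart: a wait queue is now an embedded list
	 * head plus the queue's debug name. */
	#define RPC_WAITQ_INIT(var, qname) \
		((struct rpc_wait_queue) { LIST_HEAD_INIT((var).tasks), qname })
	#define RPC_WAITQ(var, qname) \
		struct rpc_wait_queue var = RPC_WAITQ_INIT(var, qname)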
@@ -73,7 +73,7 @@
* Spinlock for wait queues. Access to the latter also has to be
* interrupt-safe in order to allow timers to wake up sleeping tasks.
*/
-spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
/*
* Spinlock for other critical sections of code.
*/
@@ -157,7 +157,7 @@
void rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
spin_lock_bh(&rpc_queue_lock);
- if (!(RPC_IS_RUNNING(task) || task->tk_wakeup))
+ if (!RPC_IS_RUNNING(task))
__rpc_add_timer(task, timer);
spin_unlock_bh(&rpc_queue_lock);
}
@@ -194,9 +194,9 @@
return -EWOULDBLOCK;
}
if (RPC_IS_SWAPPER(task))
- rpc_insert_list(&queue->task, task);
+ list_add(&task->tk_list, &queue->tasks);
else
- rpc_append_list(&queue->task, task);
+ list_add_tail(&task->tk_list, &queue->tasks);
task->tk_rpcwait = queue;
dprintk("RPC: %4d added to queue %p \"%s\"\n",
@@ -228,7 +228,7 @@
if (!queue)
return;
- rpc_remove_list(&queue->task, task);
+ list_del(&task->tk_list);
task->tk_rpcwait = NULL;
dprintk("RPC: %4d removed from queue %p \"%s\"\n",
@@ -358,27 +358,10 @@
spin_unlock_bh(&rpc_queue_lock);
}
-void
-rpc_sleep_locked(struct rpc_wait_queue *q, struct rpc_task *task,
- rpc_action action, rpc_action timer)
-{
- /*
- * Protect the queue operations.
- */
- spin_lock_bh(&rpc_queue_lock);
- __rpc_sleep_on(q, task, action, timer);
- __rpc_lock_task(task);
- spin_unlock_bh(&rpc_queue_lock);
-}
-
/**
* __rpc_wake_up_task - wake up a single rpc_task
* @task: task to be woken up
*
- * If the task is locked, it is merely removed from the queue, and
- * 'task->tk_wakeup' is set. rpc_unlock_task() will then ensure
- * that it is woken up as soon as the lock count goes to zero.
- *
* Caller must hold rpc_queue_lock
*/
static void
@@ -407,14 +390,6 @@
if (task->tk_rpcwait != &schedq)
__rpc_remove_wait_queue(task);
- /* If the task has been locked, then set tk_wakeup so that
- * rpc_unlock_task() wakes us up... */
- if (task->tk_lock) {
- task->tk_wakeup = 1;
- return;
- } else
- task->tk_wakeup = 0;
-
rpc_make_runnable(task);
dprintk("RPC: __rpc_wake_up_task done\n");
@@ -450,11 +425,11 @@
struct rpc_task *
rpc_wake_up_next(struct rpc_wait_queue *queue)
{
- struct rpc_task *task;
+ struct rpc_task *task = NULL;
dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
spin_lock_bh(&rpc_queue_lock);
- if ((task = queue->task) != 0)
+ task_for_first(task, &queue->tasks)
__rpc_wake_up_task(task);
spin_unlock_bh(&rpc_queue_lock);
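task_for_first() is new with this patch set. Its believed definition (include/linux/sunrpc/sched.h in 2.4.20, quoted from memory, so treat it as a sketch) expands to an if statement; that is why task must be pre-initialized to NULL above, and why the rpc_schedule() hunk further down can hang a bare else off it:

	/* Bind 'task' to the first queue entry when the list is non-empty;
	 * the trailing ', 1' keeps the condition true so the controlled
	 * block runs once, and an else clause fires on an empty list. */
	#define task_for_first(task, head) \
		if (!list_empty(head) && \
		    ((task = list_entry((head)->next, struct rpc_task, tk_list)), 1))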
@@ -470,9 +445,12 @@
void
rpc_wake_up(struct rpc_wait_queue *queue)
{
+ struct rpc_task *task;
+
spin_lock_bh(&rpc_queue_lock);
- while (queue->task)
- __rpc_wake_up_task(queue->task);
+ while (!list_empty(&queue->tasks))
+ task_for_first(task, &queue->tasks)
+ __rpc_wake_up_task(task);
spin_unlock_bh(&rpc_queue_lock);
}
@@ -486,41 +464,19 @@
void
rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
- struct rpc_task *task;
+ struct rpc_task *task;
spin_lock_bh(&rpc_queue_lock);
- while ((task = queue->task) != NULL) {
- task->tk_status = status;
- __rpc_wake_up_task(task);
+ while (!list_empty(&queue->tasks)) {
+ task_for_first(task, &queue->tasks) {
+ task->tk_status = status;
+ __rpc_wake_up_task(task);
+ }
}
spin_unlock_bh(&rpc_queue_lock);
}
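Both drain loops, rpc_wake_up() above and rpc_wake_up_status() here, re-read the list head on every pass instead of walking with an iterator, because __rpc_wake_up_task() unlinks the entry it is handed and would leave a cached next pointer dangling. With the macro expanded, the loop body is equivalent to this sketch:

	while (!list_empty(&queue->tasks)) {
		task = list_entry(queue->tasks.next, struct rpc_task, tk_list);
		task->tk_status = status;
		__rpc_wake_up_task(task);	/* unlinks task from queue->tasks */
	}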
/*
- * Lock down a sleeping task to prevent it from waking up
- * and disappearing from beneath us.
- *
- * This function should always be called with the
- * rpc_queue_lock held.
- */
-int
-__rpc_lock_task(struct rpc_task *task)
-{
- if (!RPC_IS_RUNNING(task))
- return ++task->tk_lock;
- return 0;
-}
-
-void
-rpc_unlock_task(struct rpc_task *task)
-{
- spin_lock_bh(&rpc_queue_lock);
- if (task->tk_lock && !--task->tk_lock && task->tk_wakeup)
- __rpc_wake_up_task(task);
- spin_unlock_bh(&rpc_queue_lock);
-}
-
-/*
* Run a task at a later time
*/
static void __rpc_atrun(struct rpc_task *);
@@ -700,23 +656,16 @@
dprintk("RPC: rpc_schedule enter\n");
while (1) {
spin_lock_bh(&rpc_queue_lock);
- if (!(task = schedq.task)) {
+
+ task_for_first(task, &schedq.tasks) {
+ __rpc_remove_wait_queue(task);
spin_unlock_bh(&rpc_queue_lock);
- break;
- }
- if (task->tk_lock) {
+
+ __rpc_execute(task);
+ } else {
spin_unlock_bh(&rpc_queue_lock);
- printk(KERN_ERR "RPC: Locked task was scheduled !!!!\n");
-#ifdef RPC_DEBUG
- rpc_debug = ~0;
- rpc_show_tasks();
-#endif
break;
}
- __rpc_remove_wait_queue(task);
- spin_unlock_bh(&rpc_queue_lock);
-
- __rpc_execute(task);
if (++count >= 200 || current->need_resched) {
count = 0;
@@ -770,8 +719,7 @@
}
if (flags & RPC_TASK_ASYNC)
return NULL;
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
} while (!signalled());
return NULL;
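This and the later SCHED_YIELD hunks replace the same two-line idiom with the yield() helper available in 2.4.20. What the call sites used to open-code, as a sketch (yield_sketch is a hypothetical name, shown for comparison only):

	static inline void yield_sketch(void)
	{
		current->policy |= SCHED_YIELD;	/* one-shot hint, cleared by the scheduler */
		schedule();			/* give up the CPU once */
	}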
@@ -812,11 +760,7 @@
/* Add to global list of all tasks */
spin_lock(&rpc_sched_lock);
- task->tk_next_task = all_tasks;
- task->tk_prev_task = NULL;
- if (all_tasks)
- all_tasks->tk_prev_task = task;
- all_tasks = task;
+ list_add(&task->tk_task, &all_tasks);
spin_unlock(&rpc_sched_lock);
if (clnt)
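Dropping the tk_prev_task/tk_next_task chain implies a matching change in struct rpc_task: both the per-queue links and the global-list links become embedded list_head members. A sketch of the affected fields only (names taken from the code in this patch; the real structure has many more members):

	#include <linux/list.h>

	struct rpc_task_sketch {
		struct list_head tk_list;	/* links on a struct rpc_wait_queue */
		struct list_head tk_task;	/* links on the global all_tasks list */
		/* each member carries its own next/prev pair, so a task can sit
		 * on a wait queue and on all_tasks at the same time */
	};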
@@ -875,8 +819,6 @@
void
rpc_release_task(struct rpc_task *task)
{
- struct rpc_task *next, *prev;
-
dprintk("RPC: %4d release task\n", task->tk_pid);
#ifdef RPC_DEBUG
@@ -890,15 +832,7 @@
/* Remove from global task list */
spin_lock(&rpc_sched_lock);
- prev = task->tk_prev_task;
- next = task->tk_next_task;
- if (next)
- next->tk_prev_task = prev;
- if (prev)
- prev->tk_next_task = next;
- else
- all_tasks = next;
- task->tk_next_task = task->tk_prev_task = NULL;
+ list_del(&task->tk_task);
spin_unlock(&rpc_sched_lock);
/* Protect the execution below. */
@@ -952,14 +886,13 @@
rpc_find_parent(struct rpc_task *child)
{
struct rpc_task *task, *parent;
+ struct list_head *le;
parent = (struct rpc_task *) child->tk_calldata;
- if ((task = childq.task) != NULL) {
- do {
- if (task == parent)
- return parent;
- } while ((task = task->tk_next) != childq.task);
- }
+ task_for_each(task, le, &childq.tasks)
+ if (task == parent)
+ return parent;
+
return NULL;
}
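task_for_each() and alltask_for_each() (the latter used in rpc_killall_tasks() and rpc_show_tasks() below) are thin wrappers around list_for_each(), differing only in which embedded list_head they pull the task from. Believed definitions, again quoted from memory and to be read as a sketch:

	/* Walk a wait queue via the tk_list links. */
	#define task_for_each(task, pos, head) \
		list_for_each(pos, head) \
			if ((task = list_entry(pos, struct rpc_task, tk_list)), 1)

	/* Walk the global task list via the tk_task links. */
	#define alltask_for_each(task, pos, head) \
		list_for_each(pos, head) \
			if ((task = list_entry(pos, struct rpc_task, tk_task)), 1)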
@@ -1013,7 +946,8 @@
void
rpc_killall_tasks(struct rpc_clnt *clnt)
{
- struct rpc_task **q, *rovr;
+ struct rpc_task *rovr;
+ struct list_head *le;
dprintk("RPC: killing all tasks for client %p\n", clnt);
@@ -1021,13 +955,12 @@
* Spin lock all_tasks to prevent changes...
*/
spin_lock(&rpc_sched_lock);
- for (q = &all_tasks; (rovr = *q); q = &rovr->tk_next_task) {
+ alltask_for_each(rovr, le, &all_tasks)
if (!clnt || rovr->tk_client == clnt) {
rovr->tk_flags |= RPC_TASK_KILLED;
rpc_exit(rovr, -EIO);
rpc_wake_up_task(rovr);
}
- }
spin_unlock(&rpc_sched_lock);
}
@@ -1036,7 +969,7 @@
static inline int
rpciod_task_pending(void)
{
- return schedq.task != NULL;
+ return !list_empty(&schedq.tasks);
}
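The pending-work test stays O(1): list_empty() is a single pointer comparison against the self-pointing empty head, so it costs no more than the old NULL check. From linux/list.h (2.4):

	static inline int list_empty(struct list_head *head)
	{
		return head->next == head;	/* an empty head points at itself */
	}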
@@ -1088,7 +1021,7 @@
}
dprintk("RPC: rpciod shutdown commences\n");
- if (all_tasks) {
+ if (!list_empty(&all_tasks)) {
printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
rpciod_killall();
}
@@ -1106,14 +1039,13 @@
{
unsigned long flags;
- while (all_tasks) {
+ while (!list_empty(&all_tasks)) {
current->sigpending = 0;
rpc_killall_tasks(NULL);
__rpc_schedule();
- if (all_tasks) {
+ if (!list_empty(&all_tasks)) {
dprintk("rpciod_killall: waiting for tasks to exit\n");
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
}
@@ -1183,8 +1115,7 @@
* wait briefly before checking the process id.
*/
current->sigpending = 0;
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
/*
* Display a message if we're going to wait longer.
*/
@@ -1207,25 +1138,23 @@
#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
- struct rpc_task *t = all_tasks, *next;
+ struct list_head *le;
+ struct rpc_task *t;
spin_lock(&rpc_sched_lock);
- t = all_tasks;
- if (!t) {
+ if (list_empty(&all_tasks)) {
spin_unlock(&rpc_sched_lock);
return;
}
printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
"-rpcwait -action- --exit--\n");
- for (; t; t = next) {
- next = t->tk_next_task;
+ alltask_for_each(t, le, &all_tasks)
printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
t->tk_pid, t->tk_msg.rpc_proc, t->tk_flags, t->tk_status,
t->tk_client, t->tk_client->cl_prog,
t->tk_rqstp, t->tk_timeout,
t->tk_rpcwait ? rpc_qname(t->tk_rpcwait) : " <NULL> ",
t->tk_action, t->tk_exit);
- }
spin_unlock(&rpc_sched_lock);
}
#endif